diff --git a/.circleci/config.yml b/.circleci/config.yml index 578e0bcbd..fb9159ff3 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -2,91 +2,214 @@ defaults: defaults: &defaults working_directory: '/go/src/github.com/influxdata/telegraf' - go-1_9: &go-1_9 + environment: + GOFLAGS: -p=8 + go-1_13: &go-1_13 docker: - - image: 'circleci/golang:1.9.7' - go-1_10: &go-1_10 + - image: 'quay.io/influxdb/telegraf-ci:1.13.11' + go-1_14: &go-1_14 docker: - - image: 'circleci/golang:1.10.3' + - image: 'quay.io/influxdb/telegraf-ci:1.14.3' + mac: &mac + macos: + xcode: 11.3.1 + working_directory: '~/go/src/github.com/influxdata/telegraf' + environment: + HOMEBREW_NO_AUTO_UPDATE: 1 + GOFLAGS: -p=8 version: 2 jobs: deps: - <<: [ *defaults, *go-1_10 ] + <<: [ *defaults, *go-1_14 ] steps: - checkout - restore_cache: - key: vendor-{{ checksum "Gopkg.lock" }} + key: go-mod-v1-{{ checksum "go.sum" }} - run: 'make deps' + - run: 'make tidy' - save_cache: - name: 'vendored deps' - key: vendor-{{ checksum "Gopkg.lock" }} + name: 'go module cache' + key: go-mod-v1-{{ checksum "go.sum" }} paths: - - './vendor' + - '/go/pkg/mod' - persist_to_workspace: - root: '/go/src' + root: '/go' paths: - '*' - test-go-1.9: - <<: [ *defaults, *go-1_9 ] + macdeps: + <<: [ *mac ] + steps: + - checkout + - restore_cache: + key: mac-go-mod-v1-{{ checksum "go.sum" }} + - run: 'brew install go@1.13' + - run: 'make deps' + - run: 'make tidy' + - save_cache: + name: 'go module cache' + key: mac-go-mod-v1-{{ checksum "go.sum" }} + paths: + - '~/go/pkg/mod' + - '/usr/local/Cellar/go' + - '/usr/local/bin/go' + - '/usr/local/bin/gofmt' + - persist_to_workspace: + root: '/' + paths: + - 'usr/local/bin/go' + - 'usr/local/Cellar/go' + - 'usr/local/bin/gofmt' + - 'Users/distiller/go' + + test-go-1.13: + <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: - at: '/go/src' - - run: 'make test-ci' - test-go-1.10: - <<: [ *defaults, *go-1_10 ] + at: '/go' + - run: 'make' + - run: 'make test' + test-go-1.13-386: + <<: [ *defaults, *go-1_13 ] steps: - attach_workspace: - at: '/go/src' - - run: 'make test-ci' - - run: 'GOARCH=386 make test-ci' + at: '/go' + - run: 'GOARCH=386 make' + - run: 'GOARCH=386 make test' + test-go-1.14: + <<: [ *defaults, *go-1_14 ] + steps: + - attach_workspace: + at: '/go' + - run: 'make' + - run: 'make check' + - run: 'make check-deps' + - run: 'make test' + test-go-1.14-386: + <<: [ *defaults, *go-1_14 ] + steps: + - attach_workspace: + at: '/go' + - run: 'GOARCH=386 make' + - run: 'GOARCH=386 make check' + - run: 'GOARCH=386 make test' + test-go-1.13-darwin: + <<: [ *mac ] + steps: + - attach_workspace: + at: '/' + - run: 'make' + - run: 'make check' + - run: 'make test' + + package: + <<: [ *defaults, *go-1_14 ] + steps: + - attach_workspace: + at: '/go' + - run: 'make package' + - store_artifacts: + path: './build' + destination: 'build' release: - <<: [ *defaults, *go-1_10 ] + <<: [ *defaults, *go-1_14 ] steps: - attach_workspace: - at: '/go/src' - - run: './scripts/release.sh' + at: '/go' + - run: 'make package-release' - store_artifacts: - path: './artifacts' - destination: '.' + path: './build' + destination: 'build' nightly: - <<: [ *defaults, *go-1_10 ] + <<: [ *defaults, *go-1_14 ] steps: - attach_workspace: - at: '/go/src' - - run: './scripts/release.sh' + at: '/go' + - run: 'make package-nightly' - store_artifacts: - path: './artifacts' - destination: '.' 
+ path: './build' + destination: 'build' workflows: version: 2 - build_and_release: + check: jobs: - - 'deps' - - 'test-go-1.9': + - 'macdeps': + filters: + tags: + only: /.*/ + - 'deps': + filters: + tags: + only: /.*/ + - 'test-go-1.13': requires: - 'deps' - - 'test-go-1.10': + filters: + tags: + only: /.*/ + - 'test-go-1.13-386': requires: - 'deps' + filters: + tags: + only: /.*/ + - 'test-go-1.14': + requires: + - 'deps' + filters: + tags: + only: /.*/ + - 'test-go-1.14-386': + requires: + - 'deps' + filters: + tags: + only: /.*/ + - 'test-go-1.13-darwin': + requires: + - 'macdeps' + filters: + tags: # only runs on tags if you specify this filter + only: /.*/ + - 'package': + requires: + - 'test-go-1.13' + - 'test-go-1.13-386' + - 'test-go-1.14' + - 'test-go-1.14-386' - 'release': requires: - - 'test-go-1.9' - - 'test-go-1.10' + - 'test-go-1.13' + - 'test-go-1.13-386' + - 'test-go-1.14' + - 'test-go-1.14-386' + filters: + tags: + only: /.*/ + branches: + ignore: /.*/ nightly: jobs: - 'deps' - - 'test-go-1.9': + - 'test-go-1.13': requires: - 'deps' - - 'test-go-1.10': + - 'test-go-1.13-386': + requires: + - 'deps' + - 'test-go-1.14': + requires: + - 'deps' + - 'test-go-1.14-386': requires: - 'deps' - 'nightly': requires: - - 'test-go-1.9' - - 'test-go-1.10' + - 'test-go-1.13' + - 'test-go-1.13-386' + - 'test-go-1.14' + - 'test-go-1.14-386' triggers: - schedule: cron: "0 7 * * *" diff --git a/.gitattributes b/.gitattributes index 276cc7709..21bc439bf 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,5 @@ CHANGELOG.md merge=union README.md merge=union +go.sum merge=union plugins/inputs/all/all.go merge=union plugins/outputs/all/all.go merge=union diff --git a/.github/ISSUE_TEMPLATE/Bug_report.md b/.github/ISSUE_TEMPLATE/Bug_report.md index b84aad767..e03395f6c 100644 --- a/.github/ISSUE_TEMPLATE/Bug_report.md +++ b/.github/ISSUE_TEMPLATE/Bug_report.md @@ -1,24 +1,45 @@ ---- -name: Bug report -about: Create a report to help us improve - ---- - -### Relevant telegraf.conf: - -### System info: - -[Include Telegraf version, operating system name, and other relevant details] - -### Steps to reproduce: - -1. ... -2. ... - -### Expected behavior: - -### Actual behavior: - -### Additional info: - -[Include gist of relevant config, logs, etc.] +--- +name: Bug report +about: Create a report to help us improve + +--- + + +### Relevant telegraf.conf: + +```toml + +``` + +### System info: + + + +### Docker + + + +### Steps to reproduce: + + + +1. ... +2. ... + +### Expected behavior: + + + +### Actual behavior: + + + +### Additional info: + + diff --git a/.github/ISSUE_TEMPLATE/Feature_request.md b/.github/ISSUE_TEMPLATE/Feature_request.md index 84d45fcd6..20aba04be 100644 --- a/.github/ISSUE_TEMPLATE/Feature_request.md +++ b/.github/ISSUE_TEMPLATE/Feature_request.md @@ -1,17 +1,17 @@ ---- -name: Feature request -about: Suggest an idea for this project - ---- - -## Feature Request - -Opening a feature request kicks off a discussion. - -### Proposal: - -### Current behavior: - -### Desired behavior: - -### Use case: [Why is this important (helps with prioritizing requests)] +--- +name: Feature request +about: Suggest an idea for this project + +--- + +## Feature Request + +Opening a feature request kicks off a discussion. 
+
+### Proposal:
+
+### Current behavior:
+
+### Desired behavior:
+
+### Use case:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bd0c262ab..d458426df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,19 +1,1013 @@
-## v1.8 [unreleased]
+## v1.15 [unreleased]
 
-### Release Notes
+#### Release Notes
+
+- The `logparser` input is deprecated, use the `tail` input with `data_format =
+  "grok"` as a replacement.
+
+- Several fields used primarily for debugging have been removed from the
+  `splunkmetric` serializer; if you are making use of these fields they can be
+  added back with the `tag` option.
+
+- Telegraf's `--test` mode now runs processors and aggregators before printing
+  metrics.
+
+- Official packages now built with Go 1.14.3.
+
+#### New Processors
+
+- [defaults](/plugins/processors/defaults/README.md) - Contributed by @jregistr
+- [filepath](/plugins/processors/filepath/README.md) - Contributed by @kir4h
+
+#### New Outputs
+
+- [newrelic](/plugins/outputs/newrelic/README.md) - Contributed by @hsinghkalsi
+
+#### Features
+
+- [#7634](https://github.com/influxdata/telegraf/pull/7634): Add support for streaming processors.
+- [#6905](https://github.com/influxdata/telegraf/pull/6905): Add commands stats to mongodb input plugin.
+- [#7193](https://github.com/influxdata/telegraf/pull/7193): Add additional concurrent transaction information.
+- [#7223](https://github.com/influxdata/telegraf/pull/7223): Add ability to specify HTTP Headers in http_listener_v2 which will be added as tags.
+- [#7140](https://github.com/influxdata/telegraf/pull/7140): Apply ping deadline to dns lookup.
+- [#7225](https://github.com/influxdata/telegraf/pull/7225): Add support for 64-bit integer types to modbus input.
+- [#7231](https://github.com/influxdata/telegraf/pull/7231): Add possibility to specify measurement per register.
+- [#7136](https://github.com/influxdata/telegraf/pull/7136): Support multiple templates for graphite serializers.
+- [#7250](https://github.com/influxdata/telegraf/pull/7250): Deploy telegraf configuration as a "non config" file.
+- [#7214](https://github.com/influxdata/telegraf/pull/7214): Add VolumeSpace query for sqlserver input with metric_version 2.
+- [#7304](https://github.com/influxdata/telegraf/pull/7304): Add reading bearer token from a file to http input.
+- [#7366](https://github.com/influxdata/telegraf/pull/7366): Add support for SIGUSR1 to trigger flush.
+- [#7271](https://github.com/influxdata/telegraf/pull/7271): Add retry when slave is busy to modbus input.
+- [#7356](https://github.com/influxdata/telegraf/pull/7356): Add option to save retention policy as tag in influxdb_listener.
+- [#6915](https://github.com/influxdata/telegraf/pull/6915): Add support for MDS and RGW sockets to ceph input.
+- [#7391](https://github.com/influxdata/telegraf/pull/7391): Extract target as a tag for each rule in iptables input.
+- [#7434](https://github.com/influxdata/telegraf/pull/7434): Use docker log timestamp as metric time.
+- [#7359](https://github.com/influxdata/telegraf/pull/7359): Add cpu query to sqlserver input.
+- [#7464](https://github.com/influxdata/telegraf/pull/7464): Add field creation to date processor and integer unix time support.
+- [#7483](https://github.com/influxdata/telegraf/pull/7483): Add integer mapping support to enum processor.
+- [#7321](https://github.com/influxdata/telegraf/pull/7321): Add additional fields to mongodb input.
+- [#7491](https://github.com/influxdata/telegraf/pull/7491): Add authentication support to the http_response input plugin.
+- [#7503](https://github.com/influxdata/telegraf/pull/7503): Add truncate_tags setting to wavefront output.
+- [#7545](https://github.com/influxdata/telegraf/pull/7545): Add configurable separator to graphite serializer and output.
+- [#7489](https://github.com/influxdata/telegraf/pull/7489): Add cluster state integer to mongodb input.
+- [#7515](https://github.com/influxdata/telegraf/pull/7515): Add option to disable mongodb cluster status.
+- [#7319](https://github.com/influxdata/telegraf/pull/7319): Add support for battery level monitoring to the fibaro input.
+- [#7405](https://github.com/influxdata/telegraf/pull/7405): Allow collection of HTTP Headers in http_response input.
+- [#7540](https://github.com/influxdata/telegraf/pull/7540): Add processor to look up service name by port.
+- [#7474](https://github.com/influxdata/telegraf/pull/7474): Add new once mode that writes to outputs and exits.
+- [#7474](https://github.com/influxdata/telegraf/pull/7474): Run processors and aggregators during test mode.
+- [#7294](https://github.com/influxdata/telegraf/pull/7294): Add SNMPv3 trap support to snmp_trap input.
+
+#### Bugfixes
+
+- [#7631](https://github.com/influxdata/telegraf/issues/7617): Fix issue with influx stream parser blocking when data is in buffer.
+- [#7371](https://github.com/influxdata/telegraf/issues/7371): Fix unable to write metrics to CloudWatch with IMDSv1 disabled.
+- [#7233](https://github.com/influxdata/telegraf/issues/7233): Fix vSphere 6.7 missing data issue.
+- [#7448](https://github.com/influxdata/telegraf/issues/7448): Remove debug fields from splunkmetric serializer.
+- [#7446](https://github.com/influxdata/telegraf/issues/7446): Fix gzip support in socket_listener with tcp sockets.
+- [#7390](https://github.com/influxdata/telegraf/issues/7390): Fix interval drift when round_interval is set in agent.
+- [#7524](https://github.com/influxdata/telegraf/pull/7524): Fix typo in total_elapsed_time_ms field of sqlserver input.
+- [#7203](https://github.com/influxdata/telegraf/issues/7203): Exclude csv_timestamp_column and csv_measurement_column from fields.
+
+## v1.14.4 [unreleased]
+
+#### Bugfixes
+
+- [#7325](https://github.com/influxdata/telegraf/issues/7325): Fix "cannot insert the value NULL" error with PerformanceCounters query.
+- [#7579](https://github.com/influxdata/telegraf/pull/7579): Fix numeric to bool conversion in converter processor.
+- [#7551](https://github.com/influxdata/telegraf/issues/7551): Fix typo in name of gc_cpu_fraction field of the influxdb input.
+
+## v1.14.3 [2020-05-19]
+
+#### Bugfixes
+
+- [#7412](https://github.com/influxdata/telegraf/pull/7412): Use same timestamp for all objects in arrays in the json parser.
+- [#7343](https://github.com/influxdata/telegraf/issues/7343): Handle multiple metrics with the same timestamp in dedup processor.
+- [#5905](https://github.com/influxdata/telegraf/issues/5905): Fix reconnection of timed out HTTP2 connections in influxdb outputs.
+- [#7468](https://github.com/influxdata/telegraf/issues/7468): Fix negative value parsing in ipmi_sensor input.
+
+## v1.14.2 [2020-04-28]
+
+#### Bugfixes
+
+- [#7241](https://github.com/influxdata/telegraf/issues/7241): Trim whitespace from instance tag in sqlserver input.
+- [#7322](https://github.com/influxdata/telegraf/issues/7322): Use increased AWS Cloudwatch GetMetricData limit of 500 metrics per call.
+- [#7318](https://github.com/influxdata/telegraf/issues/7318): Fix dimension limit on azure_monitor output.
+- [#7407](https://github.com/influxdata/telegraf/pull/7407): Fix 64-bit integer to string conversion in snmp input. +- [#7327](https://github.com/influxdata/telegraf/issues/7327): Fix shard indices reporting in elasticsearch input. +- [#7388](https://github.com/influxdata/telegraf/issues/7388): Ignore fields with NaN or Inf floats in the JSON serializer. +- [#7402](https://github.com/influxdata/telegraf/issues/7402): Fix typo in name of gc_cpu_fraction field of the kapacitor input. +- [#7235](https://github.com/influxdata/telegraf/issues/7235): Don't retry `create database` when using database_tag if forbidden by the server in influxdb output. +- [#7406](https://github.com/influxdata/telegraf/issues/7406): Allow CR and FF inside of string fields in influx parser. + +## v1.14.1 [2020-04-14] + +#### Bugfixes + +- [#7236](https://github.com/influxdata/telegraf/issues/7236): Fix PerformanceCounter query performance degradation in sqlserver input. +- [#7257](https://github.com/influxdata/telegraf/issues/7257): Fix error when using the Name field in template processor. +- [#7289](https://github.com/influxdata/telegraf/pull/7289): Fix export timestamp not working for prometheus on v2. +- [#7310](https://github.com/influxdata/telegraf/issues/7310): Fix exclude database and retention policy tags is shared. +- [#7262](https://github.com/influxdata/telegraf/issues/7262): Fix status path when using globs in phpfpm. + +## v1.14 [2020-03-26] + +#### Release Notes + +- In the `sqlserver` input, the `sqlserver_azurestats` measurement has been + renamed to `sqlserver_azure_db_resource_stats` due to an issue where numeric + metrics were previously being reported incorrectly as strings. + +- The `date` processor now uses the UTC timezone when creating its tag. In + previous versions the local time was used. + +#### New Inputs + +- [clickhouse](/plugins/inputs/clickhouse/README.md) - Contributed by @kshvakov +- [execd](/plugins/inputs/execd/README.md) - Contributed by @jgraichen +- [eventhub_consumer](/plugins/inputs/eventhub_consumer/README.md) - Contributed by @R290 +- [infiniband](/plugins/inputs/infiniband/README.md) - Contributed by @willfurnell +- [lanz](/plugins/inputs/lanz/README.md): Contributed by @timhughes +- [modbus](/plugins/inputs/modbus/README.md) - Contributed by @garciaolais +- [monit](/plugins/inputs/monit/README.md) - Contributed by @SirishaGopigiri +- [sflow](/plugins/inputs/sflow/README.md) - Contributed by @influxdata +- [wireguard](/plugins/inputs/wireguard/README.md) - Contributed by @LINKIWI + +#### New Processors + +- [dedup](/plugins/processors/dedup/README.md) - Contributed by @igomura +- [template](/plugins/processors/template/README.md) - Contributed by @RobMalvern +- [s2geo](/plugins/processors/s2geo/README.md) - Contributed by @alespour + +#### New Outputs + +- [warp10](/plugins/outputs/warp10/README.md) - Contributed by @aurrelhebert + +#### Features + +- [#6730](https://github.com/influxdata/telegraf/pull/6730): Add page_faults for mongodb wired tiger. +- [#6798](https://github.com/influxdata/telegraf/pull/6798): Add use_sudo option to ipmi_sensor input. +- [#6764](https://github.com/influxdata/telegraf/pull/6764): Add ability to collect pod labels to kubernetes input. +- [#6770](https://github.com/influxdata/telegraf/pull/6770): Expose unbound-control config file option. +- [#6508](https://github.com/influxdata/telegraf/pull/6508): Add support for new nginx plus api endpoints. 
+- [#6342](https://github.com/influxdata/telegraf/pull/6342): Add kafka SASL version control to support Azure Event Hub. +- [#6869](https://github.com/influxdata/telegraf/pull/6869): Add RBPEX IO statistics to DatabaseIO query in sqlserver input. +- [#6869](https://github.com/influxdata/telegraf/pull/6869): Add space on disk for each file to DatabaseIO query in the sqlserver input. +- [#6869](https://github.com/influxdata/telegraf/pull/6869): Calculate DB Name instead of GUID in physical_db_name in the sqlserver input. +- [#6733](https://github.com/influxdata/telegraf/pull/6733): Add latency stats to mongo input. +- [#6844](https://github.com/influxdata/telegraf/pull/6844): Add source and port tags to jenkins_job metrics. +- [#6886](https://github.com/influxdata/telegraf/pull/6886): Add date offset and timezone options to date processor. +- [#6859](https://github.com/influxdata/telegraf/pull/6859): Exclude resources by inventory path in vsphere input. +- [#6700](https://github.com/influxdata/telegraf/pull/6700): Allow a user defined field to be used as the graylog short_message. +- [#6917](https://github.com/influxdata/telegraf/pull/6917): Add server_name override for x509_cert plugin. +- [#6921](https://github.com/influxdata/telegraf/pull/6921): Add udp internal metrics for the statsd input. +- [#6914](https://github.com/influxdata/telegraf/pull/6914): Add replica set tag to mongodb input. +- [#6935](https://github.com/influxdata/telegraf/pull/6935): Add counters for merged reads and writes to diskio input. +- [#6982](https://github.com/influxdata/telegraf/pull/6982): Add support for titlecase transformation to strings processor. +- [#6993](https://github.com/influxdata/telegraf/pull/6993): Add support for MDB database information to openldap input. +- [#6957](https://github.com/influxdata/telegraf/pull/6957): Add new fields for Jenkins total and busy executors. +- [#7035](https://github.com/influxdata/telegraf/pull/7035): Fix dash to underscore replacement when handling embedded tags in Cisco MDT. +- [#7039](https://github.com/influxdata/telegraf/pull/7039): Add process created_at time to procstat input. +- [#7022](https://github.com/influxdata/telegraf/pull/7022): Add support for credentials file to nats_consumer and nats output. +- [#7065](https://github.com/influxdata/telegraf/pull/7065): Add additional tags and fields to apcupsd. +- [#7084](https://github.com/influxdata/telegraf/pull/7084): Add RabbitMQ slave_nodes and synchronized_slave_nodes metrics. +- [#7089](https://github.com/influxdata/telegraf/pull/7089): Allow globs in FPM unix socket paths. +- [#7071](https://github.com/influxdata/telegraf/pull/7071): Add non-cumulative histogram to histogram aggregator. +- [#6969](https://github.com/influxdata/telegraf/pull/6969): Add label and field selectors to prometheus input k8s discovery. +- [#7049](https://github.com/influxdata/telegraf/pull/7049): Add support for converting tag or field to measurement in converter processor. +- [#7103](https://github.com/influxdata/telegraf/pull/7103): Add volume_mount_point to DatabaseIO query in sqlserver input. +- [#7142](https://github.com/influxdata/telegraf/pull/7142): Add topic tag options to kafka output. +- [#7141](https://github.com/influxdata/telegraf/pull/7141): Add support for setting InfluxDB retention policy using tag. +- [#7163](https://github.com/influxdata/telegraf/pull/7163): Add Database IO Tempdb per Azure DB to sqlserver input. 
+- [#7150](https://github.com/influxdata/telegraf/pull/7150): Add option for explicitly including queries in sqlserver input. +- [#7173](https://github.com/influxdata/telegraf/pull/7173): Add support for GNMI DecimalVal type to cisco_telemetry_gnmi. + +#### Bugfixes + +- [#6397](https://github.com/influxdata/telegraf/issues/6397): Fix conversion to floats in AzureDBResourceStats query in the sqlserver input. +- [#6867](https://github.com/influxdata/telegraf/issues/6867): Fix case sensitive collation in sqlserver input. +- [#7005](https://github.com/influxdata/telegraf/pull/7005): Search for chronyc only when chrony input plugin is enabled. +- [#2280](https://github.com/influxdata/telegraf/issues/2280): Fix request to InfluxDB Listener failing with EOF. +- [#6124](https://github.com/influxdata/telegraf/issues/6124): Fix InfluxDB listener to continue parsing after error. +- [#7133](https://github.com/influxdata/telegraf/issues/7133): Fix log rotation to use actual file size instead of bytes written. +- [#7103](https://github.com/influxdata/telegraf/pull/7103): Fix several issues with DatabaseIO query in sqlserver input. +- [#7119](https://github.com/influxdata/telegraf/pull/7119): Fix internal metrics for output split into multiple lines. +- [#7021](https://github.com/influxdata/telegraf/pull/7021): Fix schedulers query compatibility with pre SQL-2016. +- [#7182](https://github.com/influxdata/telegraf/pull/7182): Set headers on influxdb_listener ping URL. +- [#7165](https://github.com/influxdata/telegraf/issues/7165): Fix url encoding of job names in jenkins input plugin. + +## v1.13.4 [2020-02-25] + +#### Release Notes + +- Official packages now built with Go 1.13.8. + +#### Bugfixes + +- [#6988](https://github.com/influxdata/telegraf/issues/6988): Parse NaN values from summary types in prometheus input. +- [#6820](https://github.com/influxdata/telegraf/issues/6820): Fix pgbouncer input when used with newer pgbouncer versions. +- [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 8192 stats in the ethtool input. +- [#7060](https://github.com/influxdata/telegraf/issues/7060): Fix perf counters collection on named instances in sqlserver input. +- [#6926](https://github.com/influxdata/telegraf/issues/6926): Use add time for prometheus expiration calculation. +- [#7057](https://github.com/influxdata/telegraf/issues/7057): Fix inconsistency with input error counting in internal input. +- [#7063](https://github.com/influxdata/telegraf/pull/7063): Use the same timestamp per call if no time is provided in prometheus input. + +## v1.13.3 [2020-02-04] + +#### Bugfixes + +- [#5744](https://github.com/influxdata/telegraf/issues/5744): Fix kibana input with Kibana versions greater than 6.4. +- [#6960](https://github.com/influxdata/telegraf/issues/6960): Fix duplicate TrackingIDs can be returned in queue consumer plugins. +- [#6913](https://github.com/influxdata/telegraf/issues/6913): Support up to 4096 stats in the ethtool input. +- [#6973](https://github.com/influxdata/telegraf/issues/6973): Expire metrics on query in addition to on add. + +## v1.13.2 [2020-01-21] + +#### Bugfixes + +- [#2652](https://github.com/influxdata/telegraf/issues/2652): Warn without error when processes input is started on Windows. +- [#6890](https://github.com/influxdata/telegraf/issues/6890): Only parse certificate blocks in x509_cert input. +- [#6883](https://github.com/influxdata/telegraf/issues/6883): Add custom attributes for all resource types in vsphere input. 
+- [#6899](https://github.com/influxdata/telegraf/pull/6899): Fix URL agent address form with udp in snmp input. +- [#6619](https://github.com/influxdata/telegraf/issues/6619): Change logic to allow recording of device fields when attributes is false. +- [#6903](https://github.com/influxdata/telegraf/issues/6903): Do not add invalid timestamps to kafka messages. +- [#6906](https://github.com/influxdata/telegraf/issues/6906): Fix json_strict option and set default of true. + +## v1.13.1 [2020-01-08] + +#### Bugfixes + +- [#6788](https://github.com/influxdata/telegraf/issues/6788): Fix ServerProperty query stops working on Azure after failover. +- [#6803](https://github.com/influxdata/telegraf/pull/6803): Add leading period to OID in SNMP v1 generic traps. +- [#6823](https://github.com/influxdata/telegraf/pull/6823): Fix missing config fields in prometheus serializer. +- [#6694](https://github.com/influxdata/telegraf/issues/6694): Fix panic on connection loss with undelivered messages in mqtt_consumer. +- [#6679](https://github.com/influxdata/telegraf/issues/6679): Encode query hash fields as hex strings in sqlserver input. +- [#6345](https://github.com/influxdata/telegraf/issues/6345): Invalidate diskio cache if the metadata mtime has changed. +- [#6800](https://github.com/influxdata/telegraf/issues/6800): Show platform not supported warning only on plugin creation. +- [#6814](https://github.com/influxdata/telegraf/issues/6814): Fix rabbitmq cannot complete gather after request error. +- [#6846](https://github.com/influxdata/telegraf/issues/6846): Fix /sbin/init --version executed on Telegraf startup. +- [#6847](https://github.com/influxdata/telegraf/issues/6847): Use last path element as field key if path fully specified in cisco_telemetry_gnmi input. + +## v1.13 [2019-12-12] + +#### Release Notes + +- Official packages built with Go 1.13.5. This affects the minimum supported + version on several platforms, most notably requiring Windows 7 (2008 R2) or + later. For details, check the release notes for Go + [ports](https://golang.org/doc/go1.13#ports). +- The `prometheus` input and `prometheus_client` output have a new mapping to + and from Telegraf metrics, which can be enabled by setting `metric_version = 2`. + The original mapping is deprecated. When both plugins have the same setting, + passthrough metrics will be unchanged. Refer to the `prometheus` input for + details about the mapping. + +#### New Inputs + +- [azure_storage_queue](/plugins/inputs/azure_storage_queue/README.md) - Contributed by @mjiderhamn +- [ethtool](/plugins/inputs/ethtool/README.md) - Contributed by @philippreston +- [snmp_trap](/plugins/inputs/snmp_trap/README.md) - Contributed by @influxdata +- [suricata](/plugins/inputs/suricata/README.md) - Contributed by @satta +- [synproxy](/plugins/inputs/synproxy/README.md) - Contributed by @rfrenayworldstream +- [systemd_units](/plugins/inputs/systemd_units/README.md) - Contributed by @benschweizer + +#### New Processors + +- [clone](/plugins/processors/clone/README.md) - Contributed by @adrianlzt + +#### New Aggregators + +- [merge](/plugins/aggregators/merge/README.md) - Contributed by @influxdata + +#### Features + +- [#6326](https://github.com/influxdata/telegraf/pull/5842): Add per node memory stats to rabbitmq input. +- [#6361](https://github.com/influxdata/telegraf/pull/6361): Add ability to read query from file to postgresql_extensible input. +- [#5921](https://github.com/influxdata/telegraf/pull/5921): Add replication metrics to the redis input. 
+- [#6177](https://github.com/influxdata/telegraf/pull/6177): Support NX-OS telemetry extensions in cisco_telemetry_mdt.
+- [#6415](https://github.com/influxdata/telegraf/pull/6415): Allow graphite parser to create Inf and NaN values.
+- [#6434](https://github.com/influxdata/telegraf/pull/6434): Use prefix base detection for ints in grok parser.
+- [#6465](https://github.com/influxdata/telegraf/pull/6465): Add more performance counter metrics to sqlserver input.
+- [#6476](https://github.com/influxdata/telegraf/pull/6476): Add millisecond unix time support to grok parser.
+- [#6473](https://github.com/influxdata/telegraf/pull/6473): Add container id as optional source tag to docker and docker_log input.
+- [#6504](https://github.com/influxdata/telegraf/pull/6504): Add lang parameter to OpenWeathermap input plugin.
+- [#6540](https://github.com/influxdata/telegraf/pull/6540): Log file open errors at debug level in tail input.
+- [#6553](https://github.com/influxdata/telegraf/pull/6553): Add timeout option to cloudwatch input.
+- [#6549](https://github.com/influxdata/telegraf/pull/6549): Support custom success codes in http input.
+- [#6530](https://github.com/influxdata/telegraf/pull/6530): Improve ipvs input error strings and logging.
+- [#6532](https://github.com/influxdata/telegraf/pull/6532): Add strict mode to JSON parser that can be disabled to ignore invalid items.
+- [#6543](https://github.com/influxdata/telegraf/pull/6543): Add support for Kubernetes 1.16 and remove deprecated API usage.
+- [#6283](https://github.com/influxdata/telegraf/pull/6283): Add gathering of RabbitMQ federation link metrics.
+- [#6356](https://github.com/influxdata/telegraf/pull/6356): Add bearer token defaults for Kubernetes plugins.
+- [#5870](https://github.com/influxdata/telegraf/pull/5870): Add support for SNMP over TCP.
+- [#6603](https://github.com/influxdata/telegraf/pull/6603): Add support for per output flush jitter.
+- [#6650](https://github.com/influxdata/telegraf/pull/6650): Add a nameable file tag to file input plugin.
+- [#6640](https://github.com/influxdata/telegraf/pull/6640): Add Splunk MultiMetric support.
+- [#6680](https://github.com/influxdata/telegraf/pull/6668): Add support for sending HTTP Basic Auth in influxdb input.
+- [#5767](https://github.com/influxdata/telegraf/pull/5767): Add ability to configure the url tag in the prometheus input.
+- [#5767](https://github.com/influxdata/telegraf/pull/5767): Add prometheus metric_version=2 mapping to internal metrics/line protocol.
+- [#6703](https://github.com/influxdata/telegraf/pull/6703): Add prometheus metric_version=2 support to prometheus_client output.
+- [#6660](https://github.com/influxdata/telegraf/pull/6660): Add content_encoding compression support to socket_listener.
+- [#6689](https://github.com/influxdata/telegraf/pull/6689): Add high resolution metrics support to CloudWatch output.
+- [#6716](https://github.com/influxdata/telegraf/pull/6716): Add SReclaimable and SUnreclaim to mem input.
+- [#6695](https://github.com/influxdata/telegraf/pull/6695): Allow multiple certificates per file in x509_cert input.
+- [#6686](https://github.com/influxdata/telegraf/pull/6686): Add additional tags to the x509 input.
+- [#6703](https://github.com/influxdata/telegraf/pull/6703): Add batch data format support to file output.
+- [#6688](https://github.com/influxdata/telegraf/pull/6688): Support partition assignment strategy configuration in kafka_consumer.
+- [#6731](https://github.com/influxdata/telegraf/pull/6731): Add node type tag to mongodb input. +- [#6669](https://github.com/influxdata/telegraf/pull/6669): Add uptime_ns field to mongodb input. +- [#6735](https://github.com/influxdata/telegraf/pull/6735): Support resolution of symlinks in filecount input. +- [#6746](https://github.com/influxdata/telegraf/pull/6746): Set message timestamp to the metric time in kafka output. +- [#6740](https://github.com/influxdata/telegraf/pull/6740): Add base64decode operation to string processor. +- [#6790](https://github.com/influxdata/telegraf/pull/6790): Add option to control collecting global variables to mysql input. + +#### Bugfixes + +- [#6484](https://github.com/influxdata/telegraf/issues/6484): Show correct default settings in mysql sample config. +- [#6583](https://github.com/influxdata/telegraf/issues/6583): Use 1h or 3h rain values as appropriate in openweathermap input. +- [#6573](https://github.com/influxdata/telegraf/issues/6573): Fix not a valid field error in Windows with nvidia input. +- [#6614](https://github.com/influxdata/telegraf/issues/6614): Fix influxdb output serialization on connection closed. +- [#6690](https://github.com/influxdata/telegraf/issues/6690): Fix ping skips remaining hosts after dns lookup error. +- [#6684](https://github.com/influxdata/telegraf/issues/6684): Log mongodb oplog auth errors at debug level. +- [#6705](https://github.com/influxdata/telegraf/issues/6705): Remove trailing underscore trimming from json flattener. +- [#6421](https://github.com/influxdata/telegraf/issues/6421): Revert change causing cpu usage to be capped at 100 percent. +- [#6523](https://github.com/influxdata/telegraf/issues/6523): Accept any media type in the prometheus input. +- [#6769](https://github.com/influxdata/telegraf/issues/6769): Fix unix socket dial arguments in uwsgi input. +- [#6757](https://github.com/influxdata/telegraf/issues/6757): Replace colon chars in prometheus output labels with metric_version=1. +- [#6773](https://github.com/influxdata/telegraf/issues/6773): Set TrimLeadingSpace when TrimSpace is on in csv parser. + +## v1.12.6 [2019-11-19] + +#### Bugfixes + +- [#6666](https://github.com/influxdata/telegraf/issues/6666): Fix many plugin errors are logged at debug logging level. +- [#6652](https://github.com/influxdata/telegraf/issues/6652): Use nanosecond precision in docker_log input. +- [#6642](https://github.com/influxdata/telegraf/issues/6642): Fix interface option with method = native in ping input. +- [#6680](https://github.com/influxdata/telegraf/pull/6680): Fix panic in mongodb input if shard connection pool stats are unreadable. + +## v1.12.5 [2019-11-12] + +#### Bugfixes + +- [#6576](https://github.com/influxdata/telegraf/issues/6576): Fix incorrect results in ping input plugin. +- [#6610](https://github.com/influxdata/telegraf/pull/6610): Add missing character replacement to sql_instance tag. +- [#6337](https://github.com/influxdata/telegraf/issues/6337): Change no metric error message to debug level in cloudwatch input. +- [#6602](https://github.com/influxdata/telegraf/issues/6602): Add missing ServerProperties query to sqlserver input docs. +- [#6643](https://github.com/influxdata/telegraf/pull/6643): Fix mongodb connections_total_created field loading. +- [#6627](https://github.com/influxdata/telegraf/issues/6578): Fix metric creation when node is offline in jenkins input. 
+- [#6649](https://github.com/influxdata/telegraf/issues/6615): Fix docker uptime_ns calculation when container has been restarted. +- [#6647](https://github.com/influxdata/telegraf/issues/6646): Fix mysql field type conflict in conversion of gtid_mode to an integer. +- [#5529](https://github.com/influxdata/telegraf/issues/5529): Fix mysql field type conflict with ssl_verify_depth and ssl_ctx_verify_depth. + +## v1.12.4 [2019-10-23] + +#### Release Notes + +- Official packages built with Go 1.12.12. + +#### Bugfixes + +- [#6521](https://github.com/influxdata/telegraf/issues/6521): Fix metric generation with ping input native method. +- [#6541](https://github.com/influxdata/telegraf/issues/6541): Exclude alias tag if unset from plugin internal stats. +- [#6564](https://github.com/influxdata/telegraf/issues/6564): Fix socket_mode option in powerdns_recursor input. + +## v1.12.3 [2019-10-07] + +#### Bugfixes + +- [#6445](https://github.com/influxdata/telegraf/issues/6445): Use batch serialization format in exec output. +- [#6455](https://github.com/influxdata/telegraf/issues/6455): Build official packages with Go 1.12.10. +- [#6464](https://github.com/influxdata/telegraf/pull/6464): Use case insensitive serial number match in smart input. +- [#6469](https://github.com/influxdata/telegraf/pull/6469): Add auth header only when env var is set. +- [#6468](https://github.com/influxdata/telegraf/pull/6468): Fix running multiple mysql and sqlserver plugin instances. +- [#6471](https://github.com/influxdata/telegraf/issues/6471): Fix database routing on retry with exclude_database_tag. +- [#6488](https://github.com/influxdata/telegraf/issues/6488): Fix logging panic in exec input with nagios data format. + +## v1.12.2 [2019-09-24] + +#### Bugfixes + +- [#6386](https://github.com/influxdata/telegraf/issues/6386): Fix detection of layout timestamps in csv and json parser. +- [#6394](https://github.com/influxdata/telegraf/issues/6394): Fix parsing of BATTDATE in apcupsd input. +- [#6398](https://github.com/influxdata/telegraf/issues/6398): Keep boolean values listed in json_string_fields. +- [#6393](https://github.com/influxdata/telegraf/issues/6393): Disable Go plugin support in official builds. +- [#6391](https://github.com/influxdata/telegraf/issues/6391): Fix path handling issues in cisco_telemetry_gnmi. + +## v1.12.1 [2019-09-10] + +#### Bugfixes + +- [#6344](https://github.com/influxdata/telegraf/issues/6344): Fix depends on GLIBC_2.14 symbol version. +- [#6329](https://github.com/influxdata/telegraf/issues/6329): Fix filecount for paths with trailing slash. +- [#6331](https://github.com/influxdata/telegraf/issues/6331): Convert check state to an integer in icinga2 input. +- [#6354](https://github.com/influxdata/telegraf/issues/6354): Fix could not mark message delivered error in kafka_consumer. +- [#6362](https://github.com/influxdata/telegraf/issues/6362): Skip collection stats when disabled in mongodb input. +- [#6366](https://github.com/influxdata/telegraf/issues/6366): Fix error reading closed response body on redirect in http_response. +- [#6373](https://github.com/influxdata/telegraf/issues/6373): Fix apcupsd documentation to reflect plugin. +- [#6375](https://github.com/influxdata/telegraf/issues/6375): Display retry log message only when retry after is received. 
+
+## v1.12 [2019-09-03]
+
+#### Release Notes
+
+- The cluster health related fields in the elasticsearch input have been split
+  out from the `elasticsearch_indices` measurement into the new
+  `elasticsearch_cluster_health_indices` measurement as they were originally
+  combined by error.
+
+#### New Inputs
+
+- [apcupsd](/plugins/inputs/apcupsd/README.md) - Contributed by @jonaz
+- [docker_log](/plugins/inputs/docker_log/README.md) - Contributed by @prashanthjbabu
+- [fireboard](/plugins/inputs/fireboard/README.md) - Contributed by @ronnocol
+- [logstash](/plugins/inputs/logstash/README.md) - Contributed by @lkmcs @dmitryilyin @arkady-emelyanov
+- [marklogic](/plugins/inputs/marklogic/README.md) - Contributed by @influxdata
+- [openntpd](/plugins/inputs/openntpd/README.md) - Contributed by @aromeyer
+- [uwsgi](/plugins/inputs/uwsgi/README.md) - Contributed by @blaggacao
+
+#### New Parsers
+
+- [form_urlencoded](/plugins/parsers/form_urlencoded/README.md) - Contributed by @byonchev
+
+#### New Processors
+
+- [date](/plugins/processors/date/README.md) - Contributed by @influxdata
+- [pivot](/plugins/processors/pivot/README.md) - Contributed by @influxdata
+- [tag_limit](/plugins/processors/tag_limit/README.md) - Contributed by @memory
+- [unpivot](/plugins/processors/unpivot/README.md) - Contributed by @influxdata
+
+#### New Outputs
+
+- [exec](/plugins/outputs/exec/README.md) - Contributed by @Jaeyo
+
+#### Features
+
+- [#5842](https://github.com/influxdata/telegraf/pull/5842): Improve performance of wavefront serializer.
+- [#5863](https://github.com/influxdata/telegraf/pull/5863): Allow regex processor to append tag values.
+- [#5997](https://github.com/influxdata/telegraf/pull/5997): Add starttime field to phpfpm input.
+- [#5998](https://github.com/influxdata/telegraf/pull/5998): Add cluster name tag to elasticsearch indices.
+- [#6006](https://github.com/influxdata/telegraf/pull/6006): Add support for interface field in http_response input plugin.
+- [#5996](https://github.com/influxdata/telegraf/pull/5996): Add container uptime_ns in docker input plugin.
+- [#6016](https://github.com/influxdata/telegraf/pull/6016): Add better user-facing errors for API timeouts in docker input.
+- [#6027](https://github.com/influxdata/telegraf/pull/6027): Add TLS mutual auth support to jti_openconfig_telemetry input.
+- [#6053](https://github.com/influxdata/telegraf/pull/6053): Add support for ES 7.x to elasticsearch output.
+- [#6062](https://github.com/influxdata/telegraf/pull/6062): Add basic auth to prometheus input plugin.
+- [#6064](https://github.com/influxdata/telegraf/pull/6064): Add node roles tag to elasticsearch input.
+- [#5572](https://github.com/influxdata/telegraf/pull/5572): Support floats in statsd percentiles.
+- [#6050](https://github.com/influxdata/telegraf/pull/6050): Add native Go ping method to ping input plugin.
+- [#6074](https://github.com/influxdata/telegraf/pull/6074): Resume from last known offset in tail input when reloading Telegraf.
+- [#6111](https://github.com/influxdata/telegraf/pull/6111): Add improved support for Azure SQL Database to sqlserver input.
+- [#6079](https://github.com/influxdata/telegraf/pull/6079): Add extra attributes for NVMe devices to smart input.
+- [#6084](https://github.com/influxdata/telegraf/pull/6084): Add docker_devicemapper measurement to docker input plugin.
+- [#6122](https://github.com/influxdata/telegraf/pull/6122): Add basic auth support to elasticsearch input.
+- [#6102](https://github.com/influxdata/telegraf/pull/6102): Support string field glob matching in json parser. +- [#6101](https://github.com/influxdata/telegraf/pull/6101): Update gjson to allow multipath syntax in json parser. +- [#6144](https://github.com/influxdata/telegraf/pull/6144): Add support for collecting SQL Requests to identify waits and blocking to sqlserver input. +- [#6105](https://github.com/influxdata/telegraf/pull/6105): Collect k8s endpoints, ingress, and services in kube_inventory plugin. +- [#6129](https://github.com/influxdata/telegraf/pull/6129): Add support for field/tag keys to strings processor. +- [#6143](https://github.com/influxdata/telegraf/pull/6143): Add certificate verification status to x509_cert input. +- [#6163](https://github.com/influxdata/telegraf/pull/6163): Support percentage value parsing in redis input. +- [#6024](https://github.com/influxdata/telegraf/pull/6024): Load external Go plugins from --plugin-directory. +- [#6184](https://github.com/influxdata/telegraf/pull/6184): Add ability to exclude db/bucket tag from influxdb outputs. +- [#6137](https://github.com/influxdata/telegraf/pull/6137): Gather per collections stats in mongodb input plugin. +- [#6195](https://github.com/influxdata/telegraf/pull/6195): Add TLS & credentials configuration for nats_consumer input plugin. +- [#6194](https://github.com/influxdata/telegraf/pull/6194): Add support for enterprise repos to github plugin. +- [#6060](https://github.com/influxdata/telegraf/pull/6060): Add Indices stats to elasticsearch input. +- [#6189](https://github.com/influxdata/telegraf/pull/6189): Add left function to string processor. +- [#6049](https://github.com/influxdata/telegraf/pull/6049): Add grace period for metrics late for aggregation. +- [#4435](https://github.com/influxdata/telegraf/pull/4435): Add diff and non_negative_diff to basicstats aggregator. +- [#6201](https://github.com/influxdata/telegraf/pull/6201): Add device tags to smart_attributes. +- [#5719](https://github.com/influxdata/telegraf/pull/5719): Collect framework_offers and allocator metrics in mesos input. +- [#6216](https://github.com/influxdata/telegraf/pull/6216): Add telegraf and go version to the internal input plugin. +- [#6214](https://github.com/influxdata/telegraf/pull/6214): Update the number of logical CPUs dynamically in system plugin. +- [#6259](https://github.com/influxdata/telegraf/pull/6259): Add darwin (macOS) builds to the release. +- [#6241](https://github.com/influxdata/telegraf/pull/6241): Add configurable timeout setting to smart input. +- [#6249](https://github.com/influxdata/telegraf/pull/6249): Add memory_usage field to procstat input plugin. +- [#5971](https://github.com/influxdata/telegraf/pull/5971): Add support for custom attributes to vsphere input. +- [#5926](https://github.com/influxdata/telegraf/pull/5926): Add cmdstat metrics to redis input. +- [#6261](https://github.com/influxdata/telegraf/pull/6261): Add content_length metric to http_response input plugin. +- [#6257](https://github.com/influxdata/telegraf/pull/6257): Add database_tag option to influxdb_listener to add database from query string. +- [#6246](https://github.com/influxdata/telegraf/pull/6246): Add capability to limit TLS versions and cipher suites. +- [#6266](https://github.com/influxdata/telegraf/pull/6266): Add topic_tag option to mqtt_consumer. +- [#6207](https://github.com/influxdata/telegraf/pull/6207): Add ability to label inputs for logging. 
+- [#6300](https://github.com/influxdata/telegraf/pull/6300): Add TLS support to nginx_plus, nginx_plus_api and nginx_vts. + +#### Bugfixes + +- [#5692](https://github.com/influxdata/telegraf/issues/5692): Fix sensor read error stops reporting of all sensors in temp input. +- [#4356](https://github.com/influxdata/telegraf/issues/4356): Fix double pct replacement in sysstat input. +- [#6004](https://github.com/influxdata/telegraf/issues/6004): Fix race in master node detection in elasticsearch input. +- [#6100](https://github.com/influxdata/telegraf/issues/6100): Fix SSPI authentication not working in sqlserver input. +- [#6142](https://github.com/influxdata/telegraf/issues/6142): Fix memory error panic in mqtt input. +- [#6136](https://github.com/influxdata/telegraf/issues/6136): Support Kafka 2.3.0 consumer groups. +- [#6232](https://github.com/influxdata/telegraf/issues/6232): Fix persistent session in mqtt_consumer. +- [#6235](https://github.com/influxdata/telegraf/issues/6235): Fix finder inconsistencies in vsphere input. +- [#6138](https://github.com/influxdata/telegraf/issues/6138): Fix parsing multiple metrics on the first line of tailed file. +- [#2526](https://github.com/influxdata/telegraf/issues/2526): Send TERM to exec processes before sending KILL signal. +- [#5326](https://github.com/influxdata/telegraf/issues/5326): Query oplog only when connected to a replica set. +- [#6317](https://github.com/influxdata/telegraf/pull/6317): Use environment variables to locate Program Files on Windows. + +## v1.11.5 [2019-08-27] + +#### Bugfixes + +- [#6250](https://github.com/influxdata/telegraf/pull/6250): Update go-sql-driver/mysql driver to 1.4.1 to address auth issues. +- [#6279](https://github.com/influxdata/telegraf/issues/6279): Return error status from --test if input plugins produce an error. +- [#6309](https://github.com/influxdata/telegraf/issues/6309): Fix with multiple instances only last configuration is used in smart input. +- [#6303](https://github.com/influxdata/telegraf/pull/6303): Build official packages with Go 1.12.9. +- [#6234](https://github.com/influxdata/telegraf/issues/6234): Split out -w argument in iptables input. +- [#6270](https://github.com/influxdata/telegraf/issues/6270): Add support for parked process state on Linux. +- [#6287](https://github.com/influxdata/telegraf/issues/6287): Remove leading slash from rcon command. +- [#6313](https://github.com/influxdata/telegraf/pull/6313): Allow jobs with dashes in the name in lustre2 input. + +## v1.11.4 [2019-08-06] + +#### Bugfixes + +- [#6200](https://github.com/influxdata/telegraf/pull/6200): Correct typo in kubernetes logsfs_available_bytes field. +- [#6191](https://github.com/influxdata/telegraf/issues/6191): Skip floats that are NaN or Inf in Datadog output. +- [#6209](https://github.com/influxdata/telegraf/issues/6209): Fix reload panic in socket_listener input plugin. + +## v1.11.3 [2019-07-23] + +#### Bugfixes + +- [#6054](https://github.com/influxdata/telegraf/issues/6054): Fix unable to reconnect after vCenter reboot in vsphere input. +- [#6073](https://github.com/influxdata/telegraf/issues/6073): Handle unknown error in nvidia-smi output. +- [#6121](https://github.com/influxdata/telegraf/pull/6121): Fix panic in statd input when processing datadog events. +- [#6125](https://github.com/influxdata/telegraf/issues/6125): Treat empty array as successful parse in json parser. +- [#6094](https://github.com/influxdata/telegraf/issues/6094): Add missing rcode and zonestat to bind input. 
+- [#6114](https://github.com/influxdata/telegraf/issues/6114): Fix lustre2 input plugin config parse regression. +- [#5894](https://github.com/influxdata/telegraf/issues/5894): Fix template pattern partial wildcard matching. +- [#6151](https://github.com/influxdata/telegraf/issues/6151): Fix panic in github input. + +## v1.11.2 [2019-07-09] + +#### Bugfixes + +- [#6056](https://github.com/influxdata/telegraf/pull/6056): Fix source address ping flag on BSD. +- [#6059](https://github.com/influxdata/telegraf/issues/6059): Fix value out of range error on 32-bit systems in bind input. +- [#3573](https://github.com/influxdata/telegraf/issues/3573): Fix tail and logparser stop working after reload. +- [#6077](https://github.com/influxdata/telegraf/pull/6077): Fix filecount path separator handling in Windows. +- [#6075](https://github.com/influxdata/telegraf/issues/6075): Fix panic with empty datadog tag string. +- [#6069](https://github.com/influxdata/telegraf/issues/6069): Apply topic filter to partition metrics in burrow input. + +## v1.11.1 [2019-06-25] + +#### Bugfixes + +- [#5980](https://github.com/influxdata/telegraf/issues/5980): Cannot set mount_points option in disk input. +- [#5983](https://github.com/influxdata/telegraf/issues/5983): Omit keys when creating measurement names for GNMI telemetry. +- [#5972](https://github.com/influxdata/telegraf/issues/5972): Don't consider pid of 0 when using systemd lookup in procstat. +- [#5807](https://github.com/influxdata/telegraf/issues/5807): Skip 404 error reporting in nginx_plus_api input. +- [#5999](https://github.com/influxdata/telegraf/issues/5999): Fix panic if pool_mode column does not exist. +- [#6019](https://github.com/influxdata/telegraf/issues/6019): Add missing container_id field to docker_container_status metrics. +- [#5742](https://github.com/influxdata/telegraf/issues/5742): Ignore error when utmp is missing in system input. +- [#6032](https://github.com/influxdata/telegraf/issues/6032): Add device, serial_no, and wwn tags to synthetic attributes. +- [#6012](https://github.com/influxdata/telegraf/issues/6012): Fix parsing of remote tcp address in statsd input. + +## v1.11 [2019-06-11] + +#### Release Notes + +- The `uptime_format` field in the system input has been deprecated, use the + `uptime` field instead. +- The `cloudwatch` input has been updated to use a more efficient API, it now + requires `GetMetricData` permissions instead of `GetMetricStatistics`. The + `units` tag is not available from this API and is no longer collected. 
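The v1.11 release note above switches the `cloudwatch` input to the bulk `GetMetricData` API, so IAM policies that only grant `cloudwatch:GetMetricStatistics` need updating, and the `units` tag is no longer collected. A minimal input sketch for reference (region and namespace are placeholders, and `statistic_include` is assumed to be the statistic-selection option listed under this release's features):

```toml
[[inputs.cloudwatch]]
  region = "us-east-1"      # placeholder region
  period = "5m"
  delay = "5m"
  interval = "5m"
  namespace = "AWS/ELB"     # placeholder namespace; all metrics in it are collected
  ## Assumed option name for choosing which statistic types to collect:
  # statistic_include = ["average", "sum", "minimum", "maximum"]
```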
+ +#### New Inputs + +- [bind](/plugins/inputs/bind/README.md) - Contributed by @dswarbrick & @danielllek +- [cisco_telemetry_gnmi](/plugins/inputs/cisco_telemetry_gnmi/README.md) - Contributed by @sbyx +- [cisco_telemetry_mdt](/plugins/inputs/cisco_telemetry_mdt/README.md) - Contributed by @sbyx +- [ecs](/plugins/inputs/ecs/README.md) - Contributed by @rbtr +- [github](/plugins/inputs/github/README.md) - Contributed by @influxdata +- [openweathermap](/plugins/inputs/openweathermap/README.md) - Contributed by @regel +- [powerdns_recursor](/plugins/inputs/powerdns_recursor/README.md) - Contributed by @dupondje + +#### New Aggregators + +- [final](/plugins/aggregators/final/README.md) - Contributed by @oplehto + +#### New Outputs + +- [syslog](/plugins/outputs/syslog/README.md) - Contributed by @javicrespo +- [health](/plugins/outputs/health/README.md) - Contributed by @influxdata + +#### New Serializers + +- [wavefront](/plugins/serializers/wavefront/README.md) - Contributed by @puckpuck + +#### Features + +- [#5556](https://github.com/influxdata/telegraf/pull/5556): Add TTL field to ping input. +- [#5569](https://github.com/influxdata/telegraf/pull/5569): Add hexadecimal string to integer conversion to converter processor. +- [#5601](https://github.com/influxdata/telegraf/pull/5601): Add support for multiple line text and perfdata to nagios parser. +- [#5648](https://github.com/influxdata/telegraf/pull/5648): Allow env vars ${} expansion syntax in configuration file. +- [#5641](https://github.com/influxdata/telegraf/pull/5641): Add option to reset buckets on flush to histogram aggregator. +- [#5664](https://github.com/influxdata/telegraf/pull/5664): Add option to use strict sanitization rules to wavefront output. +- [#5697](https://github.com/influxdata/telegraf/pull/5697): Add namespace restriction to prometheus input plugin. +- [#5681](https://github.com/influxdata/telegraf/pull/5681): Add cmdline tag to procstat input. +- [#5704](https://github.com/influxdata/telegraf/pull/5704): Support verbose query param in ping endpoint of influxdb_listener. +- [#5713](https://github.com/influxdata/telegraf/pull/5713): Enhance HTTP connection options for phpfpm input plugin. +- [#5544](https://github.com/influxdata/telegraf/pull/5544): Use more efficient GetMetricData API to collect cloudwatch metrics. +- [#5544](https://github.com/influxdata/telegraf/pull/5544): Allow selection of collected statistic types in cloudwatch input. +- [#5757](https://github.com/influxdata/telegraf/pull/5757): Speed up interface stat collection in net input. +- [#5769](https://github.com/influxdata/telegraf/pull/5769): Add pagefault data to procstat input plugin. +- [#5760](https://github.com/influxdata/telegraf/pull/5760): Add option to set permissions for unix domain sockets to socket_listener. +- [#5585](https://github.com/influxdata/telegraf/pull/5585): Add cli support for outputting sections of the config. +- [#5770](https://github.com/influxdata/telegraf/pull/5770): Add service-display-name option for use with Windows service. +- [#5778](https://github.com/influxdata/telegraf/pull/5778): Add support for log rotation. +- [#5765](https://github.com/influxdata/telegraf/pull/5765): Support more drive types in smart input. +- [#5829](https://github.com/influxdata/telegraf/pull/5829): Add support for HTTP basic auth to solr input. +- [#5791](https://github.com/influxdata/telegraf/pull/5791): Add support for datadog events to statsd input. 
+- [#5817](https://github.com/influxdata/telegraf/pull/5817): Allow devices option to match against devlinks. +- [#5855](https://github.com/influxdata/telegraf/pull/5855): Support tags in enum processor. +- [#5830](https://github.com/influxdata/telegraf/pull/5830): Add support for gzip compression to amqp plugins. +- [#5831](https://github.com/influxdata/telegraf/pull/5831): Support passive queue declaration in amqp_consumer. +- [#5901](https://github.com/influxdata/telegraf/pull/5901): Set user agent in stackdriver output. +- [#5885](https://github.com/influxdata/telegraf/pull/5885): Extend metrics collected from Nvidia GPUs. +- [#5547](https://github.com/influxdata/telegraf/pull/5547): Add file rotation support to the file output. +- [#5955](https://github.com/influxdata/telegraf/pull/5955): Add source tag to hddtemp plugin. + +#### Bugfixes + +- [#5692](https://github.com/influxdata/telegraf/pull/5692): Temperature input plugin stops working when WiFi is turned off. +- [#5631](https://github.com/influxdata/telegraf/pull/5631): Create Windows service only when specified or in service manager. +- [#5730](https://github.com/influxdata/telegraf/pull/5730): Don't start telegraf when stale pidfile found. +- [#5477](https://github.com/influxdata/telegraf/pull/5477): Support Minecraft server 1.13 and newer in minecraft input. +- [#4098](https://github.com/influxdata/telegraf/issues/4098): Fix inline table support in configuration file. +- [#1598](https://github.com/influxdata/telegraf/issues/1598): Fix multi-line basic strings support in configuration file. +- [#5746](https://github.com/influxdata/telegraf/issues/5746): Verify a process passed by pid_file exists in procstat input. +- [#5455](https://github.com/influxdata/telegraf/issues/5455): Fix unsupported pkt type error in pgbouncer. +- [#5771](https://github.com/influxdata/telegraf/pull/5771): Fix only one job per storage target reported in lustre2 input. +- [#5796](https://github.com/influxdata/telegraf/issues/5796): Set default timeout of 5s in fibaro input. +- [#5835](https://github.com/influxdata/telegraf/issues/5835): Fix docker input does not parse image name correctly. +- [#5661](https://github.com/influxdata/telegraf/issues/5661): Fix direct exchange routing key in amqp output. +- [#5819](https://github.com/influxdata/telegraf/issues/5819): Fix scale set resource id with azure_monitor output. +- [#5883](https://github.com/influxdata/telegraf/issues/5883): Skip invalid power times in apex_neptune input. +- [#3485](https://github.com/influxdata/telegraf/issues/3485): Fix sqlserver connection closing on error. +- [#5917](https://github.com/influxdata/telegraf/issues/5917): Fix toml option name in nginx_upstream_check. +- [#5920](https://github.com/influxdata/telegraf/issues/5920): Fixed datastore name mapping in vsphere input. +- [#5879](https://github.com/influxdata/telegraf/issues/5879): Fix multiple SIGHUP causes Telegraf to shutdown. +- [#5891](https://github.com/influxdata/telegraf/issues/5891): Fix connection leak in influxdb outputs on reload. +- [#5858](https://github.com/influxdata/telegraf/issues/5858): Fix batch fails when single metric is unserializable. +- [#5536](https://github.com/influxdata/telegraf/issues/5536): Log a warning on write if the metric buffer has overflowed. + +## v1.10.4 [2019-05-14] + +#### Bugfixes + +- [#5764](https://github.com/influxdata/telegraf/pull/5764): Fix race condition in the Wavefront parser. 
+- [#5783](https://github.com/influxdata/telegraf/pull/5783): Create telegraf user in pre-install rpm scriptlet. +- [#5792](https://github.com/influxdata/telegraf/pull/5792): Don't discard metrics on forbidden error in influxdb_v2 output. +- [#5803](https://github.com/influxdata/telegraf/issues/5803): Fix http output cannot set Host header. +- [#5619](https://github.com/influxdata/telegraf/issues/5619): Fix interval estimation in vsphere input. +- [#5782](https://github.com/influxdata/telegraf/pull/5782): Skip lines with missing refid in ntpq input. +- [#5755](https://github.com/influxdata/telegraf/issues/5755): Add support for hex values to ipmi_sensor input. +- [#5824](https://github.com/influxdata/telegraf/issues/5824): Fix parse of unix timestamp with more than ns precision. +- [#5836](https://github.com/influxdata/telegraf/issues/5836): Restore field name case in interrupts input. + +## v1.10.3 [2019-04-16] + +#### Bugfixes + +- [#5680](https://github.com/influxdata/telegraf/pull/5680): Allow colons in metric names in prometheus_client output. +- [#5716](https://github.com/influxdata/telegraf/pull/5716): Set log directory attributes in rpm spec. + +## v1.10.2 [2019-04-02] + +#### Release Notes + +- String fields no longer have leading and trailing quotation marks removed in + the grok parser. If you are capturing quoted strings you may need to update + the patterns. + +#### Bugfixes + +- [#5612](https://github.com/influxdata/telegraf/pull/5612): Fix deadlock when Telegraf is aligning aggregators. +- [#5523](https://github.com/influxdata/telegraf/issues/5523): Fix missing cluster stats in ceph input. +- [#5566](https://github.com/influxdata/telegraf/pull/5566): Fix reading major and minor block devices identifiers in diskio input. +- [#5607](https://github.com/influxdata/telegraf/pull/5607): Add owned directories to rpm package spec. +- [#4998](https://github.com/influxdata/telegraf/issues/4998): Fix last character removed from string field in grok parser. +- [#5632](https://github.com/influxdata/telegraf/pull/5632): Fix drop tracking of metrics removed with aggregator drop_original. +- [#5540](https://github.com/influxdata/telegraf/pull/5540): Fix open file error handling in file output. +- [#5626](https://github.com/influxdata/telegraf/issues/5626): Fix plugin name in influxdb_v2 output logging. +- [#5621](https://github.com/influxdata/telegraf/issues/5621): Fix basedir check and parent dir extraction in filecount input. +- [#5618](https://github.com/influxdata/telegraf/issues/5618): Listen before leaving start in statsd. +- [#5595](https://github.com/influxdata/telegraf/issues/5595): Fix aggregator window alignment. +- [#5637](https://github.com/influxdata/telegraf/issues/5637): Fix panic during shutdown of multiple aggregators. +- [#5642](https://github.com/influxdata/telegraf/issues/5642): Fix parsing of kube config certificate-authority-data in prometheus input. +- [#5636](https://github.com/influxdata/telegraf/issues/5636): Fix tags applied to wrong metric on parse error. +- [#5522](https://github.com/influxdata/telegraf/issues/5522): Remove tags that would create invalid label names in prometheus output. + +## v1.10.1 [2019-03-19] + +#### Bugfixes + +- [#5448](https://github.com/influxdata/telegraf/issues/5448): Show error when TLS configuration cannot be loaded. +- [#5543](https://github.com/influxdata/telegraf/pull/5543): Add Base64-encoding/decoding for Google Cloud PubSub plugins. 
+- [#5565](https://github.com/influxdata/telegraf/issues/5565): Fix type compatibility in vsphere plugin with use_int_samples option. +- [#5492](https://github.com/influxdata/telegraf/issues/5492): Fix vsphere input shows failed task in vCenter. +- [#5530](https://github.com/influxdata/telegraf/issues/5530): Fix invalid measurement name and skip column in csv parser. +- [#5589](https://github.com/influxdata/telegraf/issues/5589): Fix system input causing high cpu usage on Raspbian. +- [#5575](https://github.com/influxdata/telegraf/issues/5575): Don't add empty healthcheck tags to consul input. + +## v1.10 [2019-03-05] + +#### New Inputs + +- [cloud_pubsub](/plugins/inputs/cloud_pubsub/README.md) - Contributed by @emilymye +- [cloud_pubsub_push](/plugins/inputs/cloud_pubsub_push/README.md) - Contributed by @influxdata +- [kinesis_consumer](/plugins/inputs/kinesis_consumer/README.md) - Contributed by @influxdata +- [kube_inventory](/plugins/inputs/kube_inventory/README.md) - Contributed by @influxdata +- [neptune_apex](/plugins/inputs/neptune_apex/README.md) - Contributed by @MaxRenaud +- [nginx_upstream_check](/plugins/inputs/nginx_upstream_check/README.md) - Contributed by @dmitryilyin +- [multifile](/plugins/inputs/multifile/README.md) - Contributed by @martin2250 +- [stackdriver](/plugins/inputs/stackdriver/README.md) - Contributed by @WuHan0608 + +#### New Outputs + +- [cloud_pubsub](/plugins/outputs/cloud_pubsub/README.md) - Contributed by @emilymye + +#### New Serializers + +- [nowmetric](/plugins/serializers/nowmetric/README.md) - Contributed by @JefMuller +- [carbon2](/plugins/serializers/carbon2/README.md) - Contributed by @frankreno + +#### Features + +- [#4345](https://github.com/influxdata/telegraf/pull/4345): Allow for force gathering ES cluster stats. +- [#5047](https://github.com/influxdata/telegraf/pull/5047): Add support for unix and unix_ms timestamps to csv parser. +- [#5038](https://github.com/influxdata/telegraf/pull/5038): Add ability to tag metrics with topic in kafka_consumer. +- [#5024](https://github.com/influxdata/telegraf/pull/5024): Add option to store cpu as a tag in interrupts input. +- [#5074](https://github.com/influxdata/telegraf/pull/5074): Add support for sending a request body to http input. +- [#5069](https://github.com/influxdata/telegraf/pull/5069): Add running field to procstat_lookup. +- [#5116](https://github.com/influxdata/telegraf/pull/5116): Include DEVLINKS in available diskio udev properties. +- [#5149](https://github.com/influxdata/telegraf/pull/5149): Add micro and nanosecond unix timestamp support to JSON parser. +- [#5160](https://github.com/influxdata/telegraf/pull/5160): Add support for basic auth to couchdb input. +- [#5161](https://github.com/influxdata/telegraf/pull/5161): Add support in wavefront output for the Wavefront Direct Ingestion API. +- [#5168](https://github.com/influxdata/telegraf/pull/5168): Allow counting float values in valuecounter aggregator. +- [#5177](https://github.com/influxdata/telegraf/pull/5177): Add log send and redo queue fields to sqlserver input. +- [#5113](https://github.com/influxdata/telegraf/pull/5113): Improve scalability of vsphere input. +- [#5210](https://github.com/influxdata/telegraf/pull/5210): Add read and write op per second fields to ceph input. +- [#5214](https://github.com/influxdata/telegraf/pull/5214): Add configurable timeout to varnish input. +- [#5273](https://github.com/influxdata/telegraf/pull/5273): Add flush_total_time_ns and additional wired tiger fields to mongodb input. 
+- [#5295](https://github.com/influxdata/telegraf/pull/5295): Support passing bearer token directly in k8s input. +- [#5294](https://github.com/influxdata/telegraf/pull/5294): Support passing bearer token directly in prometheus input. +- [#5292](https://github.com/influxdata/telegraf/pull/5292): Add option to report input timestamp in prometheus output. +- [#5234](https://github.com/influxdata/telegraf/pull/5234): Add Linux mipsle packages. +- [#5382](https://github.com/influxdata/telegraf/pull/5382): Support unix_us and unix_ns timestamp format in csv parser. +- [#5391](https://github.com/influxdata/telegraf/pull/5391): Add resource type and resource label support to stackdriver output. +- [#5396](https://github.com/influxdata/telegraf/pull/5396): Add internal metric for line too long in influxdb_listener. +- [#4892](https://github.com/influxdata/telegraf/pull/4892): Add option to set retain flag on messages to mqtt output. +- [#5165](https://github.com/influxdata/telegraf/pull/5165): Add resource path based filtering to vsphere input. +- [#5417](https://github.com/influxdata/telegraf/pull/5417): Add rcode tag and field to dns_query input. +- [#5453](https://github.com/influxdata/telegraf/pull/5453): Support Azure Sovereign Environments with endpoint_url option. +- [#5472](https://github.com/influxdata/telegraf/pull/5472): Support configuring a default timezone in JSON parser. +- [#5482](https://github.com/influxdata/telegraf/pull/5482): Add ceph_health metrics to ceph input. +- [#5488](https://github.com/influxdata/telegraf/pull/5488): Add option to disable unique timestamp adjustment in grok parser. +- [#5473](https://github.com/influxdata/telegraf/pull/5473): Add mutual TLS support to prometheus_client output. +- [#4308](https://github.com/influxdata/telegraf/pull/4308): Add additional metrics to rabbitmq input. +- [#5388](https://github.com/influxdata/telegraf/pull/5388): Add multicast support to socket_listener input. +- [#5490](https://github.com/influxdata/telegraf/pull/5490): Add tag based routing in influxdb/influxdb_v2 outputs. +- [#5533](https://github.com/influxdata/telegraf/pull/5533): Allow grok parser to produce metrics with no fields. + +#### Bugfixes + +- [#4610](https://github.com/influxdata/telegraf/pull/4610): Fix initscript removes pidfile of restarted Telegraf process. +- [#5320](https://github.com/influxdata/telegraf/pull/5320): Use datacenter option spelling in consul input. +- [#5316](https://github.com/influxdata/telegraf/pull/5316): Remove auth from /ping route in influxdb_listener. +- [#5304](https://github.com/influxdata/telegraf/issues/5304): Fix x509_cert input stops checking certs after first error. +- [#5404](https://github.com/influxdata/telegraf/issues/5404): Group stackdriver requests to send one point per timeseries. +- [#5449](https://github.com/influxdata/telegraf/issues/5449): Log permission error and ignore in filecount input. +- [#5497](https://github.com/influxdata/telegraf/pull/5497): Create log file in append mode. +- [#5325](https://github.com/influxdata/telegraf/issues/5325): Ignore tracking for metrics added to aggregator. +- [#5514](https://github.com/influxdata/telegraf/issues/5514): Fix panic when rejecting empty batch. +- [#5518](https://github.com/influxdata/telegraf/pull/5518): Fix conversion from string float to integer. +- [#5431](https://github.com/influxdata/telegraf/pull/5431): Sort metrics by timestamp in prometheus output. 
+
+## v1.9.5 [2019-02-26]
+
+#### Bugfixes
+
+- [#5315](https://github.com/influxdata/telegraf/issues/5315): Skip string fields when writing to stackdriver output.
+- [#5364](https://github.com/influxdata/telegraf/issues/5364): Send metrics in ascending time order in stackdriver output.
+- [#5117](https://github.com/influxdata/telegraf/issues/5117): Use systemd in Amazon Linux 2 rpm.
+- [#4988](https://github.com/influxdata/telegraf/issues/4988): Set deadlock priority in sqlserver input.
+- [#5403](https://github.com/influxdata/telegraf/issues/5403): Remove error log when snmp6 directory does not exist with nstat input.
+- [#5437](https://github.com/influxdata/telegraf/issues/5437): Host not added when using custom arguments in ping plugin.
+- [#5438](https://github.com/influxdata/telegraf/issues/5438): Fix InfluxDB output UDP line splitting.
+- [#5456](https://github.com/influxdata/telegraf/issues/5456): Disable results by row in azuredb query.
+- [#5277](https://github.com/influxdata/telegraf/issues/5277): Add backwards compatibility fields in ceph usage and pool stats.
+
+## v1.9.4 [2019-02-05]
+
+#### Bugfixes
+
+- [#5334](https://github.com/influxdata/telegraf/issues/5334): Fix skip_rows and skip_columns options in csv parser.
+- [#5181](https://github.com/influxdata/telegraf/issues/5181): Always send basic auth in jenkins input.
+- [#5346](https://github.com/influxdata/telegraf/pull/5346): Build official packages with Go 1.11.5.
+- [#5368](https://github.com/influxdata/telegraf/issues/5368): Fix definition of multiple syslog plugins.
+
+## v1.9.3 [2019-01-22]
+
+#### Bugfixes
+
+- [#5261](https://github.com/influxdata/telegraf/pull/5261): Fix arithmetic overflow in sqlserver input.
+- [#5194](https://github.com/influxdata/telegraf/issues/5194): Fix latest metrics not sent first when output fails.
+- [#5285](https://github.com/influxdata/telegraf/issues/5285): Fix amqp_consumer stops consuming when it receives unparseable messages.
+- [#5281](https://github.com/influxdata/telegraf/issues/5281): Fix prometheus input not detecting added and removed pods.
+- [#5215](https://github.com/influxdata/telegraf/issues/5215): Remove userinfo from cluster tag in couchbase.
+- [#5298](https://github.com/influxdata/telegraf/issues/5298): Fix internal_write buffer_size not reset on timed writes.
+
+## v1.9.2 [2019-01-08]
+
+#### Bugfixes
+
+- [#5130](https://github.com/influxdata/telegraf/pull/5130): Increase varnishstat timeout.
+- [#5135](https://github.com/influxdata/telegraf/pull/5135): Remove storage calculation for non-Azure managed instances and add server version.
+- [#5083](https://github.com/influxdata/telegraf/pull/5083): Fix error sending empty tag value in azure_monitor output.
+- [#5143](https://github.com/influxdata/telegraf/issues/5143): Fix panic with prometheus input plugin on shutdown.
+- [#4482](https://github.com/influxdata/telegraf/issues/4482): Support non-transparent framing of syslog messages.
+- [#5151](https://github.com/influxdata/telegraf/issues/5151): Apply global and plugin level metric modifications before filtering.
+- [#5167](https://github.com/influxdata/telegraf/pull/5167): Fix num_remapped_pgs field in ceph plugin.
+- [#5179](https://github.com/influxdata/telegraf/issues/5179): Add PDH_NO_DATA to known counter error codes in win_perf_counters.
+- [#5170](https://github.com/influxdata/telegraf/issues/5170): Fix amqp_consumer stops consuming on empty message.
+- [#4906](https://github.com/influxdata/telegraf/issues/4906): Fix multiple replace tables not working in strings processor.
+- [#5219](https://github.com/influxdata/telegraf/issues/5219): Allow non-local udp connections in net_response.
+- [#5218](https://github.com/influxdata/telegraf/issues/5218): Fix toml option names in parser processor.
+- [#5225](https://github.com/influxdata/telegraf/issues/5225): Fix panic in docker input with bad endpoint.
+- [#5209](https://github.com/influxdata/telegraf/issues/5209): Fix original metric modified by aggregator filters.
+
+## v1.9.1 [2018-12-11]
+
+#### Bugfixes
+
+- [#5006](https://github.com/influxdata/telegraf/issues/5006): Fix boolean handling in splunkmetric serializer.
+- [#5046](https://github.com/influxdata/telegraf/issues/5046): Set default config values in jenkins input.
+- [#4664](https://github.com/influxdata/telegraf/issues/4664): Fix server connection and document stats in mongodb input.
+- [#5010](https://github.com/influxdata/telegraf/issues/5010): Add X-Requested-By header to graylog input.
+- [#5052](https://github.com/influxdata/telegraf/issues/5052): Fix metric memory not freed from the metric buffer on write.
+- [#3817](https://github.com/influxdata/telegraf/issues/3817): Add support for client tls certificates in postgresql inputs.
+- [#5082](https://github.com/influxdata/telegraf/issues/5082): Prevent panic when marking the offset in kafka_consumer.
+- [#5084](https://github.com/influxdata/telegraf/issues/5084): Add early metrics to aggregator and honor drop_original setting.
+- [#5112](https://github.com/influxdata/telegraf/pull/5112): Use -W flag on bsd variants in ping input.
+- [#5114](https://github.com/influxdata/telegraf/issues/5114): Allow delta metrics in wavefront parser.
+
+## v1.9 [2018-11-20]
+
+#### Release Notes
+
+- The `http_listener` input plugin has been renamed to `influxdb_listener` and
+  use of the original name is deprecated. The new name better describes the
+  intended use of the plugin as an InfluxDB relay. For general purpose
+  transfer of metrics in any format via HTTP, it is recommended to use
+  `http_listener_v2` instead.
+
+- Input plugins are no longer prevented from adding metrics when the output is
+  writing, and new metrics will move into the metric buffer as needed. This
+  will provide more robust degradation and recovery when writing to a slow
+  output at high throughput.
+
+  To avoid over-consumption when reading from queue consumers: `kafka_consumer`,
+  `amqp_consumer`, `mqtt_consumer`, `nats_consumer`, and `nsq_consumer` use
+  the new option `max_undelivered_messages` to limit the number of outstanding
+  unwritten metrics.
+
+#### New Inputs
+
+- [http_listener_v2](/plugins/inputs/http_listener_v2/README.md) - Contributed by @jul1u5
+- [ipvs](/plugins/inputs/ipvs/README.md) - Contributed by @amoghe
+- [jenkins](/plugins/inputs/jenkins/README.md) - Contributed by @influxdata & @lpic10
+- [nginx_plus_api](/plugins/inputs/nginx_plus_api/README.md) - Contributed by @Bugagazavr
+- [nginx_vts](/plugins/inputs/nginx_vts/README.md) - Contributed by @monder
+- [wireless](/plugins/inputs/wireless/README.md) - Contributed by @jamesmaidment
+
+#### New Outputs
+
+- [stackdriver](/plugins/outputs/stackdriver/README.md) - Contributed by @jamesmaidment
+
+#### Features
+
+- [#4686](https://github.com/influxdata/telegraf/pull/4686): Add replace function to strings processor.
+- [#4754](https://github.com/influxdata/telegraf/pull/4754): Query servers in parallel in dns_query input.
+- [#4753](https://github.com/influxdata/telegraf/pull/4753): Add ability to define a custom service name when installing as a Windows service. +- [#4703](https://github.com/influxdata/telegraf/pull/4703): Add support for IPv6 in the ping plugin. +- [#4781](https://github.com/influxdata/telegraf/pull/4781): Add new config for csv column explicit type conversion. +- [#4800](https://github.com/influxdata/telegraf/pull/4800): Add an option to specify a custom datadog URL. +- [#4803](https://github.com/influxdata/telegraf/pull/4803): Use non-allocating field and tag accessors in datadog output. +- [#4752](https://github.com/influxdata/telegraf/pull/4752): Add per-directory file counts in the filecount input. +- [#4811](https://github.com/influxdata/telegraf/pull/4811): Add windows service name lookup to procstat input. +- [#4807](https://github.com/influxdata/telegraf/pull/4807): Add entity-body compression to http output. +- [#4838](https://github.com/influxdata/telegraf/pull/4838): Add telegraf version to User-Agent header. +- [#4864](https://github.com/influxdata/telegraf/pull/4864): Use DescribeStreamSummary in place of ListStreams in kinesis output. +- [#4852](https://github.com/influxdata/telegraf/pull/4852): Add ability to specify bytes options as strings with units. +- [#3903](https://github.com/influxdata/telegraf/pull/3903): Add support for TLS configuration in NSQ input. +- [#4914](https://github.com/influxdata/telegraf/pull/4914): Collect additional stats in memcached input. +- [#3847](https://github.com/influxdata/telegraf/pull/3847): Add wireless input plugin. +- [#4934](https://github.com/influxdata/telegraf/pull/4934): Add LUN to datasource translation in vsphere input. +- [#4798](https://github.com/influxdata/telegraf/pull/4798): Allow connecting to prometheus via unix socket. +- [#4920](https://github.com/influxdata/telegraf/pull/4920): Add scraping for Prometheus endpoint in Kubernetes. +- [#4938](https://github.com/influxdata/telegraf/pull/4938): Add per output flush_interval, metric_buffer_limit and metric_batch_size. + +#### Bugfixes + +- [#4950](https://github.com/influxdata/telegraf/pull/4950): Remove the time_key from the field values in JSON parser. +- [#3968](https://github.com/influxdata/telegraf/issues/3968): Fix input time rounding when using a custom interval. +- [#4938](https://github.com/influxdata/telegraf/pull/4938): Fix potential deadlock or leaked resources on restart/reload. +- [#2919](https://github.com/influxdata/telegraf/pull/2919): Fix outputs block inputs when batch size is reached. +- [#4789](https://github.com/influxdata/telegraf/issues/4789): Fix potential missing datastore metrics in vSphere plugin. +- [#4982](https://github.com/influxdata/telegraf/issues/4982): Log warning when wireless plugin is used on unsupported platform. +- [#4965](https://github.com/influxdata/telegraf/issues/4965): Handle non-tls columns for mysql input. +- [#4983](https://github.com/influxdata/telegraf/issues/4983): Fix panic in influxdb_listener when using gzip encoding. + +## v1.8.3 [2018-10-30] + +### Bugfixes + +- [#4873](https://github.com/influxdata/telegraf/pull/4873): Add DN attributes as tags in x509_cert input to avoid series overwrite. +- [#4921](https://github.com/influxdata/telegraf/issues/4921): Prevent connection leak by closing unused connections in amqp output. +- [#4904](https://github.com/influxdata/telegraf/issues/4904): Use default partition key when tag does not exist in kinesis output. 
+- [#4901](https://github.com/influxdata/telegraf/pull/4901): Log the correct error in jti_openconfig. +- [#4937](https://github.com/influxdata/telegraf/pull/4937): Handle panic when ipmi_sensor input gets bad input. +- [#4930](https://github.com/influxdata/telegraf/pull/4930): Don't add unserializable fields to jolokia2 input. +- [#4866](https://github.com/influxdata/telegraf/pull/4866): Fix version check in postgresql_extensible. + +## v1.8.2 [2018-10-17] + +### Bugfixes + +- [#4844](https://github.com/influxdata/telegraf/pull/4844): Update write path to match updated InfluxDB v2 API. +- [#4840](https://github.com/influxdata/telegraf/pull/4840): Fix missing timeouts in vsphere input. +- [#4851](https://github.com/influxdata/telegraf/pull/4851): Support uint fields in aerospike input. +- [#4854](https://github.com/influxdata/telegraf/pull/4854): Use container name from list if no name in container stats. +- [#4850](https://github.com/influxdata/telegraf/pull/4850): Prevent panic in filecount input on error in file stat. +- [#4846](https://github.com/influxdata/telegraf/pull/4846): Fix mqtt_consumer connect and reconnect. +- [#4849](https://github.com/influxdata/telegraf/pull/4849): Fix panic in logparser input. +- [#4869](https://github.com/influxdata/telegraf/pull/4869): Lower authorization errors to debug level in mongodb input. +- [#4875](https://github.com/influxdata/telegraf/pull/4875): Return correct response code on ping input. +- [#4874](https://github.com/influxdata/telegraf/pull/4874): Fix segfault in x509_cert input. + +## v1.8.1 [2018-10-03] + +### Bugfixes + +- [#4750](https://github.com/influxdata/telegraf/pull/4750): Fix hardware_type may be truncated in sqlserver input. +- [#4723](https://github.com/influxdata/telegraf/issues/4723): Improve performance in basicstats aggregator. +- [#4747](https://github.com/influxdata/telegraf/pull/4747): Add hostname to TLS config for SNI support. +- [#4675](https://github.com/influxdata/telegraf/issues/4675): Don't add tags with empty values to opentsdb output. +- [#4765](https://github.com/influxdata/telegraf/pull/4765): Fix panic during network error in vsphere input. +- [#4766](https://github.com/influxdata/telegraf/pull/4766): Unify http_listener error response with InfluxDB. +- [#4769](https://github.com/influxdata/telegraf/pull/4769): Add UUID to VMs in vSphere input. +- [#4758](https://github.com/influxdata/telegraf/issues/4758): Skip tags with empty values in cloudwatch output. +- [#4783](https://github.com/influxdata/telegraf/issues/4783): Fix missing non-realtime samples in vSphere input. +- [#4799](https://github.com/influxdata/telegraf/pull/4799): Fix case of timezone/grok_timezone options. 
+ +## v1.8 [2018-09-21] ### New Inputs +- [activemq](./plugins/inputs/activemq/README.md) - Contributed by @mlabouardy +- [beanstalkd](./plugins/inputs/beanstalkd/README.md) - Contributed by @44px +- [filecount](./plugins/inputs/filecount/README.md) - Contributed by @sometimesfood +- [file](./plugins/inputs/file/README.md) - Contributed by @maxunt +- [icinga2](./plugins/inputs/icinga2/README.md) - Contributed by @mlabouardy +- [kibana](./plugins/inputs/kibana/README.md) - Contributed by @lpic10 +- [pgbouncer](./plugins/inputs/pgbouncer/README.md) - Contributed by @nerzhul +- [temp](./plugins/inputs/temp/README.md) - Contributed by @pytimer - [tengine](./plugins/inputs/tengine/README.md) - Contributed by @ertaoxu +- [vsphere](./plugins/inputs/vsphere/README.md) - Contributed by @prydin +- [x509_cert](./plugins/inputs/x509_cert/README.md) - Contributed by @jtyr ### New Processors - [enum](./plugins/processors/enum/README.md) - Contributed by @KarstenSchnitter +- [parser](./plugins/processors/parser/README.md) - Contributed by @Ayrdrie & @maxunt +- [rename](./plugins/processors/rename/README.md) - Contributed by @goldibex +- [strings](./plugins/processors/strings/README.md) - Contributed by @bsmaldon ### New Aggregators - [valuecounter](./plugins/aggregators/valuecounter/README.md) - Contributed by @piotr1212 +### New Outputs + +- [azure_monitor](./plugins/outputs/azure_monitor/README.md) - Contributed by @influxdata +- [influxdb_v2](./plugins/outputs/influxdb_v2/README.md) - Contributed by @influxdata + +### New Parsers + +- [csv](/plugins/parsers/csv/README.md) - Contributed by @maxunt +- [grok](/plugins/parsers/grok/README.md) - Contributed by @maxunt +- [logfmt](/plugins/parsers/logfmt/README.md) - Contributed by @Ayrdrie & @maxunt +- [wavefront](/plugins/parsers/wavefront/README.md) - Contributed by @puckpuck + +### New Serializers + +- [splunkmetric](/plugins/serializers/splunkmetric/README.md) - Contributed by @ronnocol + ### Features - [#4236](https://github.com/influxdata/telegraf/pull/4236): Add SSL/TLS support to redis input. @@ -32,15 +1026,107 @@ - [#4347](https://github.com/influxdata/telegraf/pull/4347): Add http path configuration for OpenTSDB output. - [#4352](https://github.com/influxdata/telegraf/pull/4352): Gather IPMI metrics concurrently. - [#4362](https://github.com/influxdata/telegraf/pull/4362): Add mongo document and connection metrics. -- [#3772](https://github.com/influxdata/telegraf/pull/3772): Add Enum Processor. +- [#3772](https://github.com/influxdata/telegraf/pull/3772): Add enum processor plugin. - [#4386](https://github.com/influxdata/telegraf/pull/4386): Add user tag to procstat input. +- [#4403](https://github.com/influxdata/telegraf/pull/4403): Add support for multivalue metrics to collectd parser. +- [#4418](https://github.com/influxdata/telegraf/pull/4418): Add support for setting kafka client id. +- [#4332](https://github.com/influxdata/telegraf/pull/4332): Add file input plugin and grok parser. +- [#4320](https://github.com/influxdata/telegraf/pull/4320): Improve cloudwatch output performance. +- [#3768](https://github.com/influxdata/telegraf/pull/3768): Add x509_cert input plugin. +- [#4471](https://github.com/influxdata/telegraf/pull/4471): Add IPSIpAddress syntax to ipaddr conversion in snmp plugin. +- [#4363](https://github.com/influxdata/telegraf/pull/4363): Add filecount input plugin. +- [#4485](https://github.com/influxdata/telegraf/pull/4485): Add support for configuring an AWS endpoint_url. 
+- [#4491](https://github.com/influxdata/telegraf/pull/4491): Send all messages before waiting for results in kafka output. +- [#4492](https://github.com/influxdata/telegraf/pull/4492): Add support for lz4 compression to kafka output. +- [#4450](https://github.com/influxdata/telegraf/pull/4450): Split multiple sensor keys in ipmi input. +- [#4364](https://github.com/influxdata/telegraf/pull/4364): Support StatisticValues in cloudwatch output plugin. +- [#4431](https://github.com/influxdata/telegraf/pull/4431): Add ip restriction for the prometheus_client output. +- [#3918](https://github.com/influxdata/telegraf/pull/3918): Add pgbouncer input plugin. +- [#2689](https://github.com/influxdata/telegraf/pull/2689): Add ActiveMQ input plugin. +- [#4402](https://github.com/influxdata/telegraf/pull/4402): Add wavefront parser plugin. +- [#4528](https://github.com/influxdata/telegraf/pull/4528): Add rename processor plugin. +- [#4537](https://github.com/influxdata/telegraf/pull/4537): Add message 'max_bytes' configuration to kafka input. +- [#4546](https://github.com/influxdata/telegraf/pull/4546): Add gopsutil meminfo fields to mem plugin. +- [#4285](https://github.com/influxdata/telegraf/pull/4285): Document how to parse telegraf logs. +- [#4542](https://github.com/influxdata/telegraf/pull/4542): Use dep v0.5.0. +- [#4433](https://github.com/influxdata/telegraf/pull/4433): Add ability to set measurement from matched text in grok parser. +- [#4565](https://github.com/influxdata/telegraf/pull/4465): Drop message batches in kafka output if too large. +- [#4579](https://github.com/influxdata/telegraf/pull/4579): Add support for static and random routing keys in kafka output. +- [#4539](https://github.com/influxdata/telegraf/pull/4539): Add logfmt parser plugin. +- [#4551](https://github.com/influxdata/telegraf/pull/4551): Add parser processor plugin. +- [#4559](https://github.com/influxdata/telegraf/pull/4559): Add Icinga2 input plugin. +- [#4351](https://github.com/influxdata/telegraf/pull/4351): Add name, time, path and string field options to JSON parser. +- [#4571](https://github.com/influxdata/telegraf/pull/4571): Add forwarded records to sqlserver input. +- [#4585](https://github.com/influxdata/telegraf/pull/4585): Add Kibana input plugin. +- [#4439](https://github.com/influxdata/telegraf/pull/4439): Add csv parser plugin. +- [#4598](https://github.com/influxdata/telegraf/pull/4598): Add read_buffer_size option to statsd input. +- [#4089](https://github.com/influxdata/telegraf/pull/4089): Add azure_monitor output plugin. +- [#4628](https://github.com/influxdata/telegraf/pull/4628): Add queue_durability parameter to amqp_consumer input. +- [#4476](https://github.com/influxdata/telegraf/pull/4476): Add strings processor. +- [#4536](https://github.com/influxdata/telegraf/pull/4536): Add OAuth2 support to HTTP output plugin. +- [#4633](https://github.com/influxdata/telegraf/pull/4633): Add Unix epoch timestamp support for JSON parser. +- [#4657](https://github.com/influxdata/telegraf/pull/4657): Add options for basic auth to haproxy input. +- [#4411](https://github.com/influxdata/telegraf/pull/4411): Add temp input plugin. +- [#4272](https://github.com/influxdata/telegraf/pull/4272): Add Beanstalkd input plugin. +- [#4669](https://github.com/influxdata/telegraf/pull/4669): Add means to specify server password for redis input. +- [#4339](https://github.com/influxdata/telegraf/pull/4339): Add Splunk Metrics serializer. 
+- [#4141](https://github.com/influxdata/telegraf/pull/4141): Add input plugin for VMware vSphere. +- [#4667](https://github.com/influxdata/telegraf/pull/4667): Align metrics window to interval in cloudwatch input. +- [#4642](https://github.com/influxdata/telegraf/pull/4642): Improve Azure Managed Instance support + more in sqlserver input. +- [#4682](https://github.com/influxdata/telegraf/pull/4682): Allow alternate binaries for iptables input plugin. +- [#4645](https://github.com/influxdata/telegraf/pull/4645): Add influxdb_v2 output plugin. -## v1.7.2 [unreleased] +### Bugfixes + +- [#3438](https://github.com/influxdata/telegraf/issues/3438): Fix divide by zero in logparser input. +- [#4499](https://github.com/influxdata/telegraf/issues/4499): Fix instance and object name in performance counters with backslashes. +- [#4646](https://github.com/influxdata/telegraf/issues/4646): Reset/flush saved contents from bad metric. +- [#4520](https://github.com/influxdata/telegraf/issues/4520): Document all supported cli arguments. +- [#4674](https://github.com/influxdata/telegraf/pull/4674): Log access denied opening a service at debug level in win_services. +- [#4588](https://github.com/influxdata/telegraf/issues/4588): Add support for Kafka 2.0. +- [#4087](https://github.com/influxdata/telegraf/issues/4087): Fix nagios parser does not support ranges in performance data. +- [#4088](https://github.com/influxdata/telegraf/issues/4088): Fix nagios parser does not strip quotes from performance data. +- [#4688](https://github.com/influxdata/telegraf/issues/4688): Fix null value crash in postgresql_extensible input. +- [#4681](https://github.com/influxdata/telegraf/pull/4681): Remove the startup authentication check from the cloudwatch output. +- [#4644](https://github.com/influxdata/telegraf/issues/4644): Support tailing files created after startup in tail input. +- [#4706](https://github.com/influxdata/telegraf/issues/4706): Fix csv format configuration loading. + +## v1.7.4 [2018-08-29] + +### Bugfixes + +- [#4534](https://github.com/influxdata/telegraf/pull/4534): Skip unserializable metric in influxDB UDP output. +- [#4554](https://github.com/influxdata/telegraf/pull/4554): Fix powerdns input tests. +- [#4584](https://github.com/influxdata/telegraf/pull/4584): Fix burrow_group offset calculation for burrow input. +- [#4550](https://github.com/influxdata/telegraf/pull/4550): Add result_code value for errors running ping command. +- [#4605](https://github.com/influxdata/telegraf/pull/4605): Remove timeout deadline for udp syslog input. +- [#4601](https://github.com/influxdata/telegraf/issues/4601): Ensure channel closed if an error occurs in cgroup input. +- [#4544](https://github.com/influxdata/telegraf/issues/4544): Fix sending of basic auth credentials in http output. +- [#4526](https://github.com/influxdata/telegraf/issues/4526): Use the correct GOARM value in the armel package. + +## v1.7.3 [2018-08-07] + +### Bugfixes + +- [#4434](https://github.com/influxdata/telegraf/issues/4434): Reduce required docker API version. +- [#4498](https://github.com/influxdata/telegraf/pull/4498): Keep leading whitespace for messages in syslog input. +- [#4470](https://github.com/influxdata/telegraf/issues/4470): Skip bad entries on interrupt input. +- [#4501](https://github.com/influxdata/telegraf/issues/4501): Preserve metric type when using filters in output plugins. +- [#3794](https://github.com/influxdata/telegraf/issues/3794): Fix error message if URL is unparseable in influxdb output. 
+- [#4059](https://github.com/influxdata/telegraf/issues/4059): Use explicit zpool properties to fix parse error on FreeBSD 11.2. +- [#4514](https://github.com/influxdata/telegraf/pull/4514): Lock buffer when adding metrics. + +## v1.7.2 [2018-07-18] ### Bugfixes - [#4381](https://github.com/influxdata/telegraf/issues/4381): Use localhost as default server tag in zookeeper input. - [#4374](https://github.com/influxdata/telegraf/issues/4374): Don't set values when pattern doesn't match in regex processor. +- [#4416](https://github.com/influxdata/telegraf/issues/4416): Fix output format of printer processor. +- [#4422](https://github.com/influxdata/telegraf/issues/4422): Fix metric can have duplicate field. +- [#4389](https://github.com/influxdata/telegraf/issues/4389): Return error if NewRequest fails in http output. +- [#4335](https://github.com/influxdata/telegraf/issues/4335): Reset read deadline for syslog input. +- [#4375](https://github.com/influxdata/telegraf/issues/4375): Exclude cached memory on docker input plugin. ## v1.7.1 [2018-07-03] @@ -182,7 +1268,7 @@ ### Release Notes -- The `mysql` input plugin has been updated fix a number of type convertion +- The `mysql` input plugin has been updated fix a number of type conversion issues. This may cause a `field type error` when inserting into InfluxDB due the change of types. @@ -584,7 +1670,7 @@ - [#3058](https://github.com/influxdata/telegraf/issues/3058): Allow iptable entries with trailing text. - [#1680](https://github.com/influxdata/telegraf/issues/1680): Sanitize password from couchbase metric. - [#3104](https://github.com/influxdata/telegraf/issues/3104): Converge to typed value in prometheus output. -- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilcation of logparser and tail on solaris. +- [#2899](https://github.com/influxdata/telegraf/issues/2899): Skip compilation of logparser and tail on solaris. - [#2951](https://github.com/influxdata/telegraf/issues/2951): Discard logging from tail library. - [#3126](https://github.com/influxdata/telegraf/pull/3126): Remove log message on ping timeout. - [#3144](https://github.com/influxdata/telegraf/issues/3144): Don't retry points beyond retention policy. @@ -1031,7 +2117,7 @@ consistent with the behavior of `collection_jitter`. - [#1390](https://github.com/influxdata/telegraf/pull/1390): Add support for Tengine - [#1320](https://github.com/influxdata/telegraf/pull/1320): Logparser input plugin for parsing grok-style log patterns. - [#1397](https://github.com/influxdata/telegraf/issues/1397): ElasticSearch: now supports connecting to ElasticSearch via SSL -- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input pluging. +- [#1262](https://github.com/influxdata/telegraf/pull/1261): Add graylog input plugin. - [#1294](https://github.com/influxdata/telegraf/pull/1294): consul input plugin. Thanks @harnash - [#1164](https://github.com/influxdata/telegraf/pull/1164): conntrack input plugin. Thanks @robinpercy! - [#1165](https://github.com/influxdata/telegraf/pull/1165): vmstat input plugin. Thanks @jshim-xm! @@ -1210,7 +2296,7 @@ It is not included on the report path. This is necessary for reporting host disk - [#1041](https://github.com/influxdata/telegraf/issues/1041): Add `n_cpus` field to the system plugin. - [#1072](https://github.com/influxdata/telegraf/pull/1072): New Input Plugin: filestat. 
- [#1066](https://github.com/influxdata/telegraf/pull/1066): Replication lag metrics for MongoDB input plugin -- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengleman! +- [#1086](https://github.com/influxdata/telegraf/pull/1086): Ability to specify AWS keys in config file. Thanks @johnrengelman! - [#1096](https://github.com/influxdata/telegraf/pull/1096): Performance refactor of running output buffers. - [#967](https://github.com/influxdata/telegraf/issues/967): Buffer logging improvements. - [#1107](https://github.com/influxdata/telegraf/issues/1107): Support lustre2 job stats. Thanks @hanleyja! @@ -1298,7 +2384,7 @@ because the `value` field is redundant in the graphite/librato context. - [#656](https://github.com/influxdata/telegraf/issues/656): No longer run `lsof` on linux to get netstat data, fixes permissions issue. - [#907](https://github.com/influxdata/telegraf/issues/907): Fix prometheus invalid label/measurement name key. - [#841](https://github.com/influxdata/telegraf/issues/841): Fix memcached unix socket panic. -- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titiliambert! +- [#873](https://github.com/influxdata/telegraf/issues/873): Fix SNMP plugin sometimes not returning metrics. Thanks @titilambert! - [#934](https://github.com/influxdata/telegraf/pull/934): phpfpm: Fix fcgi uri path. Thanks @rudenkovk! - [#805](https://github.com/influxdata/telegraf/issues/805): Kafka consumer stops gathering after i/o timeout. - [#959](https://github.com/influxdata/telegraf/pull/959): reduce mongodb & prometheus collection timeouts. Thanks @PierreF! @@ -1309,7 +2395,7 @@ because the `value` field is redundant in the graphite/librato context. - Primarily this release was cut to fix [#859](https://github.com/influxdata/telegraf/issues/859) ### Features -- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @pierref! +- [#747](https://github.com/influxdata/telegraf/pull/747): Start telegraf on install & remove on uninstall. Thanks @PierreF! - [#794](https://github.com/influxdata/telegraf/pull/794): Add service reload ability. Thanks @entertainyou! ### Bugfixes @@ -1797,7 +2883,7 @@ and filtering when specifying a config file. - [#98](https://github.com/influxdata/telegraf/pull/98): LeoFS plugin. Thanks @mocchira! - [#103](https://github.com/influxdata/telegraf/pull/103): Filter by metric tags. Thanks @srfraser! - [#106](https://github.com/influxdata/telegraf/pull/106): Options to filter plugins on startup. Thanks @zepouet! -- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyong influxdb. Thanks @jipperinbham! +- [#107](https://github.com/influxdata/telegraf/pull/107): Multiple outputs beyond influxdb. Thanks @jipperinbham! - [#108](https://github.com/influxdata/telegraf/issues/108): Support setting per-CPU and total-CPU gathering. - [#111](https://github.com/influxdata/telegraf/pull/111): Report CPU Usage in cpu plugin. Thanks @jpalay! diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9a89e3cbf..d68d726dc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,491 +1,62 @@ -## Steps for Contributing: +### Contributing -1. [Sign the CLA](http://influxdb.com/community/cla.html) -1. Make changes or write plugin (see below for details) -1. Add your plugin to one of: `plugins/{inputs,outputs,aggregators,processors}/all/all.go` -1. 
If your plugin requires a new Go package, -[add it](https://github.com/influxdata/telegraf/blob/master/CONTRIBUTING.md#adding-a-dependency) -1. Write a README for your plugin, if it's an input plugin, it should be structured -like the [input example here](https://github.com/influxdata/telegraf/blob/master/plugins/inputs/EXAMPLE_README.md). -Output plugins READMEs are less structured, -but any information you can provide on how the data will look is appreciated. -See the [OpenTSDB output](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/opentsdb) -for a good example. -1. **Optional:** Help users of your plugin by including example queries for populating dashboards. Include these sample queries in the `README.md` for the plugin. -1. **Optional:** Write a [tickscript](https://docs.influxdata.com/kapacitor/v1.0/tick/syntax/) for your plugin and add it to [Kapacitor](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf). +1. [Sign the CLA][cla]. +1. Open a [new issue][] to discuss the changes you would like to make. This is + not strictly required but it may help reduce the amount of rework you need + to do later. +1. Make changes or write plugin using the guidelines in the following + documents: + - [Input Plugins][inputs] + - [Processor Plugins][processors] + - [Aggregator Plugins][aggregators] + - [Output Plugins][outputs] +1. Ensure you have added proper unit tests and documentation. +1. Open a new [pull request][]. -## GoDoc +#### Contributing an External Plugin *(experimental)* +Input plugins written for internal Telegraf can be run as externally-compiled plugins through the [Execd Input Plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd) without having to change the plugin code. + +Follow the guidelines of how to integrate your plugin with the [Execd Go Shim](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) to easily compile it as a separate app and run it from the inputs.execd plugin. + +#### Security Vulnerability Reporting +InfluxData takes security and our users' trust very seriously. If you believe you have found a security issue in any of our +open source projects, please responsibly disclose it by contacting security@influxdata.com. More details about +security vulnerability reporting, +including our GPG key, [can be found here](https://www.influxdata.com/how-to-report-security-vulnerabilities/). + +### GoDoc Public interfaces for inputs, outputs, processors, aggregators, metrics, -and the accumulator can be found on the GoDoc +and the accumulator can be found in the GoDoc: [![GoDoc](https://godoc.org/github.com/influxdata/telegraf?status.svg)](https://godoc.org/github.com/influxdata/telegraf) -## Sign the CLA +### Common development tasks -Before we can merge a pull request, you will need to sign the CLA, -which can be found [on our website](http://influxdb.com/community/cla.html) +**Adding a dependency:** -## Adding a dependency +Telegraf uses Go modules. Assuming you can already build the project, run this in the telegraf directory: -Assuming you can already build the project, run these in the telegraf directory: +1. `go get github.com/[dependency]/[new-package]` -1. `go get -u github.com/golang/dep/cmd/dep` -2. `dep ensure` -3. `dep ensure -add github.com/[dependency]/[new-package]` - -## Input Plugins - -This section is for developers who want to create new collection inputs. -Telegraf is entirely plugin driven. 
This interface allows for operators to -pick and chose what is gathered and makes it easy for developers -to create new ways of generating metrics. - -Plugin authorship is kept as simple as possible to promote people to develop -and submit new inputs. - -### Input Plugin Guidelines - -* A plugin must conform to the [`telegraf.Input`](https://godoc.org/github.com/influxdata/telegraf#Input) interface. -* Input Plugins should call `inputs.Add` in their `init` function to register themselves. -See below for a quick example. -* Input Plugins must be added to the -`github.com/influxdata/telegraf/plugins/inputs/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the -plugin can be configured. This is include in `telegraf config`. -* The `Description` function should say in one line what this plugin does. - -Let's say you've written a plugin that emits metrics about processes on the -current host. - -### Input Plugin Example - -```go -package simple - -// simple.go - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" -) - -type Simple struct { - Ok bool -} - -func (s *Simple) Description() string { - return "a demo plugin" -} - -func (s *Simple) SampleConfig() string { - return ` - ## Indicate if everything is fine - ok = true -` -} - -func (s *Simple) Gather(acc telegraf.Accumulator) error { - if s.Ok { - acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil) - } else { - acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil) - } - - return nil -} - -func init() { - inputs.Add("simple", func() telegraf.Input { return &Simple{} }) -} -``` - -### Input Plugin Development - -* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker dev environment -using docker-compose. -* ***[Optional]*** When developing a plugin, add a `dev` directory with a `docker-compose.yml` and `telegraf.conf` -as well as any other supporting files, where sensible. - -## Adding Typed Metrics - -In addition the the `AddFields` function, the accumulator also supports an -`AddGauge` and `AddCounter` function. These functions are for adding _typed_ -metrics. Metric types are ignored for the InfluxDB output, but can be used -for other outputs, such as [prometheus](https://prometheus.io/docs/concepts/metric_types/). - -## Input Plugins Accepting Arbitrary Data Formats - -Some input plugins (such as -[exec](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec)) -accept arbitrary input data formats. An overview of these data formats can -be found -[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). - -In order to enable this, you must specify a `SetParser(parser parsers.Parser)` -function on the plugin object (see the exec plugin for an example), as well as -defining `parser` as a field of the object. - -You can then utilize the parser internally in your plugin, parsing data as you -see fit. Telegraf's configuration layer will take care of instantiating and -creating the `Parser` object. - -You should also add the following to your SampleConfig() return: - -```toml - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -``` - -Below is the `Parser` interface. - -```go -// Parser is an interface defining functions that a parser plugin must satisfy. 
-type Parser interface { - // Parse takes a byte buffer separated by newlines - // ie, `cpu.usage.idle 90\ncpu.usage.busy 10` - // and parses it into telegraf metrics - Parse(buf []byte) ([]telegraf.Metric, error) - - // ParseLine takes a single string metric - // ie, "cpu.usage.idle 90" - // and parses it into a telegraf metric. - ParseLine(line string) (telegraf.Metric, error) -} -``` - -And you can view the code -[here.](https://github.com/influxdata/telegraf/blob/henrypfhu-master/plugins/parsers/registry.go) - -## Service Input Plugins - -This section is for developers who want to create new "service" collection -inputs. A service plugin differs from a regular plugin in that it operates -a background service while Telegraf is running. One example would be the `statsd` -plugin, which operates a statsd server. - -Service Input Plugins are substantially more complicated than a regular plugin, as they -will require threads and locks to verify data integrity. Service Input Plugins should -be avoided unless there is no way to create their behavior with a regular plugin. - -Their interface is quite similar to a regular plugin, with the addition of `Start()` -and `Stop()` methods. - -### Service Plugin Guidelines - -* Same as the `Plugin` guidelines, except that they must conform to the -[`telegraf.ServiceInput`](https://godoc.org/github.com/influxdata/telegraf#ServiceInput) interface. - -## Output Plugins - -This section is for developers who want to create a new output sink. Outputs -are created in a similar manner as collection plugins, and their interface has -similar constructs. - -### Output Plugin Guidelines - -* An output must conform to the [`telegraf.Output`](https://godoc.org/github.com/influxdata/telegraf#Output) interface. -* Outputs should call `outputs.Add` in their `init` function to register themselves. -See below for a quick example. -* To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdata/telegraf/plugins/outputs/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the -output can be configured. This is include in `telegraf config`. -* The `Description` function should say in one line what this output does. - -### Output Example - -```go -package simpleoutput - -// simpleoutput.go - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/outputs" -) - -type Simple struct { - Ok bool -} - -func (s *Simple) Description() string { - return "a demo output" -} - -func (s *Simple) SampleConfig() string { - return ` - ok = true -` -} - -func (s *Simple) Connect() error { - // Make a connection to the URL here - return nil -} - -func (s *Simple) Close() error { - // Close connection to the URL here - return nil -} - -func (s *Simple) Write(metrics []telegraf.Metric) error { - for _, metric := range metrics { - // write `metric` to the output sink here - } - return nil -} - -func init() { - outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} }) -} - -``` - -## Output Plugins Writing Arbitrary Data Formats - -Some output plugins (such as -[file](https://github.com/influxdata/telegraf/tree/master/plugins/outputs/file)) -can write arbitrary output data formats. An overview of these data formats can -be found -[here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md). 
- -In order to enable this, you must specify a -`SetSerializer(serializer serializers.Serializer)` -function on the plugin object (see the file plugin for an example), as well as -defining `serializer` as a field of the object. - -You can then utilize the serializer internally in your plugin, serializing data -before it's written. Telegraf's configuration layer will take care of -instantiating and creating the `Serializer` object. - -You should also add the following to your SampleConfig() return: - -```toml - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" -``` - -## Service Output Plugins - -This section is for developers who want to create new "service" output. A -service output differs from a regular output in that it operates a background service -while Telegraf is running. One example would be the `prometheus_client` output, -which operates an HTTP server. - -Their interface is quite similar to a regular output, with the addition of `Start()` -and `Stop()` methods. - -### Service Output Guidelines - -* Same as the `Output` guidelines, except that they must conform to the -`output.ServiceOutput` interface. - -## Processor Plugins - -This section is for developers who want to create a new processor plugin. - -### Processor Plugin Guidelines - -* A processor must conform to the [`telegraf.Processor`](https://godoc.org/github.com/influxdata/telegraf#Processor) interface. -* Processors should call `processors.Add` in their `init` function to register themselves. -See below for a quick example. -* To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdata/telegraf/plugins/processors/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the -processor can be configured. This is include in the output of `telegraf config`. -* The `Description` function should say in one line what this processor does. - -### Processor Example - -```go -package printer - -// printer.go - -import ( - "fmt" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/processors" -) - -type Printer struct { -} - -var sampleConfig = ` -` - -func (p *Printer) SampleConfig() string { - return sampleConfig -} - -func (p *Printer) Description() string { - return "Print all metrics that pass through this filter." -} - -func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { - for _, metric := range in { - fmt.Println(metric.String()) - } - return in -} - -func init() { - processors.Add("printer", func() telegraf.Processor { - return &Printer{} - }) -} -``` - -## Aggregator Plugins - -This section is for developers who want to create a new aggregator plugin. - -### Aggregator Plugin Guidelines - -* A aggregator must conform to the [`telegraf.Aggregator`](https://godoc.org/github.com/influxdata/telegraf#Aggregator) interface. -* Aggregators should call `aggregators.Add` in their `init` function to register themselves. -See below for a quick example. -* To be available within Telegraf itself, plugins must add themselves to the -`github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file. -* The `SampleConfig` function should return valid toml that describes how the -aggregator can be configured. This is include in `telegraf config`. -* The `Description` function should say in one line what this aggregator does. 
-* The Aggregator plugin will need to keep caches of metrics that have passed -through it. This should be done using the builtin `HashID()` function of each -metric. -* When the `Reset()` function is called, all caches should be cleared. - -### Aggregator Example - -```go -package min - -// min.go - -import ( - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/aggregators" -) - -type Min struct { - // caches for metric fields, names, and tags - fieldCache map[uint64]map[string]float64 - nameCache map[uint64]string - tagCache map[uint64]map[string]string -} - -func NewMin() telegraf.Aggregator { - m := &Min{} - m.Reset() - return m -} - -var sampleConfig = ` - ## period is the flush & clear interval of the aggregator. - period = "30s" - ## If true drop_original will drop the original metrics and - ## only send aggregates. - drop_original = false -` - -func (m *Min) SampleConfig() string { - return sampleConfig -} - -func (m *Min) Description() string { - return "Keep the aggregate min of each metric passing through." -} - -func (m *Min) Add(in telegraf.Metric) { - id := in.HashID() - if _, ok := m.nameCache[id]; !ok { - // hit an uncached metric, create caches for first time: - m.nameCache[id] = in.Name() - m.tagCache[id] = in.Tags() - m.fieldCache[id] = make(map[string]float64) - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - m.fieldCache[id][k] = fv - } - } - } else { - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - if _, ok := m.fieldCache[id][k]; !ok { - // hit an uncached field of a cached metric - m.fieldCache[id][k] = fv - continue - } - if fv < m.fieldCache[id][k] { - // set new minimum - m.fieldCache[id][k] = fv - } - } - } - } -} - -func (m *Min) Push(acc telegraf.Accumulator) { - for id, _ := range m.nameCache { - fields := map[string]interface{}{} - for k, v := range m.fieldCache[id] { - fields[k+"_min"] = v - } - acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) - } -} - -func (m *Min) Reset() { - m.fieldCache = make(map[uint64]map[string]float64) - m.nameCache = make(map[uint64]string) - m.tagCache = make(map[uint64]map[string]string) -} - -func convert(in interface{}) (float64, bool) { - switch v := in.(type) { - case float64: - return v, true - case int64: - return float64(v), true - default: - return 0, false - } -} - -func init() { - aggregators.Add("min", func() telegraf.Aggregator { - return NewMin() - }) -} -``` - -## Unit Tests +**Unit Tests:** Before opening a pull request you should run the linter checks and the short tests. -### Execute linter +``` +make check +make test +``` -execute `make lint` +**Execute integration tests:** -### Execute short tests - -execute `make test` - -### Execute integration tests +(Optional) Running the integration tests requires several docker containers to be running. You can start the containers with: ``` -make docker-run +docker-compose up ``` And run the full test suite with: @@ -494,3 +65,12 @@ make test-all ``` Use `make docker-kill` to stop the containers. 
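+
+When iterating on a single plugin it can be quicker to run just that package's
+tests with the Go toolchain. This is a minimal sketch rather than an official
+workflow: the plugin path is only an example, and `-short` is assumed to skip
+tests that need external services, so the `make` targets above remain the
+recommended checks before opening a pull request.
+
+```
+# example only: substitute the plugin package you are working on
+go test -short -v ./plugins/inputs/redis/
+```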
+ + +[cla]: https://www.influxdata.com/legal/cla/ +[new issue]: https://github.com/influxdata/telegraf/issues/new/choose +[pull request]: https://github.com/influxdata/telegraf/compare +[inputs]: /docs/INPUTS.md +[processors]: /docs/PROCESSORS.md +[aggregators]: /docs/AGGREGATORS.md +[outputs]: /docs/OUTPUTS.md diff --git a/EXTERNAL_PLUGINS.md b/EXTERNAL_PLUGINS.md new file mode 100644 index 000000000..cba70f9c2 --- /dev/null +++ b/EXTERNAL_PLUGINS.md @@ -0,0 +1,9 @@ +# External Plugins + +This is a list of plugins that can be compiled outside of Telegraf and used via the execd input. + +Pull requests welcome. + +## Inputs +- [rand](https://github.com/ssoroka/rand) - Generate random numbers +- [twitter](https://github.com/inabagumi/twitter-telegraf-plugin) - Gather account information from Twitter accounts diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 194bb61e6..000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,973 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "code.cloudfoundry.org/clock" - packages = ["."] - revision = "02e53af36e6c978af692887ed449b74026d76fec" - -[[projects]] - name = "collectd.org" - packages = [ - "api", - "cdtime", - "network" - ] - revision = "2ce144541b8903101fb8f1483cc0497a68798122" - version = "v0.3.0" - -[[projects]] - name = "github.com/Microsoft/ApplicationInsights-Go" - packages = [ - "appinsights", - "appinsights/contracts" - ] - revision = "d2df5d440eda5372f24fcac03839a64d6cb5f7e5" - version = "v0.4.2" - -[[projects]] - name = "github.com/Microsoft/go-winio" - packages = ["."] - revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f" - version = "v0.4.7" - -[[projects]] - name = "github.com/Shopify/sarama" - packages = ["."] - revision = "35324cf48e33d8260e1c7c18854465a904ade249" - version = "v1.17.0" - -[[projects]] - name = "github.com/StackExchange/wmi" - packages = ["."] - revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338" - version = "1.0.0" - -[[projects]] - name = "github.com/aerospike/aerospike-client-go" - packages = [ - ".", - "internal/lua", - "internal/lua/resources", - "logger", - "pkg/bcrypt", - "pkg/ripemd160", - "types", - "types/atomic", - "types/particle_type", - "types/rand", - "utils/buffer" - ] - revision = "c10b5393e43bd60125aca6289c7b24879edb1787" - version = "v1.33.0" - -[[projects]] - branch = "master" - name = "github.com/alecthomas/template" - packages = [ - ".", - "parse" - ] - revision = "a0175ee3bccc567396460bf5acd36800cb10c49c" - -[[projects]] - branch = "master" - name = "github.com/alecthomas/units" - packages = ["."] - revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a" - -[[projects]] - branch = "master" - name = "github.com/amir/raidman" - packages = [ - ".", - "proto" - ] - revision = "1ccc43bfb9c93cb401a4025e49c64ba71e5e668b" - -[[projects]] - branch = "master" - name = "github.com/apache/thrift" - packages = ["lib/go/thrift"] - revision = "f5f430df56871bc937950274b2c86681d3db6e59" - -[[projects]] - name = "github.com/aws/aws-sdk-go" - packages = [ - "aws", - "aws/awserr", - "aws/awsutil", - "aws/client", - "aws/client/metadata", - "aws/corehandlers", - "aws/credentials", - "aws/credentials/ec2rolecreds", - "aws/credentials/endpointcreds", - "aws/credentials/stscreds", - "aws/csm", - "aws/defaults", - "aws/ec2metadata", - "aws/endpoints", - "aws/request", - "aws/session", - "aws/signer/v4", - "internal/sdkio", - "internal/sdkrand", - "internal/shareddefaults", - "private/protocol", - 
"private/protocol/json/jsonutil", - "private/protocol/jsonrpc", - "private/protocol/query", - "private/protocol/query/queryutil", - "private/protocol/rest", - "private/protocol/xml/xmlutil", - "service/cloudwatch", - "service/kinesis", - "service/sts" - ] - revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff" - version = "v1.14.8" - -[[projects]] - branch = "master" - name = "github.com/beorn7/perks" - packages = ["quantile"] - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - name = "github.com/bsm/sarama-cluster" - packages = ["."] - revision = "cf455bc755fe41ac9bb2861e7a961833d9c2ecc3" - version = "v2.1.13" - -[[projects]] - name = "github.com/cenkalti/backoff" - packages = ["."] - revision = "2ea60e5f094469f9e65adb9cd103795b73ae743e" - version = "v2.0.0" - -[[projects]] - branch = "master" - name = "github.com/couchbase/go-couchbase" - packages = ["."] - revision = "16db1f1fe037412f12738fa4d8448c549c4edd77" - -[[projects]] - branch = "master" - name = "github.com/couchbase/gomemcached" - packages = [ - ".", - "client" - ] - revision = "0da75df145308b9a4e6704d762ca9d9b77752efc" - -[[projects]] - branch = "master" - name = "github.com/couchbase/goutils" - packages = [ - "logging", - "scramsha" - ] - revision = "e865a1461c8ac0032bd37e2d4dab3289faea3873" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" - version = "v3.2.0" - -[[projects]] - name = "github.com/docker/distribution" - packages = [ - "digest", - "reference" - ] - revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" - version = "v2.6.2" - -[[projects]] - name = "github.com/docker/docker" - packages = [ - "api/types", - "api/types/blkiodev", - "api/types/container", - "api/types/events", - "api/types/filters", - "api/types/mount", - "api/types/network", - "api/types/reference", - "api/types/registry", - "api/types/strslice", - "api/types/swarm", - "api/types/time", - "api/types/versions", - "api/types/volume", - "client", - "pkg/tlsconfig" - ] - revision = "eef6495eddab52828327aade186443681ed71a4e" - version = "v17.03.2-ce-rc1" - -[[projects]] - name = "github.com/docker/go-connections" - packages = [ - "nat", - "sockets", - "tlsconfig" - ] - revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" - version = "v0.3.0" - -[[projects]] - name = "github.com/docker/go-units" - packages = ["."] - revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" - version = "v0.3.3" - -[[projects]] - name = "github.com/eapache/go-resiliency" - packages = ["breaker"] - revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/eapache/go-xerial-snappy" - packages = ["."] - revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c" - -[[projects]] - name = "github.com/eapache/queue" - packages = ["."] - revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" - version = "v1.1.0" - -[[projects]] - name = "github.com/eclipse/paho.mqtt.golang" - packages = [ - ".", - "packets" - ] - revision = "36d01c2b4cbeb3d2a12063e4880ce30800af9560" - version = "v1.1.1" - -[[projects]] - name = "github.com/go-ini/ini" - packages = ["."] - revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5" - version = "v1.37.0" - -[[projects]] - name = "github.com/go-logfmt/logfmt" - packages = ["."] - revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" - 
version = "v0.3.0" - -[[projects]] - name = "github.com/go-ole/go-ole" - packages = [ - ".", - "oleutil" - ] - revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506" - version = "v1.2.1" - -[[projects]] - name = "github.com/go-redis/redis" - packages = [ - ".", - "internal", - "internal/consistenthash", - "internal/hashtag", - "internal/pool", - "internal/proto", - "internal/singleflight", - "internal/util" - ] - revision = "83fb42932f6145ce52df09860384a4653d2d332a" - version = "v6.12.0" - -[[projects]] - name = "github.com/go-sql-driver/mysql" - packages = ["."] - revision = "d523deb1b23d913de5bdada721a6071e71283618" - version = "v1.4.0" - -[[projects]] - name = "github.com/gobwas/glob" - packages = [ - ".", - "compiler", - "match", - "syntax", - "syntax/ast", - "syntax/lexer", - "util/runes", - "util/strings" - ] - revision = "5ccd90ef52e1e632236f7326478d4faa74f99438" - version = "v0.2.3" - -[[projects]] - name = "github.com/gogo/protobuf" - packages = ["proto"] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" - -[[projects]] - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp" - ] - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/golang/snappy" - packages = ["."] - revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" - -[[projects]] - name = "github.com/google/go-cmp" - packages = [ - "cmp", - "cmp/internal/diff", - "cmp/internal/function", - "cmp/internal/value" - ] - revision = "3af367b6b30c263d47e8895973edcca9a49cf029" - version = "v0.2.0" - -[[projects]] - name = "github.com/gorilla/context" - packages = ["."] - revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42" - version = "v1.1.1" - -[[projects]] - name = "github.com/gorilla/mux" - packages = ["."] - revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" - version = "v1.6.2" - -[[projects]] - branch = "master" - name = "github.com/hailocab/go-hostpool" - packages = ["."] - revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" - -[[projects]] - name = "github.com/hashicorp/consul" - packages = ["api"] - revision = "5174058f0d2bda63fa5198ab96c33d9a909c58ed" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-cleanhttp" - packages = ["."] - revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/go-rootcerts" - packages = ["."] - revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" - -[[projects]] - name = "github.com/hashicorp/serf" - packages = ["coordinate"] - revision = "d6574a5bb1226678d7010325fb6c985db20ee458" - version = "v0.8.1" - -[[projects]] - name = "github.com/influxdata/go-syslog" - packages = [ - "rfc5424", - "rfc5425" - ] - revision = "eecd51df3ad85464a2bab9b7d3a45bc1e299059e" - version = "v1.0.1" - -[[projects]] - branch = "master" - name = "github.com/influxdata/tail" - packages = [ - ".", - "ratelimiter", - "util", - "watch", - "winfile" - ] - revision = "c43482518d410361b6c383d7aebce33d0471d7bc" - -[[projects]] - branch = "master" - name = "github.com/influxdata/toml" - packages = [ - ".", - "ast" - ] - revision = "2a2e3012f7cfbef64091cc79776311e65dfa211b" - -[[projects]] - branch = "master" - name = "github.com/influxdata/wlog" - packages = ["."] - revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" - -[[projects]] - name = "github.com/jackc/pgx" - packages = [ - ".", - "chunkreader", - "internal/sanitize", - "pgio", 
- "pgproto3", - "pgtype", - "stdlib" - ] - revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27" - version = "v3.1.0" - -[[projects]] - name = "github.com/jmespath/go-jmespath" - packages = ["."] - revision = "0b12d6b5" - -[[projects]] - branch = "master" - name = "github.com/kardianos/osext" - packages = ["."] - revision = "ae77be60afb1dcacde03767a8c37337fad28ac14" - -[[projects]] - branch = "master" - name = "github.com/kardianos/service" - packages = ["."] - revision = "615a14ed75099c9eaac6949e22ac2341bf9d3197" - -[[projects]] - branch = "master" - name = "github.com/kballard/go-shellquote" - packages = ["."] - revision = "95032a82bc518f77982ea72343cc1ade730072f0" - -[[projects]] - branch = "master" - name = "github.com/kr/logfmt" - packages = ["."] - revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" - -[[projects]] - branch = "master" - name = "github.com/mailru/easyjson" - packages = [ - ".", - "buffer", - "jlexer", - "jwriter" - ] - revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485" - -[[projects]] - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - name = "github.com/miekg/dns" - packages = ["."] - revision = "5a2b9fab83ff0f8bfc99684bd5f43a37abe560f1" - version = "v1.0.8" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - revision = "3864e76763d94a6df2f9960b16a20a33da9f9a66" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b" - -[[projects]] - name = "github.com/multiplay/go-ts3" - packages = ["."] - revision = "d0d44555495c8776880a17e439399e715a4ef319" - version = "v1.0.0" - -[[projects]] - name = "github.com/naoina/go-stringutil" - packages = ["."] - revision = "6b638e95a32d0c1131db0e7fe83775cbea4a0d0b" - version = "v0.1.0" - -[[projects]] - name = "github.com/nats-io/gnatsd" - packages = [ - "conf", - "logger", - "server", - "server/pse", - "util" - ] - revision = "add6d7930ae6d4bff8823b28999ea87bf1bfd23d" - version = "v1.1.0" - -[[projects]] - name = "github.com/nats-io/go-nats" - packages = [ - ".", - "encoders/builtin", - "util" - ] - revision = "062418ea1c2181f52dc0f954f6204370519a868b" - version = "v1.5.0" - -[[projects]] - name = "github.com/nats-io/nuid" - packages = ["."] - revision = "289cccf02c178dc782430d534e3c1f5b72af807f" - version = "v1.0.0" - -[[projects]] - name = "github.com/nsqio/go-nsq" - packages = ["."] - revision = "eee57a3ac4174c55924125bb15eeeda8cffb6e6f" - version = "v1.0.7" - -[[projects]] - branch = "master" - name = "github.com/opentracing-contrib/go-observer" - packages = ["."] - revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c" - -[[projects]] - name = "github.com/opentracing/opentracing-go" - packages = [ - ".", - "ext", - "log" - ] - revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" - version = "v1.0.2" - -[[projects]] - name = "github.com/openzipkin/zipkin-go-opentracing" - packages = [ - ".", - "flag", - "thrift/gen-go/scribe", - "thrift/gen-go/zipkincore", - "types", - "wire" - ] - revision = "26cf9707480e6b90e5eff22cf0bbf05319154232" - version = "v0.3.4" - -[[projects]] - name = "github.com/pierrec/lz4" - packages = [ - ".", - "internal/xxh32" - ] - revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d" - version = "v2.0.2" - -[[projects]] - name = "github.com/pkg/errors" - packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version 
= "v0.8.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp" - ] - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - name = "github.com/prometheus/client_model" - packages = ["go"] - revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" - -[[projects]] - branch = "master" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "log", - "model" - ] - revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" - -[[projects]] - branch = "master" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs" - ] - revision = "7d6f385de8bea29190f15ba9931442a0eaef9af7" - -[[projects]] - branch = "master" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - revision = "e2704e165165ec55d062f5919b4b29494e9fa790" - -[[projects]] - branch = "master" - name = "github.com/samuel/go-zookeeper" - packages = ["zk"] - revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47" - -[[projects]] - name = "github.com/satori/go.uuid" - packages = ["."] - revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" - version = "v1.2.0" - -[[projects]] - name = "github.com/shirou/gopsutil" - packages = [ - "cpu", - "disk", - "host", - "internal/common", - "load", - "mem", - "net", - "process" - ] - revision = "eeb1d38d69593f121e060d24d17f7b1f0936b203" - version = "v2.18.05" - -[[projects]] - branch = "master" - name = "github.com/shirou/w32" - packages = ["."] - revision = "bb4de0191aa41b5507caa14b0650cdbddcd9280b" - -[[projects]] - name = "github.com/sirupsen/logrus" - packages = ["."] - revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" - version = "v1.0.5" - -[[projects]] - branch = "master" - name = "github.com/soniah/gosnmp" - packages = ["."] - revision = "bcf840db66be7d64bf96c3c0e075c92e3d98f793" - -[[projects]] - branch = "master" - name = "github.com/streadway/amqp" - packages = ["."] - revision = "e5adc2ada8b8efff032bf61173a233d143e9318e" - -[[projects]] - name = "github.com/stretchr/objx" - packages = ["."] - revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c" - version = "v0.1.1" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "mock", - "require" - ] - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - -[[projects]] - name = "github.com/tidwall/gjson" - packages = ["."] - revision = "afaeb9562041a8018c74e006551143666aed08bf" - version = "v1.1.1" - -[[projects]] - branch = "master" - name = "github.com/tidwall/match" - packages = ["."] - revision = "1731857f09b1f38450e2c12409748407822dc6be" - -[[projects]] - name = "github.com/vjeantet/grok" - packages = ["."] - revision = "ce01e59abcf6fbc9833b7deb5e4b8ee1769bcc53" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/wvanbergen/kafka" - packages = ["consumergroup"] - revision = "e2edea948ddfee841ea9a263b32ccca15f7d6c2f" - -[[projects]] - branch = "master" - name = "github.com/wvanbergen/kazoo-go" - packages = ["."] - revision = "f72d8611297a7cf105da904c04198ad701a60101" - -[[projects]] - branch = "master" - name = "github.com/yuin/gopher-lua" - packages = [ - ".", - "ast", - "parse", - "pm" - ] - revision = "ca850f594eaafa5468da2bd53b865e4ee55be18b" - -[[projects]] - branch = "master" - name = 
"github.com/zensqlmonitor/go-mssqldb" - packages = ["."] - revision = "e8fbf836e44e86764eba398361d1825651709547" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = [ - "bcrypt", - "blowfish", - "ed25519", - "ed25519/internal/edwards25519", - "md4", - "pbkdf2", - "ssh/terminal" - ] - revision = "027cca12c2d63e3d62b670d901e8a2c95854feec" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = [ - "bpf", - "context", - "context/ctxhttp", - "html", - "html/atom", - "html/charset", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/iana", - "internal/socket", - "internal/socks", - "internal/timeseries", - "ipv4", - "ipv6", - "proxy", - "trace", - "websocket" - ] - revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - "windows/registry", - "windows/svc", - "windows/svc/debug", - "windows/svc/eventlog", - "windows/svc/mgr" - ] - revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7" - -[[projects]] - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "encoding", - "encoding/charmap", - "encoding/htmlindex", - "encoding/internal", - "encoding/internal/identifier", - "encoding/japanese", - "encoding/korean", - "encoding/simplifiedchinese", - "encoding/traditionalchinese", - "encoding/unicode", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "internal/utf8internal", - "language", - "runes", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable" - ] - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - name = "google.golang.org/appengine" - packages = ["cloudsql"] - revision = "b1f26356af11148e710935ed1ac8a7f5702c7612" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - revision = "32ee49c4dd805befd833990acba36cb75042378c" - -[[projects]] - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "channelz", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclb/grpc_lb_v1/messages", - "grpclog", - "internal", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - "transport" - ] - revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b" - version = "v1.12.2" - -[[projects]] - name = "gopkg.in/alecthomas/kingpin.v2" - packages = ["."] - revision = "947dcec5ba9c011838740e680966fd7087a71d0d" - version = "v2.2.6" - -[[projects]] - name = "gopkg.in/asn1-ber.v1" - packages = ["."] - revision = "379148ca0225df7a432012b8df0355c2a2063ac0" - version = "v1.2" - -[[projects]] - name = "gopkg.in/fatih/pool.v2" - packages = ["."] - revision = "010e0b745d12eaf8426c95f9c3924d81dd0b668f" - version = "v2.0.0" - -[[projects]] - name = "gopkg.in/fsnotify.v1" - packages = ["."] - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" - version = "v1.4.7" - -[[projects]] - name = "gopkg.in/gorethink/gorethink.v3" - packages = [ - ".", - "encoding", - "ql2", - "types" - ] - revision = "7f5bdfd858bb064d80559b2a32b86669c5de5d3b" - version = "v3.0.5" - -[[projects]] - name = "gopkg.in/ldap.v2" - packages = ["."] - revision = 
"bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9" - version = "v2.5.1" - -[[projects]] - branch = "v2" - name = "gopkg.in/mgo.v2" - packages = [ - ".", - "bson", - "internal/json", - "internal/sasl", - "internal/scram" - ] - revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655" - -[[projects]] - name = "gopkg.in/olivere/elastic.v5" - packages = [ - ".", - "config", - "uritemplates" - ] - revision = "b708306d715bea9b983685e94ab4602cdc9f988b" - version = "v5.0.69" - -[[projects]] - branch = "v1" - name = "gopkg.in/tomb.v1" - packages = ["."] - revision = "dd632973f1e7218eb1089048e0798ec9ae7dceb8" - -[[projects]] - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "024194b983d91b9500fe97e0aa0ddb5fe725030cb51ddfb034e386cae1098370" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 78d3749a9..000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,243 +0,0 @@ -[[constraint]] - name = "collectd.org" - version = "0.3.0" - -[[constraint]] - name = "github.com/aerospike/aerospike-client-go" - version = "^1.33.0" - -[[constraint]] - name = "github.com/amir/raidman" - branch = "master" - -[[constraint]] - name = "github.com/apache/thrift" - branch = "master" - -[[constraint]] - name = "github.com/aws/aws-sdk-go" - version = "1.14.8" -# version = "1.8.39" - -[[constraint]] - name = "github.com/bsm/sarama-cluster" - version = "2.1.13" -# version = "2.1.10" - -[[constraint]] - name = "github.com/couchbase/go-couchbase" - branch = "master" - -[[constraint]] - name = "github.com/dgrijalva/jwt-go" - version = "3.2.0" -# version = "3.1.0" - -[[constraint]] - name = "github.com/docker/docker" - version = "~17.03.2-ce" - -[[constraint]] - name = "github.com/docker/go-connections" - version = "0.3.0" -# version = "0.2.1" - -[[constraint]] - name = "github.com/eclipse/paho.mqtt.golang" - version = "~1.1.1" -# version = "1.1.0" - -[[constraint]] - name = "github.com/go-sql-driver/mysql" - version = "1.4.0" -# version = "1.3.0" - -[[constraint]] - name = "github.com/gobwas/glob" - version = "0.2.3" -# version = "0.2.2" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "1.1.0" -# version = "1.0.0" - -[[constraint]] - name = "github.com/google/go-cmp" - version = "0.2.0" -# version = "0.1.0" - -[[constraint]] - name = "github.com/gorilla/mux" - version = "1.6.2" -# version = "1.6.1" - -[[constraint]] - name = "github.com/go-redis/redis" - version = "6.12.0" - -[[constraint]] - name = "github.com/hashicorp/consul" - version = "1.1.0" - -[[constraint]] - name = "github.com/influxdata/go-syslog" - version = "1.0.1" - -[[constraint]] - name = "github.com/influxdata/tail" - branch = "master" - -[[constraint]] - name = "github.com/influxdata/toml" - branch = "master" - -[[constraint]] - name = "github.com/influxdata/wlog" - branch = "master" - -[[constraint]] - name = "github.com/jackc/pgx" - version = "3.1.0" - -[[constraint]] - name = "github.com/kardianos/service" - branch = "master" - -[[constraint]] - name = "github.com/kballard/go-shellquote" - branch = "master" - -[[constraint]] - name = "github.com/matttproud/golang_protobuf_extensions" - version = "1.0.1" - -[[constraint]] - name = "github.com/Microsoft/ApplicationInsights-Go" - branch = "master" - -[[constraint]] - name = "github.com/miekg/dns" - version = "1.0.8" -# version = "1.0.0" - -[[constraint]] - name = "github.com/multiplay/go-ts3" - 
version = "1.0.0" - -[[constraint]] - name = "github.com/nats-io/gnatsd" - version = "1.1.0" -# version = "1.0.4" - -[[constraint]] - name = "github.com/nats-io/go-nats" - version = "1.5.0" -# version = "1.3.0" - -[[constraint]] - name = "github.com/nsqio/go-nsq" - version = "1.0.7" - -[[constraint]] - name = "github.com/openzipkin/zipkin-go-opentracing" - version = "0.3.4" -# version = "0.3.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.8.0" - -[[constraint]] - name = "github.com/prometheus/client_model" - branch = "master" - -[[constraint]] - name = "github.com/prometheus/common" - branch = "master" - -[[constraint]] - name = "github.com/satori/go.uuid" - version = "1.2.0" - -[[constraint]] - name = "github.com/shirou/gopsutil" - version = "2.18.05" -# version = "2.18.04" - -[[constraint]] - name = "github.com/Shopify/sarama" - version = "1.17.0" -# version = "1.15.0" - -[[constraint]] - name = "github.com/soniah/gosnmp" - branch = "master" - -[[constraint]] - name = "github.com/StackExchange/wmi" - version = "1.0.0" - -[[constraint]] - name = "github.com/streadway/amqp" - branch = "master" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "1.2.2" -# version = "1.2.1" - -[[constraint]] - name = "github.com/tidwall/gjson" - version = "1.1.1" -# version = "1.0.0" - -[[constraint]] - name = "github.com/vjeantet/grok" - version = "1.0.0" - -[[constraint]] - name = "github.com/wvanbergen/kafka" - branch = "master" - -[[constraint]] - name = "github.com/zensqlmonitor/go-mssqldb" - branch = "master" - -[[constraint]] - name = "golang.org/x/net" - branch = "master" - -[[constraint]] - name = "golang.org/x/sys" - branch = "master" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.12.2" -# version = "1.8.0" - -[[constraint]] - name = "gopkg.in/gorethink/gorethink.v3" - version = "3.0.5" - -[[constraint]] - name = "gopkg.in/ldap.v2" - version = "2.5.1" - -[[constraint]] - name = "gopkg.in/mgo.v2" - branch = "v2" - -[[constraint]] - name = "gopkg.in/olivere/elastic.v5" - version = "^5.0.69" -# version = "^6.1.23" - -[[constraint]] - name = "gopkg.in/yaml.v2" - version = "^2.2.1" - -[[override]] - source = "https://github.com/fsnotify/fsnotify/archive/v1.4.7.tar.gz" - name = "gopkg.in/fsnotify.v1" diff --git a/LICENSE b/LICENSE index 1393544bb..886dcef0b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2015 InfluxDB +Copyright (c) 2015-2019 InfluxData Inc. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Makefile b/Makefile index 2f3fcecea..2b2e9668e 100644 --- a/Makefile +++ b/Makefile @@ -1,15 +1,23 @@ +ifeq ($(OS), Windows_NT) + VERSION := $(shell git describe --exact-match --tags 2>nul) + HOME := $(HOMEPATH) + CGO_ENABLED ?= 0 + export CGO_ENABLED +else + VERSION := $(shell git describe --exact-match --tags 2>/dev/null) +endif + PREFIX := /usr/local -VERSION := $(shell git describe --exact-match --tags 2>/dev/null) BRANCH := $(shell git rev-parse --abbrev-ref HEAD) COMMIT := $(shell git rev-parse --short HEAD) GOFILES ?= $(shell git ls-files '*.go') -GOFMT ?= $(shell gofmt -l $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))) +GOFMT ?= $(shell gofmt -l -s $(filter-out plugins/parsers/influx/machine.go, $(GOFILES))) BUILDFLAGS ?= ifdef GOBIN PATH := $(GOBIN):$(PATH) else -PATH := $(subst :,/bin:,$(GOPATH))/bin:$(PATH) +PATH := $(subst :,/bin:,$(shell go env GOPATH))/bin:$(PATH) endif LDFLAGS := $(LDFLAGS) -X main.commit=$(COMMIT) -X main.branch=$(BRANCH) @@ -17,33 +25,38 @@ ifdef VERSION LDFLAGS += -X main.version=$(VERSION) endif +.PHONY: all all: - $(MAKE) deps - $(MAKE) telegraf + @$(MAKE) --no-print-directory deps + @$(MAKE) --no-print-directory telegraf +.PHONY: deps deps: - go get -u github.com/golang/lint/golint - go get -u github.com/golang/dep/cmd/dep - dep ensure + go mod download +.PHONY: telegraf telegraf: go build -ldflags "$(LDFLAGS)" ./cmd/telegraf +.PHONY: go-install go-install: go install -ldflags "-w -s $(LDFLAGS)" ./cmd/telegraf +.PHONY: install install: telegraf mkdir -p $(DESTDIR)$(PREFIX)/bin/ cp telegraf $(DESTDIR)$(PREFIX)/bin/ +.PHONY: test test: go test -short ./... +.PHONY: fmt fmt: - @gofmt -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)) + @gofmt -s -w $(filter-out plugins/parsers/influx/machine.go, $(GOFILES)) +.PHONY: fmtcheck fmtcheck: - @echo '[INFO] running gofmt to identify incorrectly formatted code...' @if [ ! -z "$(GOFMT)" ]; then \ echo "[ERROR] gofmt has found errors in the following files:" ; \ echo "$(GOFMT)" ; \ @@ -51,17 +64,17 @@ fmtcheck: echo "Run make fmt to fix them." ; \ exit 1 ;\ fi - @echo '[INFO] done.' +.PHONY: test-windows test-windows: go test -short ./plugins/inputs/ping/... go test -short ./plugins/inputs/win_perf_counters/... go test -short ./plugins/inputs/win_services/... go test -short ./plugins/inputs/procstat/... go test -short ./plugins/inputs/ntpq/... + go test -short ./plugins/processors/port_name/... -# vet runs the Go source code static analysis tool `vet` to find -# any common errors. +.PHONY: vet vet: @echo 'go vet $$(go list ./... | grep -v ./plugins/parsers/influx)' @go vet $$(go list ./... | grep -v ./plugins/parsers/influx) ; if [ $$? -ne 0 ]; then \ @@ -71,27 +84,54 @@ vet: exit 1; \ fi -test-ci: fmtcheck vet - go test -short ./... +.PHONY: tidy +tidy: + go mod verify + go mod tidy + @if ! git diff --quiet go.mod go.sum; then \ + echo "please run go mod tidy and check in changes"; \ + exit 1; \ + fi +.PHONY: check +check: fmtcheck vet + @$(MAKE) --no-print-directory tidy + +.PHONY: test-all test-all: fmtcheck vet go test ./... 
+.PHONY: check-deps +check-deps: + ./scripts/check-deps.sh + +.PHONY: package package: ./scripts/build.py --package --platform=all --arch=all +.PHONY: package-release +package-release: + ./scripts/build.py --release --package --platform=all --arch=all \ + --upload --bucket=dl.influxdata.com/telegraf/releases + +.PHONY: package-nightly +package-nightly: + ./scripts/build.py --nightly --package --platform=all --arch=all \ + --upload --bucket=dl.influxdata.com/telegraf/nightlies + +.PHONY: clean clean: rm -f telegraf rm -f telegraf.exe +.PHONY: docker-image docker-image: - ./scripts/build.py --package --platform=linux --arch=amd64 - cp build/telegraf*$(COMMIT)*.deb . - docker build -f scripts/dev.docker --build-arg "package=telegraf*$(COMMIT)*.deb" -t "telegraf-dev:$(COMMIT)" . + docker build -f scripts/stretch.docker -t "telegraf:$(COMMIT)" . plugins/parsers/influx/machine.go: plugins/parsers/influx/machine.go.rl ragel -Z -G2 $^ -o $@ +.PHONY: static static: @echo "Building static linux binary..." @CGO_ENABLED=0 \ @@ -99,8 +139,17 @@ static: GOARCH=amd64 \ go build -ldflags "$(LDFLAGS)" ./cmd/telegraf +.PHONY: plugin-% plugin-%: @echo "Starting dev environment for $${$(@)} input plugin..." @docker-compose -f plugins/inputs/$${$(@)}/dev/docker-compose.yml up -.PHONY: deps telegraf install test test-windows lint vet test-all package clean docker-image fmtcheck uint64 static +.PHONY: ci-1.13 +ci-1.13: + docker build -t quay.io/influxdb/telegraf-ci:1.13.8 - < scripts/ci-1.13.docker + docker push quay.io/influxdb/telegraf-ci:1.13.8 + +.PHONY: ci-1.12 +ci-1.12: + docker build -t quay.io/influxdb/telegraf-ci:1.12.17 - < scripts/ci-1.12.docker + docker push quay.io/influxdb/telegraf-ci:1.12.17 diff --git a/README.md b/README.md index 679e2847f..32ed21edb 100644 --- a/README.md +++ b/README.md @@ -1,24 +1,24 @@ # Telegraf [![Circle CI](https://circleci.com/gh/influxdata/telegraf.svg?style=svg)](https://circleci.com/gh/influxdata/telegraf) [![Docker pulls](https://img.shields.io/docker/pulls/library/telegraf.svg)](https://hub.docker.com/_/telegraf/) -Telegraf is an agent written in Go for collecting, processing, aggregating, -and writing metrics. +Telegraf is an agent for collecting, processing, aggregating, and writing metrics. Design goals are to have a minimal memory footprint with a plugin system so -that developers in the community can easily add support for collecting metrics -. For an example configuration referencet from local or remote services. +that developers in the community can easily add support for collecting +metrics. -Telegraf is plugin-driven and has the concept of 4 distinct plugins: +Telegraf is plugin-driven and has the concept of 4 distinct plugin types: 1. [Input Plugins](#input-plugins) collect metrics from the system, services, or 3rd party APIs 2. [Processor Plugins](#processor-plugins) transform, decorate, and/or filter metrics 3. [Aggregator Plugins](#aggregator-plugins) create aggregate metrics (e.g. mean, min, max, quantiles, etc.) 4. [Output Plugins](#output-plugins) write metrics to various destinations -For more information on Processor and Aggregator plugins please [read this](./docs/AGGREGATORS_AND_PROCESSORS.md). +New plugins are designed to be easy to contribute, pull requests are welcomed +and we work to incorporate as many pull requests as possible. -New plugins are designed to be easy to contribute, -we'll eagerly accept pull -requests and will manage the set of plugins that Telegraf supports. 
+## Try in Browser :rocket: + +You can try Telegraf right in your browser in the [Telegraf playground](https://rootnroll.com/d/telegraf/). ## Contributing @@ -26,8 +26,19 @@ There are many ways to contribute: - Fix and [report bugs](https://github.com/influxdata/telegraf/issues/new) - [Improve documentation](https://github.com/influxdata/telegraf/issues?q=is%3Aopen+label%3Adocumentation) - [Review code and feature proposals](https://github.com/influxdata/telegraf/pulls) -- Answer questions on github and on the [Community Site](https://community.influxdata.com/) +- Answer questions and discuss here on github and on the [Community Site](https://community.influxdata.com/) - [Contribute plugins](CONTRIBUTING.md) +- [Contribute external plugins](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/execd/shim) *(experimental)* + +## Minimum Requirements + +Telegraf shares the same [minimum requirements][] as Go: +- Linux kernel version 2.6.23 or later +- Windows 7 or later +- FreeBSD 11.2 or later +- MacOS 10.11 El Capitan or later + +[minimum requirements]: https://github.com/golang/go/wiki/MinimumRequirements#minimum-requirements ## Installation: @@ -40,20 +51,29 @@ Ansible role: https://github.com/rossmcdonald/telegraf ### From Source: -Telegraf requires golang version 1.9 or newer, the Makefile requires GNU make. +Telegraf requires Go version 1.13 or newer, the Makefile requires GNU make. -Dependencies are managed with [dep](https://github.com/golang/dep), -which is installed by the Makefile if you don't have it already. +1. [Install Go](https://golang.org/doc/install) >=1.13 (1.14 recommended) +2. Clone the Telegraf repository: + ``` + cd ~/src + git clone https://github.com/influxdata/telegraf.git + ``` +3. Run `make` from the source directory + ``` + cd ~/src/telegraf + make + ``` -1. [Install Go](https://golang.org/doc/install) -2. [Setup your GOPATH](https://golang.org/doc/code.html#GOPATH) -3. Run `go get -d github.com/influxdata/telegraf` -4. Run `cd $GOPATH/src/github.com/influxdata/telegraf` -5. Run `make` +### Changelog + +View the [changelog](/CHANGELOG.md) for the latest updates and changes by +version. 
### Nightly Builds These builds are generated from the master branch: +- [telegraf-nightly_darwin_amd64.tar.gz](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly_darwin_amd64.tar.gz) - [telegraf_nightly_amd64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_amd64.deb) - [telegraf_nightly_arm64.deb](https://dl.influxdata.com/telegraf/nightlies/telegraf_nightly_arm64.deb) - [telegraf-nightly.arm64.rpm](https://dl.influxdata.com/telegraf/nightlies/telegraf-nightly.arm64.rpm) @@ -83,112 +103,171 @@ These builds are generated from the master branch: See usage with: ``` -./telegraf --help +telegraf --help ``` #### Generate a telegraf config file: ``` -./telegraf config > telegraf.conf +telegraf config > telegraf.conf ``` #### Generate config with only cpu input & influxdb output plugins defined: ``` -./telegraf --input-filter cpu --output-filter influxdb config +telegraf --section-filter agent:inputs:outputs --input-filter cpu --output-filter influxdb config ``` -#### Run a single telegraf collection, outputing metrics to stdout: +#### Run a single telegraf collection, outputting metrics to stdout: ``` -./telegraf --config telegraf.conf --test +telegraf --config telegraf.conf --test ``` #### Run telegraf with all plugins defined in config file: ``` -./telegraf --config telegraf.conf +telegraf --config telegraf.conf ``` #### Run telegraf, enabling the cpu & memory input, and influxdb output plugins: ``` -./telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb +telegraf --config telegraf.conf --input-filter cpu:mem --output-filter influxdb ``` +## Documentation -## Configuration +[Latest Release Documentation][release docs]. -See the [configuration guide](docs/CONFIGURATION.md) for a rundown of the more advanced -configuration options. +For documentation on the latest development code see the [documentation index][devel docs]. 
+ +[release docs]: https://docs.influxdata.com/telegraf +[devel docs]: docs ## Input Plugins +* [activemq](./plugins/inputs/activemq) * [aerospike](./plugins/inputs/aerospike) * [amqp_consumer](./plugins/inputs/amqp_consumer) (rabbitmq) * [apache](./plugins/inputs/apache) +* [apcupsd](./plugins/inputs/apcupsd) * [aurora](./plugins/inputs/aurora) -* [aws cloudwatch](./plugins/inputs/cloudwatch) +* [aws cloudwatch](./plugins/inputs/cloudwatch) (Amazon Cloudwatch) +* [azure_storage_queue](./plugins/inputs/azure_storage_queue) * [bcache](./plugins/inputs/bcache) +* [beanstalkd](./plugins/inputs/beanstalkd) +* [bind](./plugins/inputs/bind) * [bond](./plugins/inputs/bond) -* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) * [burrow](./plugins/inputs/burrow) +* [cassandra](./plugins/inputs/cassandra) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) * [ceph](./plugins/inputs/ceph) * [cgroup](./plugins/inputs/cgroup) * [chrony](./plugins/inputs/chrony) -* [consul](./plugins/inputs/consul) +* [cisco_telemetry_gnmi](./plugins/inputs/cisco_telemetry_gnmi) +* [cisco_telemetry_mdt](./plugins/inputs/cisco_telemetry_mdt) +* [clickhouse](./plugins/inputs/clickhouse) +* [cloud_pubsub](./plugins/inputs/cloud_pubsub) Google Cloud Pub/Sub +* [cloud_pubsub_push](./plugins/inputs/cloud_pubsub_push) Google Cloud Pub/Sub push endpoint * [conntrack](./plugins/inputs/conntrack) +* [consul](./plugins/inputs/consul) * [couchbase](./plugins/inputs/couchbase) * [couchdb](./plugins/inputs/couchdb) +* [cpu](./plugins/inputs/cpu) * [DC/OS](./plugins/inputs/dcos) +* [diskio](./plugins/inputs/diskio) +* [disk](./plugins/inputs/disk) * [disque](./plugins/inputs/disque) * [dmcache](./plugins/inputs/dmcache) * [dns query time](./plugins/inputs/dns_query) * [docker](./plugins/inputs/docker) +* [docker_log](./plugins/inputs/docker_log) * [dovecot](./plugins/inputs/dovecot) +* [aws ecs](./plugins/inputs/ecs) (Amazon Elastic Container Service, Fargate) * [elasticsearch](./plugins/inputs/elasticsearch) +* [ethtool](./plugins/inputs/ethtool) +* [eventhub_consumer](./plugins/inputs/eventhub_consumer) (Azure Event Hubs \& Azure IoT Hub) * [exec](./plugins/inputs/exec) (generic executable plugin, support JSON, influx, graphite and nagios) +* [execd](./plugins/inputs/execd) * [fail2ban](./plugins/inputs/fail2ban) * [fibaro](./plugins/inputs/fibaro) +* [file](./plugins/inputs/file) * [filestat](./plugins/inputs/filestat) +* [filecount](./plugins/inputs/filecount) +* [fireboard](/plugins/inputs/fireboard) * [fluentd](./plugins/inputs/fluentd) +* [github](./plugins/inputs/github) * [graylog](./plugins/inputs/graylog) * [haproxy](./plugins/inputs/haproxy) * [hddtemp](./plugins/inputs/hddtemp) +* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) +* [http_listener](./plugins/inputs/influxdb_listener) (deprecated, renamed to [influxdb_listener](/plugins/inputs/influxdb_listener)) +* [http_listener_v2](./plugins/inputs/http_listener_v2) * [http](./plugins/inputs/http) (generic HTTP plugin, supports using input data formats) * [http_response](./plugins/inputs/http_response) -* [httpjson](./plugins/inputs/httpjson) (generic JSON-emitting http service plugin) -* [internal](./plugins/inputs/internal) +* [icinga2](./plugins/inputs/icinga2) +* [infiniband](./plugins/inputs/infiniband) * [influxdb](./plugins/inputs/influxdb) +* [influxdb_listener](./plugins/inputs/influxdb_listener) +* [internal](./plugins/inputs/internal) * [interrupts](./plugins/inputs/interrupts) 
* [ipmi_sensor](./plugins/inputs/ipmi_sensor) -* [iptables](./plugins/inputs/iptables) * [ipset](./plugins/inputs/ipset) -* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) +* [iptables](./plugins/inputs/iptables) +* [ipvs](./plugins/inputs/ipvs) +* [jenkins](./plugins/inputs/jenkins) * [jolokia2](./plugins/inputs/jolokia2) (java, cassandra, kafka) -- [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) +* [jolokia](./plugins/inputs/jolokia) (deprecated, use [jolokia2](./plugins/inputs/jolokia2)) +* [jti_openconfig_telemetry](./plugins/inputs/jti_openconfig_telemetry) +* [kafka_consumer](./plugins/inputs/kafka_consumer) * [kapacitor](./plugins/inputs/kapacitor) +* [aws kinesis](./plugins/inputs/kinesis_consumer) (Amazon Kinesis) +* [kernel](./plugins/inputs/kernel) +* [kernel_vmstat](./plugins/inputs/kernel_vmstat) +* [kibana](./plugins/inputs/kibana) * [kubernetes](./plugins/inputs/kubernetes) +* [kube_inventory](./plugins/inputs/kube_inventory) +* [lanz](./plugins/inputs/lanz) * [leofs](./plugins/inputs/leofs) +* [linux_sysctl_fs](./plugins/inputs/linux_sysctl_fs) +* [logparser](./plugins/inputs/logparser) (deprecated, use [tail](/plugins/inputs/tail)) +* [logstash](./plugins/inputs/logstash) * [lustre2](./plugins/inputs/lustre2) * [mailchimp](./plugins/inputs/mailchimp) +* [marklogic](./plugins/inputs/marklogic) * [mcrouter](./plugins/inputs/mcrouter) * [memcached](./plugins/inputs/memcached) +* [mem](./plugins/inputs/mem) * [mesos](./plugins/inputs/mesos) * [minecraft](./plugins/inputs/minecraft) +* [modbus](./plugins/inputs/modbus) * [mongodb](./plugins/inputs/mongodb) +* [monit](./plugins/inputs/monit) +* [mqtt_consumer](./plugins/inputs/mqtt_consumer) +* [multifile](./plugins/inputs/multifile) * [mysql](./plugins/inputs/mysql) +* [nats_consumer](./plugins/inputs/nats_consumer) * [nats](./plugins/inputs/nats) +* [neptune_apex](./plugins/inputs/neptune_apex) +* [net](./plugins/inputs/net) * [net_response](./plugins/inputs/net_response) +* [netstat](./plugins/inputs/net) * [nginx](./plugins/inputs/nginx) +* [nginx_plus_api](./plugins/inputs/nginx_plus_api) * [nginx_plus](./plugins/inputs/nginx_plus) +* [nginx_upstream_check](./plugins/inputs/nginx_upstream_check) +* [nginx_vts](./plugins/inputs/nginx_vts) +* [nsq_consumer](./plugins/inputs/nsq_consumer) * [nsq](./plugins/inputs/nsq) * [nstat](./plugins/inputs/nstat) * [ntpq](./plugins/inputs/ntpq) * [nvidia_smi](./plugins/inputs/nvidia_smi) * [openldap](./plugins/inputs/openldap) +* [openntpd](./plugins/inputs/openntpd) * [opensmtpd](./plugins/inputs/opensmtpd) +* [openweathermap](./plugins/inputs/openweathermap) * [pf](./plugins/inputs/pf) +* [pgbouncer](./plugins/inputs/pgbouncer) * [phpfpm](./plugins/inputs/phpfpm) * [phusion passenger](./plugins/inputs/passenger) * [ping](./plugins/inputs/ping) @@ -196,6 +275,8 @@ configuration options. * [postgresql_extensible](./plugins/inputs/postgresql_extensible) * [postgresql](./plugins/inputs/postgresql) * [powerdns](./plugins/inputs/powerdns) +* [powerdns_recursor](./plugins/inputs/powerdns_recursor) +* [processes](./plugins/inputs/processes) * [procstat](./plugins/inputs/procstat) * [prometheus](./plugins/inputs/prometheus) (can be used for [Caddy server](./plugins/inputs/prometheus/README.md#usage-for-caddy-http-server)) * [puppetagent](./plugins/inputs/puppetagent) @@ -206,49 +287,35 @@ configuration options. 
* [riak](./plugins/inputs/riak) * [salesforce](./plugins/inputs/salesforce) * [sensors](./plugins/inputs/sensors) +* [sflow](./plugins/inputs/sflow) * [smart](./plugins/inputs/smart) -* [snmp](./plugins/inputs/snmp) * [snmp_legacy](./plugins/inputs/snmp_legacy) +* [snmp](./plugins/inputs/snmp) +* [snmp_trap](./plugins/inputs/snmp_trap) +* [socket_listener](./plugins/inputs/socket_listener) * [solr](./plugins/inputs/solr) * [sql server](./plugins/inputs/sqlserver) (microsoft) +* [stackdriver](./plugins/inputs/stackdriver) (Google Cloud Monitoring) +* [statsd](./plugins/inputs/statsd) +* [suricata](./plugins/inputs/suricata) +* [swap](./plugins/inputs/swap) +* [synproxy](./plugins/inputs/synproxy) * [syslog](./plugins/inputs/syslog) +* [sysstat](./plugins/inputs/sysstat) +* [systemd_units](./plugins/inputs/systemd_units) +* [system](./plugins/inputs/system) +* [tail](./plugins/inputs/tail) +* [temp](./plugins/inputs/temp) +* [tcp_listener](./plugins/inputs/socket_listener) * [teamspeak](./plugins/inputs/teamspeak) * [tengine](./plugins/inputs/tengine) * [tomcat](./plugins/inputs/tomcat) * [twemproxy](./plugins/inputs/twemproxy) -* [unbound](./plugins/inputs/unbound) -* [varnish](./plugins/inputs/varnish) -* [zfs](./plugins/inputs/zfs) -* [zookeeper](./plugins/inputs/zookeeper) -* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) -* [win_services](./plugins/inputs/win_services) -* [sysstat](./plugins/inputs/sysstat) -* [system](./plugins/inputs/system) - * cpu - * mem - * net - * netstat - * disk - * diskio - * swap - * processes - * kernel (/proc/stat) - * kernel (/proc/vmstat) - * linux_sysctl_fs (/proc/sys/fs) - -Telegraf can also collect metrics via the following service plugins: - -* [http_listener](./plugins/inputs/http_listener) -* [kafka_consumer](./plugins/inputs/kafka_consumer) -* [mqtt_consumer](./plugins/inputs/mqtt_consumer) -* [nats_consumer](./plugins/inputs/nats_consumer) -* [nsq_consumer](./plugins/inputs/nsq_consumer) -* [logparser](./plugins/inputs/logparser) -* [statsd](./plugins/inputs/statsd) -* [socket_listener](./plugins/inputs/socket_listener) -* [tail](./plugins/inputs/tail) -* [tcp_listener](./plugins/inputs/socket_listener) * [udp_listener](./plugins/inputs/socket_listener) +* [unbound](./plugins/inputs/unbound) +* [uwsgi](./plugins/inputs/uwsgi) +* [varnish](./plugins/inputs/varnish) +* [vsphere](./plugins/inputs/vsphere) VMware vSphere * [webhooks](./plugins/inputs/webhooks) * [filestack](./plugins/inputs/webhooks/filestack) * [github](./plugins/inputs/webhooks/github) @@ -256,61 +323,107 @@ Telegraf can also collect metrics via the following service plugins: * [papertrail](./plugins/inputs/webhooks/papertrail) * [particle](./plugins/inputs/webhooks/particle) * [rollbar](./plugins/inputs/webhooks/rollbar) +* [win_perf_counters](./plugins/inputs/win_perf_counters) (windows performance counters) +* [win_services](./plugins/inputs/win_services) +* [wireguard](./plugins/inputs/wireguard) +* [wireless](./plugins/inputs/wireless) +* [x509_cert](./plugins/inputs/x509_cert) +* [zfs](./plugins/inputs/zfs) * [zipkin](./plugins/inputs/zipkin) +* [zookeeper](./plugins/inputs/zookeeper) -Telegraf is able to parse the following input data formats into metrics, these -formats may be used with input plugins supporting the `data_format` option: +## Parsers -* [InfluxDB Line Protocol](./docs/DATA_FORMATS_INPUT.md#influx) -* [JSON](./docs/DATA_FORMATS_INPUT.md#json) -* [Graphite](./docs/DATA_FORMATS_INPUT.md#graphite) -* 
[Value](./docs/DATA_FORMATS_INPUT.md#value) -* [Nagios](./docs/DATA_FORMATS_INPUT.md#nagios) -* [Collectd](./docs/DATA_FORMATS_INPUT.md#collectd) -* [Dropwizard](./docs/DATA_FORMATS_INPUT.md#dropwizard) +- [InfluxDB Line Protocol](/plugins/parsers/influx) +- [Collectd](/plugins/parsers/collectd) +- [CSV](/plugins/parsers/csv) +- [Dropwizard](/plugins/parsers/dropwizard) +- [FormUrlencoded](/plugins/parser/form_urlencoded) +- [Graphite](/plugins/parsers/graphite) +- [Grok](/plugins/parsers/grok) +- [JSON](/plugins/parsers/json) +- [Logfmt](/plugins/parsers/logfmt) +- [Nagios](/plugins/parsers/nagios) +- [Value](/plugins/parsers/value), ie: 45 or "booyah" +- [Wavefront](/plugins/parsers/wavefront) + +## Serializers + +- [InfluxDB Line Protocol](/plugins/serializers/influx) +- [JSON](/plugins/serializers/json) +- [Graphite](/plugins/serializers/graphite) +- [ServiceNow](/plugins/serializers/nowmetric) +- [SplunkMetric](/plugins/serializers/splunkmetric) +- [Carbon2](/plugins/serializers/carbon2) +- [Wavefront](/plugins/serializers/wavefront) ## Processor Plugins -* [converter](./plugins/processors/converter) -* [override](./plugins/processors/override) -* [printer](./plugins/processors/printer) -* [regex](./plugins/processors/regex) -* [topk](./plugins/processors/topk) +* [clone](/plugins/processors/clone) +* [converter](/plugins/processors/converter) +* [date](/plugins/processors/date) +* [dedup](/plugins/processors/dedup) +* [defaults](/plugins/processors/defaults) +* [enum](/plugins/processors/enum) +* [filepath](/plugins/processors/filepath) +* [override](/plugins/processors/override) +* [parser](/plugins/processors/parser) +* [pivot](/plugins/processors/pivot) +* [printer](/plugins/processors/printer) +* [regex](/plugins/processors/regex) +* [rename](/plugins/processors/rename) +* [s2geo](/plugins/processors/s2geo) +* [strings](/plugins/processors/strings) +* [tag_limit](/plugins/processors/tag_limit) +* [template](/plugins/processors/template) +* [topk](/plugins/processors/topk) +* [unpivot](/plugins/processors/unpivot) ## Aggregator Plugins * [basicstats](./plugins/aggregators/basicstats) -* [minmax](./plugins/aggregators/minmax) +* [final](./plugins/aggregators/final) * [histogram](./plugins/aggregators/histogram) +* [merge](./plugins/aggregators/merge) +* [minmax](./plugins/aggregators/minmax) * [valuecounter](./plugins/aggregators/valuecounter) ## Output Plugins -* [influxdb](./plugins/outputs/influxdb) +* [influxdb](./plugins/outputs/influxdb) (InfluxDB 1.x) +* [influxdb_v2](./plugins/outputs/influxdb_v2) ([InfluxDB 2.x](https://github.com/influxdata/influxdb)) * [amon](./plugins/outputs/amon) * [amqp](./plugins/outputs/amqp) (rabbitmq) * [application_insights](./plugins/outputs/application_insights) * [aws kinesis](./plugins/outputs/kinesis) * [aws cloudwatch](./plugins/outputs/cloudwatch) +* [azure_monitor](./plugins/outputs/azure_monitor) +* [cloud_pubsub](./plugins/outputs/cloud_pubsub) Google Cloud Pub/Sub * [cratedb](./plugins/outputs/cratedb) * [datadog](./plugins/outputs/datadog) * [discard](./plugins/outputs/discard) * [elasticsearch](./plugins/outputs/elasticsearch) +* [exec](./plugins/outputs/exec) * [file](./plugins/outputs/file) * [graphite](./plugins/outputs/graphite) * [graylog](./plugins/outputs/graylog) +* [health](./plugins/outputs/health) * [http](./plugins/outputs/http) * [instrumental](./plugins/outputs/instrumental) * [kafka](./plugins/outputs/kafka) * [librato](./plugins/outputs/librato) * [mqtt](./plugins/outputs/mqtt) * [nats](./plugins/outputs/nats) +* 
[newrelic](./plugins/outputs/newrelic) * [nsq](./plugins/outputs/nsq) * [opentsdb](./plugins/outputs/opentsdb) * [prometheus](./plugins/outputs/prometheus_client) * [riemann](./plugins/outputs/riemann) * [riemann_legacy](./plugins/outputs/riemann_legacy) * [socket_writer](./plugins/outputs/socket_writer) +* [stackdriver](./plugins/outputs/stackdriver) (Google Cloud Monitoring) +* [syslog](./plugins/outputs/syslog) * [tcp](./plugins/outputs/socket_writer) * [udp](./plugins/outputs/socket_writer) +* [warp10](./plugins/outputs/warp10) * [wavefront](./plugins/outputs/wavefront) diff --git a/accumulator.go b/accumulator.go index 370f0c70c..1ea5737a8 100644 --- a/accumulator.go +++ b/accumulator.go @@ -1,16 +1,14 @@ package telegraf -import "time" +import ( + "time" +) -// Accumulator is an interface for "accumulating" metrics from plugin(s). -// The metrics are sent down a channel shared between all plugins. +// Accumulator allows adding metrics to the processing flow. type Accumulator interface { // AddFields adds a metric to the accumulator with the given measurement // name, fields, and tags (and timestamp). If a timestamp is not provided, // then the accumulator sets it to "now". - // Create a point with a value, decorating it with tags - // NOTE: tags is expected to be owned by the caller, don't mutate - // it after passing to Add. AddFields(measurement string, fields map[string]interface{}, tags map[string]string, @@ -40,7 +38,48 @@ type Accumulator interface { tags map[string]string, t ...time.Time) - SetPrecision(precision, interval time.Duration) + // AddMetric adds an metric to the accumulator. + AddMetric(Metric) + // SetPrecision sets the timestamp rounding precision. All metrics addeds + // added to the accumulator will have their timestamp rounded to the + // nearest multiple of precision. + SetPrecision(precision time.Duration) + + // Report an error. AddError(err error) + + // Upgrade to a TrackingAccumulator with space for maxTracked + // metrics/batches. + WithTracking(maxTracked int) TrackingAccumulator +} + +// TrackingID uniquely identifies a tracked metric group +type TrackingID uint64 + +// DeliveryInfo provides the results of a delivered metric group. +type DeliveryInfo interface { + // ID is the TrackingID + ID() TrackingID + + // Delivered returns true if the metric was processed successfully. + Delivered() bool +} + +// TrackingAccumulator is an Accumulator that provides a signal when the +// metric has been fully processed. Sending more metrics than the accumulator +// has been allocated for without reading status from the Accepted or Rejected +// channels is an error. +type TrackingAccumulator interface { + Accumulator + + // Add the Metric and arrange for tracking feedback after processing.. + AddTrackingMetric(m Metric) TrackingID + + // Add a group of Metrics and arrange for a signal when the group has been + // processed. + AddTrackingMetricGroup(group []Metric) TrackingID + + // Delivered returns a channel that will contain the tracking results. 
+ Delivered() <-chan DeliveryInfo } diff --git a/agent/accumulator.go b/agent/accumulator.go index 51c213a81..65000fd98 100644 --- a/agent/accumulator.go +++ b/agent/accumulator.go @@ -1,31 +1,27 @@ package agent import ( - "log" "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/selfstat" -) - -var ( - NErrors = selfstat.Register("agent", "gather_errors", map[string]string{}) + "github.com/influxdata/telegraf/metric" ) type MetricMaker interface { - Name() string - MakeMetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - mType telegraf.ValueType, - t time.Time, - ) telegraf.Metric + LogName() string + MakeMetric(metric telegraf.Metric) telegraf.Metric + Log() telegraf.Logger +} + +type accumulator struct { + maker MetricMaker + metrics chan<- telegraf.Metric + precision time.Duration } func NewAccumulator( maker MetricMaker, - metrics chan telegraf.Metric, + metrics chan<- telegraf.Metric, ) telegraf.Accumulator { acc := accumulator{ maker: maker, @@ -35,23 +31,13 @@ func NewAccumulator( return &acc } -type accumulator struct { - metrics chan telegraf.Metric - - maker MetricMaker - - precision time.Duration -} - func (ac *accumulator) AddFields( measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Untyped, ac.getTime(t)); m != nil { - ac.metrics <- m - } + ac.addFields(measurement, tags, fields, telegraf.Untyped, t...) } func (ac *accumulator) AddGauge( @@ -60,9 +46,7 @@ func (ac *accumulator) AddGauge( tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Gauge, ac.getTime(t)); m != nil { - ac.metrics <- m - } + ac.addFields(measurement, tags, fields, telegraf.Gauge, t...) } func (ac *accumulator) AddCounter( @@ -71,9 +55,7 @@ func (ac *accumulator) AddCounter( tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Counter, ac.getTime(t)); m != nil { - ac.metrics <- m - } + ac.addFields(measurement, tags, fields, telegraf.Counter, t...) } func (ac *accumulator) AddSummary( @@ -82,9 +64,7 @@ func (ac *accumulator) AddSummary( tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Summary, ac.getTime(t)); m != nil { - ac.metrics <- m - } + ac.addFields(measurement, tags, fields, telegraf.Summary, t...) } func (ac *accumulator) AddHistogram( @@ -93,7 +73,28 @@ func (ac *accumulator) AddHistogram( tags map[string]string, t ...time.Time, ) { - if m := ac.maker.MakeMetric(measurement, fields, tags, telegraf.Histogram, ac.getTime(t)); m != nil { + ac.addFields(measurement, tags, fields, telegraf.Histogram, t...) +} + +func (ac *accumulator) AddMetric(m telegraf.Metric) { + m.SetTime(m.Time().Round(ac.precision)) + if m := ac.maker.MakeMetric(m); m != nil { + ac.metrics <- m + } +} + +func (ac *accumulator) addFields( + measurement string, + tags map[string]string, + fields map[string]interface{}, + tp telegraf.ValueType, + t ...time.Time, +) { + m, err := metric.New(measurement, tags, fields, ac.getTime(t), tp) + if err != nil { + return + } + if m := ac.maker.MakeMetric(m); m != nil { ac.metrics <- m } } @@ -104,33 +105,14 @@ func (ac *accumulator) AddError(err error) { if err == nil { return } - NErrors.Incr(1) - //TODO suppress/throttle consecutive duplicate errors? - log.Printf("E! 
Error in plugin [%s]: %s", ac.maker.Name(), err) + ac.maker.Log().Errorf("Error in plugin: %v", err) } -// SetPrecision takes two time.Duration objects. If the first is non-zero, -// it sets that as the precision. Otherwise, it takes the second argument -// as the order of time that the metrics should be rounded to, with the -// maximum being 1s. -func (ac *accumulator) SetPrecision(precision, interval time.Duration) { - if precision > 0 { - ac.precision = precision - return - } - switch { - case interval >= time.Second: - ac.precision = time.Second - case interval >= time.Millisecond: - ac.precision = time.Millisecond - case interval >= time.Microsecond: - ac.precision = time.Microsecond - default: - ac.precision = time.Nanosecond - } +func (ac *accumulator) SetPrecision(precision time.Duration) { + ac.precision = precision } -func (ac accumulator) getTime(t []time.Time) time.Time { +func (ac *accumulator) getTime(t []time.Time) time.Time { var timestamp time.Time if len(t) > 0 { timestamp = t[0] @@ -139,3 +121,43 @@ func (ac accumulator) getTime(t []time.Time) time.Time { } return timestamp.Round(ac.precision) } + +func (ac *accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { + return &trackingAccumulator{ + Accumulator: ac, + delivered: make(chan telegraf.DeliveryInfo, maxTracked), + } +} + +type trackingAccumulator struct { + telegraf.Accumulator + delivered chan telegraf.DeliveryInfo +} + +func (a *trackingAccumulator) AddTrackingMetric(m telegraf.Metric) telegraf.TrackingID { + dm, id := metric.WithTracking(m, a.onDelivery) + a.AddMetric(dm) + return id +} + +func (a *trackingAccumulator) AddTrackingMetricGroup(group []telegraf.Metric) telegraf.TrackingID { + db, id := metric.WithGroupTracking(group, a.onDelivery) + for _, m := range db { + a.AddMetric(m) + } + return id +} + +func (a *trackingAccumulator) Delivered() <-chan telegraf.DeliveryInfo { + return a.delivered +} + +func (a *trackingAccumulator) onDelivery(info telegraf.DeliveryInfo) { + select { + case a.delivered <- info: + default: + // This is a programming error in the input. More items were sent for + // tracking than space requested. 
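Because `getTime` and `AddMetric` use `time.Round`, sub-precision timestamps are rounded to the nearest unit rather than truncated. A small standalone sketch of that behaviour, using only the standard library:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// With precision set to one second, 12:00:00.7 rounds up to 12:00:01,
	// while 12:00:00.3 rounds down to 12:00:00 (time.Round rounds to nearest).
	precision := time.Second
	a := time.Date(2006, time.February, 10, 12, 0, 0, 700000000, time.UTC)
	b := time.Date(2006, time.February, 10, 12, 0, 0, 300000000, time.UTC)
	fmt.Println(a.Round(precision)) // 2006-02-10 12:00:01 +0000 UTC
	fmt.Println(b.Round(precision)) // 2006-02-10 12:00:00 +0000 UTC
}
```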
+ panic("channel is full") + } +} diff --git a/agent/accumulator_test.go b/agent/accumulator_test.go index 22fa3e409..38a7e047c 100644 --- a/agent/accumulator_test.go +++ b/agent/accumulator_test.go @@ -9,8 +9,7 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" - + "github.com/influxdata/telegraf/models" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -61,7 +60,6 @@ func TestAccAddError(t *testing.T) { a.AddError(fmt.Errorf("baz")) errs := bytes.Split(errBuf.Bytes(), []byte{'\n'}) - assert.EqualValues(t, int64(3), NErrors.Get()) require.Len(t, errs, 4) // 4 because of trailing newline assert.Contains(t, string(errs[0]), "TestPlugin") assert.Contains(t, string(errs[0]), "foo") @@ -76,7 +74,6 @@ func TestSetPrecision(t *testing.T) { name string unset bool precision time.Duration - interval time.Duration timestamp time.Time expected time.Time }{ @@ -88,13 +85,13 @@ func TestSetPrecision(t *testing.T) { }, { name: "second interval", - interval: time.Second, + precision: time.Second, timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC), expected: time.Date(2006, time.February, 10, 12, 0, 0, 0, time.UTC), }, { name: "microsecond interval", - interval: time.Microsecond, + precision: time.Microsecond, timestamp: time.Date(2006, time.February, 10, 12, 0, 0, 82912748, time.UTC), expected: time.Date(2006, time.February, 10, 12, 0, 0, 82913000, time.UTC), }, @@ -111,7 +108,7 @@ func TestSetPrecision(t *testing.T) { a := NewAccumulator(&TestMetricMaker{}, metrics) if !tt.unset { - a.SetPrecision(tt.precision, tt.interval) + a.SetPrecision(tt.precision) } a.AddFields("acctest", @@ -128,32 +125,36 @@ func TestSetPrecision(t *testing.T) { } } +func TestAddTrackingMetricGroupEmpty(t *testing.T) { + ch := make(chan telegraf.Metric, 10) + metrics := []telegraf.Metric{} + acc := NewAccumulator(&TestMetricMaker{}, ch).WithTracking(1) + + id := acc.AddTrackingMetricGroup(metrics) + + select { + case tracking := <-acc.Delivered(): + require.Equal(t, tracking.ID(), id) + default: + t.Fatal("empty group should be delivered immediately") + } +} + type TestMetricMaker struct { } func (tm *TestMetricMaker) Name() string { return "TestPlugin" } -func (tm *TestMetricMaker) MakeMetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - mType telegraf.ValueType, - t time.Time, -) telegraf.Metric { - switch mType { - case telegraf.Untyped: - if m, err := metric.New(measurement, tags, fields, t); err == nil { - return m - } - case telegraf.Counter: - if m, err := metric.New(measurement, tags, fields, t, telegraf.Counter); err == nil { - return m - } - case telegraf.Gauge: - if m, err := metric.New(measurement, tags, fields, t, telegraf.Gauge); err == nil { - return m - } - } - return nil + +func (tm *TestMetricMaker) LogName() string { + return tm.Name() +} + +func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { + return metric +} + +func (tm *TestMetricMaker) Log() telegraf.Logger { + return models.NewLogger("TestPlugin", "test", "") } diff --git a/agent/agent.go b/agent/agent.go index 6eb9505e2..72e906a59 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -1,429 +1,1094 @@ package agent import ( + "context" "fmt" "log" "os" "runtime" + "sort" "sync" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/config" - 
"github.com/influxdata/telegraf/internal/models" - "github.com/influxdata/telegraf/selfstat" + "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/plugins/serializers/influx" ) -// Agent runs telegraf and collects data based on the given config +// Agent runs a set of plugins. type Agent struct { Config *config.Config } -// NewAgent returns an Agent struct based off the given Config +// NewAgent returns an Agent for the given Config. func NewAgent(config *config.Config) (*Agent, error) { a := &Agent{ Config: config, } - - if !a.Config.Agent.OmitHostname { - if a.Config.Agent.Hostname == "" { - hostname, err := os.Hostname() - if err != nil { - return nil, err - } - - a.Config.Agent.Hostname = hostname - } - - config.Tags["host"] = a.Config.Agent.Hostname - } - return a, nil } -// Connect connects to all configured outputs -func (a *Agent) Connect() error { - for _, o := range a.Config.Outputs { - switch ot := o.Output.(type) { - case telegraf.ServiceOutput: - if err := ot.Start(); err != nil { - log.Printf("E! Service for output %s failed to start, exiting\n%s\n", - o.Name, err.Error()) - return err - } - } +// inputUnit is a group of input plugins and the shared channel they write to. +// +// ┌───────┐ +// │ Input │───┐ +// └───────┘ │ +// ┌───────┐ │ ______ +// │ Input │───┼──▶ ()_____) +// └───────┘ │ +// ┌───────┐ │ +// │ Input │───┘ +// └───────┘ +type inputUnit struct { + dst chan<- telegraf.Metric + inputs []*models.RunningInput +} - log.Printf("D! Attempting connection to output: %s\n", o.Name) - err := o.Output.Connect() - if err != nil { - log.Printf("E! Failed to connect to output %s, retrying in 15s, "+ - "error was '%s' \n", o.Name, err) - time.Sleep(15 * time.Second) - err = o.Output.Connect() +// ______ ┌───────────┐ ______ +// ()_____)──▶ │ Processor │──▶ ()_____) +// └───────────┘ +type processorUnit struct { + src <-chan telegraf.Metric + dst chan<- telegraf.Metric + processor *models.RunningProcessor +} + +// aggregatorUnit is a group of Aggregators and their source and sink channels. +// Typically the aggregators write to a processor channel and pass the original +// metrics to the output channel. The sink channels may be the same channel. +// +// ┌────────────┐ +// ┌──▶ │ Aggregator │───┐ +// │ └────────────┘ │ +// ______ │ ┌────────────┐ │ ______ +// ()_____)───┼──▶ │ Aggregator │───┼──▶ ()_____) +// │ └────────────┘ │ +// │ ┌────────────┐ │ +// ├──▶ │ Aggregator │───┘ +// │ └────────────┘ +// │ ______ +// └────────────────────────▶ ()_____) +type aggregatorUnit struct { + src <-chan telegraf.Metric + aggC chan<- telegraf.Metric + outputC chan<- telegraf.Metric + aggregators []*models.RunningAggregator +} + +// outputUnit is a group of Outputs and their source channel. Metrics on the +// channel are written to all outputs. +// +// ┌────────┐ +// ┌──▶ │ Output │ +// │ └────────┘ +// ______ ┌─────┐ │ ┌────────┐ +// ()_____)──▶ │ Fan │───┼──▶ │ Output │ +// └─────┘ │ └────────┘ +// │ ┌────────┐ +// └──▶ │ Output │ +// └────────┘ +type outputUnit struct { + src <-chan telegraf.Metric + outputs []*models.RunningOutput +} + +// Run starts and runs the Agent until the context is done. +func (a *Agent) Run(ctx context.Context) error { + log.Printf("I! [agent] Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ + "Flush Interval:%s", + a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, + a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) + + log.Printf("D! 
[agent] Initializing plugins") + err := a.initPlugins() + if err != nil { + return err + } + + startTime := time.Now() + + log.Printf("D! [agent] Connecting outputs") + next, ou, err := a.startOutputs(ctx, a.Config.Outputs) + if err != nil { + return err + } + + var apu []*processorUnit + var au *aggregatorUnit + if len(a.Config.Aggregators) != 0 { + aggC := next + if len(a.Config.AggProcessors) != 0 { + aggC, apu, err = a.startProcessors(next, a.Config.AggProcessors) if err != nil { return err } } - log.Printf("D! Successfully connected to output: %s\n", o.Name) + + next, au, err = a.startAggregators(aggC, next, a.Config.Aggregators) + if err != nil { + return err + } + } + + var pu []*processorUnit + if len(a.Config.Processors) != 0 { + next, pu, err = a.startProcessors(next, a.Config.Processors) + if err != nil { + return err + } + } + + iu, err := a.startInputs(next, a.Config.Inputs) + if err != nil { + return err + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err = a.runOutputs(ou) + if err != nil { + log.Printf("E! [agent] Error running outputs: %v", err) + } + }() + + if au != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(apu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + err = a.runAggregators(startTime, au) + if err != nil { + log.Printf("E! [agent] Error running aggregators: %v", err) + } + }() + } + + if pu != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(pu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + } + + wg.Add(1) + go func() { + defer wg.Done() + err = a.runInputs(ctx, startTime, iu) + if err != nil { + log.Printf("E! [agent] Error running inputs: %v", err) + } + }() + + wg.Wait() + + log.Printf("D! [agent] Stopped Successfully") + return err +} + +// initPlugins runs the Init function on plugins. +func (a *Agent) initPlugins() error { + for _, input := range a.Config.Inputs { + err := input.Init() + if err != nil { + return fmt.Errorf("could not initialize input %s: %v", + input.LogName(), err) + } + } + for _, processor := range a.Config.Processors { + err := processor.Init() + if err != nil { + return fmt.Errorf("could not initialize processor %s: %v", + processor.Config.Name, err) + } + } + for _, aggregator := range a.Config.Aggregators { + err := aggregator.Init() + if err != nil { + return fmt.Errorf("could not initialize aggregator %s: %v", + aggregator.Config.Name, err) + } + } + for _, output := range a.Config.Outputs { + err := output.Init() + if err != nil { + return fmt.Errorf("could not initialize output %s: %v", + output.Config.Name, err) + } } return nil } -// Close closes the connection to all configured outputs -func (a *Agent) Close() error { - var err error - for _, o := range a.Config.Outputs { - err = o.Output.Close() - switch ot := o.Output.(type) { - case telegraf.ServiceOutput: - ot.Stop() +func (a *Agent) startInputs( + dst chan<- telegraf.Metric, + inputs []*models.RunningInput, +) (*inputUnit, error) { + log.Printf("D! [agent] Starting service inputs") + + unit := &inputUnit{ + dst: dst, + } + + for _, input := range inputs { + if si, ok := input.Input.(telegraf.ServiceInput); ok { + // Service input plugins are not subject to timestamp rounding. + // This only applies to the accumulator passed to Start(), the + // Gather() accumulator does apply rounding according to the + // precision agent setting. 
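`initPlugins` gives every plugin a chance to validate its configuration once, before the pipeline starts. A hedged sketch of an input that takes advantage of this, assuming the standard Input interface; `exampleInput` and its `endpoint` field are hypothetical:

```go
package example

import (
	"errors"

	"github.com/influxdata/telegraf"
)

type exampleInput struct {
	Endpoint string `toml:"endpoint"`
}

func (e *exampleInput) Description() string  { return "hypothetical input used to illustrate Init" }
func (e *exampleInput) SampleConfig() string { return "  endpoint = \"http://localhost:1234\"\n" }

// Init is run once by the agent before any Gather call; failing here aborts
// startup with a "could not initialize input" error instead of failing on
// every collection interval.
func (e *exampleInput) Init() error {
	if e.Endpoint == "" {
		return errors.New("endpoint must be configured")
	}
	return nil
}

func (e *exampleInput) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("example", map[string]interface{}{"up": 1}, map[string]string{})
	return nil
}
```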
+ acc := NewAccumulator(input, dst) + acc.SetPrecision(time.Nanosecond) + + err := si.Start(acc) + if err != nil { + stopServiceInputs(unit.inputs) + return nil, fmt.Errorf("starting input %s: %w", input.LogName(), err) + } + } + unit.inputs = append(unit.inputs, input) + } + + return unit, nil +} + +// runInputs starts and triggers the periodic gather for Inputs. +// +// When the context is done the timers are stopped and this function returns +// after all ongoing Gather calls complete. +func (a *Agent) runInputs( + ctx context.Context, + startTime time.Time, + unit *inputUnit, +) error { + var wg sync.WaitGroup + for _, input := range unit.inputs { + interval := a.Config.Agent.Interval.Duration + jitter := a.Config.Agent.CollectionJitter.Duration + + // Overwrite agent interval if this plugin has its own. + if input.Config.Interval != 0 { + interval = input.Config.Interval + } + + var ticker Ticker + if a.Config.Agent.RoundInterval { + ticker = NewAlignedTicker(startTime, interval, jitter) + } else { + ticker = NewUnalignedTicker(interval, jitter) + } + defer ticker.Stop() + + acc := NewAccumulator(input, unit.dst) + acc.SetPrecision(a.Precision()) + + wg.Add(1) + go func(input *models.RunningInput) { + defer wg.Done() + a.gatherLoop(ctx, acc, input, ticker) + }(input) + } + + wg.Wait() + + log.Printf("D! [agent] Stopping service inputs") + stopServiceInputs(unit.inputs) + + close(unit.dst) + log.Printf("D! [agent] Input channel closed") + + return nil +} + +// testStartInputs is a variation of startInputs for use in --test and --once +// mode. It differs by logging Start errors and returning only plugins +// successfully started. +func (a *Agent) testStartInputs( + dst chan<- telegraf.Metric, + inputs []*models.RunningInput, +) (*inputUnit, error) { + log.Printf("D! [agent] Starting service inputs") + + unit := &inputUnit{ + dst: dst, + } + + for _, input := range inputs { + if si, ok := input.Input.(telegraf.ServiceInput); ok { + // Service input plugins are not subject to timestamp rounding. + // This only applies to the accumulator passed to Start(), the + // Gather() accumulator does apply rounding according to the + // precision agent setting. + acc := NewAccumulator(input, dst) + acc.SetPrecision(time.Nanosecond) + + err := si.Start(acc) + if err != nil { + log.Printf("E! [agent] Starting input %s: %v", input.LogName(), err) + } + + } + + unit.inputs = append(unit.inputs, input) + } + + return unit, nil +} + +// testRunInputs is a variation of runInputs for use in --test and --once mode. +// Instead of using a ticker to run the inputs they are called once immediately. +func (a *Agent) testRunInputs( + ctx context.Context, + wait time.Duration, + unit *inputUnit, +) error { + var wg sync.WaitGroup + + nul := make(chan telegraf.Metric) + go func() { + for range nul { + } + }() + + for _, input := range unit.inputs { + wg.Add(1) + go func(input *models.RunningInput) { + defer wg.Done() + + // Run plugins that require multiple gathers to calculate rate + // and delta metrics twice. + switch input.Config.Name { + case "cpu", "mongodb", "procstat": + nulAcc := NewAccumulator(input, nul) + nulAcc.SetPrecision(a.Precision()) + if err := input.Input.Gather(nulAcc); err != nil { + nulAcc.AddError(err) + } + + time.Sleep(500 * time.Millisecond) + } + + acc := NewAccumulator(input, unit.dst) + acc.SetPrecision(a.Precision()) + + if err := input.Input.Gather(acc); err != nil { + acc.AddError(err) + } + }(input) + } + wg.Wait() + + internal.SleepContext(ctx, wait) + + log.Printf("D! 
[agent] Stopping service inputs") + stopServiceInputs(unit.inputs) + + close(unit.dst) + log.Printf("D! [agent] Input channel closed") + return nil +} + +// stopServiceInputs stops all service inputs. +func stopServiceInputs(inputs []*models.RunningInput) { + for _, input := range inputs { + if si, ok := input.Input.(telegraf.ServiceInput); ok { + si.Stop() } } - return err } -func panicRecover(input *models.RunningInput) { - if err := recover(); err != nil { - trace := make([]byte, 2048) - runtime.Stack(trace, true) - log.Printf("E! FATAL: Input [%s] panicked: %s, Stack:\n%s\n", - input.Name(), err, trace) - log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " + - "stack trace, configuration, and OS information: " + - "https://github.com/influxdata/telegraf/issues/new") - } -} - -// gatherer runs the inputs that have been configured with their own -// reporting interval. -func (a *Agent) gatherer( - shutdown chan struct{}, +// gather runs an input's gather function periodically until the context is +// done. +func (a *Agent) gatherLoop( + ctx context.Context, + acc telegraf.Accumulator, input *models.RunningInput, - interval time.Duration, - metricC chan telegraf.Metric, + ticker Ticker, ) { defer panicRecover(input) - GatherTime := selfstat.RegisterTiming("gather", - "gather_time_ns", - map[string]string{"input": input.Config.Name}, - ) - - acc := NewAccumulator(input, metricC) - acc.SetPrecision(a.Config.Agent.Precision.Duration, - a.Config.Agent.Interval.Duration) - - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - internal.RandomSleep(a.Config.Agent.CollectionJitter.Duration, shutdown) - - start := time.Now() - gatherWithTimeout(shutdown, input, acc, interval) - elapsed := time.Since(start) - - GatherTime.Incr(elapsed.Nanoseconds()) - select { - case <-shutdown: + case <-ticker.Elapsed(): + err := a.gatherOnce(acc, input, ticker) + if err != nil { + acc.AddError(err) + } + case <-ctx.Done(): return - case <-ticker.C: - continue } } } -// gatherWithTimeout gathers from the given input, with the given timeout. -// when the given timeout is reached, gatherWithTimeout logs an error message -// but continues waiting for it to return. This is to avoid leaving behind -// hung processes, and to prevent re-calling the same hung process over and -// over. -func gatherWithTimeout( - shutdown chan struct{}, - input *models.RunningInput, +// gatherOnce runs the input's Gather function once, logging a warning each +// interval it fails to complete before. +func (a *Agent) gatherOnce( acc telegraf.Accumulator, - timeout time.Duration, -) { - ticker := time.NewTicker(timeout) - defer ticker.Stop() + input *models.RunningInput, + ticker Ticker, +) error { done := make(chan error) go func() { - done <- input.Input.Gather(acc) + done <- input.Gather(acc) }() for { select { case err := <-done: - if err != nil { - acc.AddError(err) + return err + case <-ticker.Elapsed(): + log.Printf("W! [agent] [%s] did not complete within its interval", + input.LogName()) + } + } +} + +// startProcessors sets up the processor chain and calls Start on all +// processors. If an error occurs any started processors are Stopped. 
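The `gatherOnce` pattern (run the blocking call in a goroutine and keep warning each time the interval elapses before it returns) can be read in isolation. A standalone sketch, not part of the agent; `callWithWarning` is a hypothetical name:

```go
package example

import (
	"log"
	"time"
)

// callWithWarning mirrors gatherOnce: it never abandons a hung call, it only
// keeps logging that the call has overrun its interval.
func callWithWarning(name string, interval time.Duration, f func() error) error {
	done := make(chan error, 1)
	go func() { done <- f() }()

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case err := <-done:
			return err
		case <-ticker.C:
			log.Printf("W! [%s] did not complete within its interval", name)
		}
	}
}
```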
+func (a *Agent) startProcessors( + dst chan<- telegraf.Metric, + processors models.RunningProcessors, +) (chan<- telegraf.Metric, []*processorUnit, error) { + var units []*processorUnit + + // Sort from last to first + sort.SliceStable(processors, func(i, j int) bool { + return processors[i].Config.Order > processors[j].Config.Order + }) + + var src chan telegraf.Metric + for _, processor := range processors { + src = make(chan telegraf.Metric, 100) + acc := NewAccumulator(processor, dst) + + err := processor.Start(acc) + if err != nil { + for _, u := range units { + u.processor.Stop() + close(u.dst) } - return - case <-ticker.C: - err := fmt.Errorf("took longer to collect than collection interval (%s)", - timeout) - acc.AddError(err) - continue - case <-shutdown: + return nil, nil, fmt.Errorf("starting processor %s: %w", processor.LogName(), err) + } + + units = append(units, &processorUnit{ + src: src, + dst: dst, + processor: processor, + }) + + dst = src + } + + return src, units, nil +} + +// runProcessors begins processing metrics and runs until the source channel is +// closed and all metrics have been written. +func (a *Agent) runProcessors( + units []*processorUnit, +) error { + var wg sync.WaitGroup + for _, unit := range units { + wg.Add(1) + go func(unit *processorUnit) { + defer wg.Done() + + acc := NewAccumulator(unit.processor, unit.dst) + for m := range unit.src { + unit.processor.Add(m, acc) + } + unit.processor.Stop() + close(unit.dst) + log.Printf("D! [agent] Processor channel closed") + }(unit) + } + wg.Wait() + + return nil +} + +// startAggregators sets up the aggregator unit and returns the source channel. +func (a *Agent) startAggregators( + aggC chan<- telegraf.Metric, + outputC chan<- telegraf.Metric, + aggregators []*models.RunningAggregator, +) (chan<- telegraf.Metric, *aggregatorUnit, error) { + src := make(chan telegraf.Metric, 100) + unit := &aggregatorUnit{ + src: src, + aggC: aggC, + outputC: outputC, + aggregators: aggregators, + } + return src, unit, nil +} + +// runAggregators beings aggregating metrics and runs until the source channel +// is closed and all metrics have been written. +func (a *Agent) runAggregators( + startTime time.Time, + unit *aggregatorUnit, +) error { + ctx, cancel := context.WithCancel(context.Background()) + + // Before calling Add, initialize the aggregation window. This ensures + // that any metric created after start time will be aggregated. + for _, agg := range a.Config.Aggregators { + since, until := updateWindow(startTime, a.Config.Agent.RoundInterval, agg.Period()) + agg.UpdateWindow(since, until) + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for metric := range unit.src { + var dropOriginal bool + for _, agg := range a.Config.Aggregators { + if ok := agg.Add(metric); ok { + dropOriginal = true + } + } + + if !dropOriginal { + unit.outputC <- metric // keep original. + } else { + metric.Drop() + } + } + cancel() + }() + + for _, agg := range a.Config.Aggregators { + wg.Add(1) + go func(agg *models.RunningAggregator) { + defer wg.Done() + + acc := NewAccumulator(agg, unit.aggC) + acc.SetPrecision(a.Precision()) + a.push(ctx, agg, acc) + }(agg) + } + + wg.Wait() + + // In the case that there are no processors, both aggC and outputC are the + // same channel. If there are processors, we close the aggC and the + // processor chain will close the outputC when it finishes processing. + close(unit.aggC) + log.Printf("D! 
[agent] Aggregator channel closed") + + return nil +} + +func updateWindow(start time.Time, roundInterval bool, period time.Duration) (time.Time, time.Time) { + var until time.Time + if roundInterval { + until = internal.AlignTime(start, period) + if until == start { + until = internal.AlignTime(start.Add(time.Nanosecond), period) + } + } else { + until = start.Add(period) + } + + since := until.Add(-period) + + return since, until +} + +// push runs the push for a single aggregator every period. +func (a *Agent) push( + ctx context.Context, + aggregator *models.RunningAggregator, + acc telegraf.Accumulator, +) { + for { + // Ensures that Push will be called for each period, even if it has + // already elapsed before this function is called. This is guaranteed + // because so long as only Push updates the EndPeriod. This method + // also avoids drift by not using a ticker. + until := time.Until(aggregator.EndPeriod()) + + select { + case <-time.After(until): + aggregator.Push(acc) + break + case <-ctx.Done(): + aggregator.Push(acc) return } } } -// Test verifies that we can 'Gather' from all inputs with their configured -// Config struct -func (a *Agent) Test() error { - shutdown := make(chan struct{}) - defer close(shutdown) - metricC := make(chan telegraf.Metric) - - // dummy receiver for the point channel - go func() { - for { - select { - case <-metricC: - // do nothing - case <-shutdown: - return +// startOutputs calls Connect on all outputs and returns the source channel. +// If an error occurs calling Connect all stared plugins have Close called. +func (a *Agent) startOutputs( + ctx context.Context, + outputs []*models.RunningOutput, +) (chan<- telegraf.Metric, *outputUnit, error) { + src := make(chan telegraf.Metric, 100) + unit := &outputUnit{src: src} + for _, output := range outputs { + err := a.connectOutput(ctx, output) + if err != nil { + for _, output := range unit.outputs { + output.Close() } - } - }() - - for _, input := range a.Config.Inputs { - if _, ok := input.Input.(telegraf.ServiceInput); ok { - fmt.Printf("\nWARNING: skipping plugin [[%s]]: service inputs not supported in --test mode\n", - input.Name()) - continue + return nil, nil, fmt.Errorf("connecting output %s: %w", output.LogName(), err) } - acc := NewAccumulator(input, metricC) - acc.SetPrecision(a.Config.Agent.Precision.Duration, - a.Config.Agent.Interval.Duration) - input.SetTrace(true) - input.SetDefaultTags(a.Config.Tags) + unit.outputs = append(unit.outputs, output) + } - if err := input.Input.Gather(acc); err != nil { + return src, unit, nil +} + +// connectOutputs connects to all outputs. +func (a *Agent) connectOutput(ctx context.Context, output *models.RunningOutput) error { + log.Printf("D! [agent] Attempting connection to [%s]", output.LogName()) + err := output.Output.Connect() + if err != nil { + log.Printf("E! [agent] Failed to connect to [%s], retrying in 15s, "+ + "error was '%s'", output.LogName(), err) + + err := internal.SleepContext(ctx, 15*time.Second) + if err != nil { return err } - // Special instructions for some inputs. cpu, for example, needs to be - // run twice in order to return cpu usage percentages. - switch input.Name() { - case "inputs.cpu", "inputs.mongodb", "inputs.procstat": - time.Sleep(500 * time.Millisecond) - if err := input.Input.Gather(acc); err != nil { - return err - } + err = output.Output.Connect() + if err != nil { + return fmt.Errorf("Error connecting to output %q: %w", output.LogName(), err) } - } + log.Printf("D! 
[agent] Successfully connected to %s", output.LogName()) return nil } -// flush writes a list of metrics to all configured outputs -func (a *Agent) flush() { +// runOutputs begins processing metrics and returns until the source channel is +// closed and all metrics have been written. On shutdown metrics will be +// written one last time and dropped if unsuccessful. +func (a *Agent) runOutputs( + unit *outputUnit, +) error { var wg sync.WaitGroup - wg.Add(len(a.Config.Outputs)) - for _, o := range a.Config.Outputs { + // Start flush loop + interval := a.Config.Agent.FlushInterval.Duration + jitter := a.Config.Agent.FlushJitter.Duration + + ctx, cancel := context.WithCancel(context.Background()) + + for _, output := range unit.outputs { + interval := interval + // Overwrite agent flush_interval if this plugin has its own. + if output.Config.FlushInterval != 0 { + interval = output.Config.FlushInterval + } + + jitter := jitter + // Overwrite agent flush_jitter if this plugin has its own. + if output.Config.FlushJitter != nil { + jitter = *output.Config.FlushJitter + } + + wg.Add(1) go func(output *models.RunningOutput) { defer wg.Done() - err := output.Write() - if err != nil { - log.Printf("E! Error writing to output [%s]: %s\n", - output.Name, err.Error()) - } - }(o) + + ticker := NewRollingTicker(interval, jitter) + defer ticker.Stop() + + a.flushLoop(ctx, output, ticker) + }(output) } + for metric := range unit.src { + for i, output := range unit.outputs { + if i == len(a.Config.Outputs)-1 { + output.AddMetric(metric) + } else { + output.AddMetric(metric.Copy()) + } + } + } + + log.Println("I! [agent] Hang on, flushing any cached metrics before shutdown") + cancel() wg.Wait() -} -// flusher monitors the metrics input channel and flushes on the minimum interval -func (a *Agent) flusher(shutdown chan struct{}, metricC chan telegraf.Metric, aggC chan telegraf.Metric) error { - // Inelegant, but this sleep is to allow the Gather threads to run, so that - // the flusher will flush after metrics are collected. - time.Sleep(time.Millisecond * 300) - - // create an output metric channel and a gorouting that continuously passes - // each metric onto the output plugins & aggregators. - outMetricC := make(chan telegraf.Metric, 100) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-shutdown: - if len(outMetricC) > 0 { - // keep going until outMetricC is flushed - continue - } - return - case m := <-outMetricC: - // if dropOriginal is set to true, then we will only send this - // metric to the aggregators, not the outputs. - var dropOriginal bool - for _, agg := range a.Config.Aggregators { - if ok := agg.Add(m.Copy()); ok { - dropOriginal = true - } - } - if !dropOriginal { - for i, o := range a.Config.Outputs { - if i == len(a.Config.Outputs)-1 { - o.AddMetric(m) - } else { - o.AddMetric(m.Copy()) - } - } - } - } - } - }() - - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-shutdown: - if len(aggC) > 0 { - // keep going until aggC is flushed - continue - } - return - case metric := <-aggC: - metrics := []telegraf.Metric{metric} - for _, processor := range a.Config.Processors { - metrics = processor.Apply(metrics...) 
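`runOutputs` hands the original metric to the last output and copies it for all the others, so each output owns the metric it buffers. The same rule in isolation, as a hypothetical helper:

```go
package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/models"
)

// fanOut delivers one metric to every output; only the final output receives
// the original, all others receive an independent copy.
func fanOut(m telegraf.Metric, outputs []*models.RunningOutput) {
	for i, output := range outputs {
		if i == len(outputs)-1 {
			output.AddMetric(m)
		} else {
			output.AddMetric(m.Copy())
		}
	}
}
```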
- } - for _, m := range metrics { - for i, o := range a.Config.Outputs { - if i == len(a.Config.Outputs)-1 { - o.AddMetric(m) - } else { - o.AddMetric(m.Copy()) - } - } - } - } - } - }() - - ticker := time.NewTicker(a.Config.Agent.FlushInterval.Duration) - semaphore := make(chan struct{}, 1) - for { - select { - case <-shutdown: - log.Println("I! Hang on, flushing any cached metrics before shutdown") - // wait for outMetricC to get flushed before flushing outputs - wg.Wait() - a.flush() - return nil - case <-ticker.C: - go func() { - select { - case semaphore <- struct{}{}: - internal.RandomSleep(a.Config.Agent.FlushJitter.Duration, shutdown) - a.flush() - <-semaphore - default: - // skipping this flush because one is already happening - log.Println("W! Skipping a scheduled flush because there is" + - " already a flush ongoing.") - } - }() - case metric := <-metricC: - // NOTE potential bottleneck here as we put each metric through the - // processors serially. - mS := []telegraf.Metric{metric} - for _, processor := range a.Config.Processors { - mS = processor.Apply(mS...) - } - for _, m := range mS { - outMetricC <- m - } - } - } -} - -// Run runs the agent daemon, gathering every Interval -func (a *Agent) Run(shutdown chan struct{}) error { - var wg sync.WaitGroup - - log.Printf("I! Agent Config: Interval:%s, Quiet:%#v, Hostname:%#v, "+ - "Flush Interval:%s \n", - a.Config.Agent.Interval.Duration, a.Config.Agent.Quiet, - a.Config.Agent.Hostname, a.Config.Agent.FlushInterval.Duration) - - // channel shared between all input threads for accumulating metrics - metricC := make(chan telegraf.Metric, 100) - aggC := make(chan telegraf.Metric, 100) - - // Round collection to nearest interval by sleeping - if a.Config.Agent.RoundInterval { - i := int64(a.Config.Agent.Interval.Duration) - time.Sleep(time.Duration(i - (time.Now().UnixNano() % i))) - } - - wg.Add(1) - go func() { - defer wg.Done() - if err := a.flusher(shutdown, metricC, aggC); err != nil { - log.Printf("E! Flusher routine failed, exiting: %s\n", err.Error()) - close(shutdown) - } - }() - - wg.Add(len(a.Config.Aggregators)) - for _, aggregator := range a.Config.Aggregators { - go func(agg *models.RunningAggregator) { - defer wg.Done() - acc := NewAccumulator(agg, aggC) - acc.SetPrecision(a.Config.Agent.Precision.Duration, - a.Config.Agent.Interval.Duration) - agg.Run(acc, shutdown) - }(aggregator) - } - - // Service inputs may immediately add metrics, if metrics are added before - // the aggregator starts they will be dropped. Generally this occurs - // only during testing but it is an outstanding issue. - // - // https://github.com/influxdata/telegraf/issues/4394 - for _, input := range a.Config.Inputs { - input.SetDefaultTags(a.Config.Tags) - switch p := input.Input.(type) { - case telegraf.ServiceInput: - acc := NewAccumulator(input, metricC) - // Service input plugins should set their own precision of their - // metrics. - acc.SetPrecision(time.Nanosecond, 0) - if err := p.Start(acc); err != nil { - log.Printf("E! Service for input %s failed to start, exiting\n%s\n", - input.Name(), err.Error()) - return err - } - defer p.Stop() - } - } - - wg.Add(len(a.Config.Inputs)) - for _, input := range a.Config.Inputs { - interval := a.Config.Agent.Interval.Duration - // overwrite global interval if this plugin has it's own. 
- if input.Config.Interval != 0 { - interval = input.Config.Interval - } - go func(in *models.RunningInput, interv time.Duration) { - defer wg.Done() - a.gatherer(shutdown, in, interv, metricC) - }(input, interval) - } - - wg.Wait() - a.Close() return nil } + +// flushLoop runs an output's flush function periodically until the context is +// done. +func (a *Agent) flushLoop( + ctx context.Context, + output *models.RunningOutput, + ticker Ticker, +) { + logError := func(err error) { + if err != nil { + log.Printf("E! [agent] Error writing to %s: %v", output.LogName(), err) + } + } + + // watch for flush requests + flushRequested := make(chan os.Signal, 1) + watchForFlushSignal(flushRequested) + defer stopListeningForFlushSignal(flushRequested) + + for { + // Favor shutdown over other methods. + select { + case <-ctx.Done(): + logError(a.flushOnce(output, ticker, output.Write)) + return + default: + } + + select { + case <-ctx.Done(): + logError(a.flushOnce(output, ticker, output.Write)) + return + case <-ticker.Elapsed(): + logError(a.flushOnce(output, ticker, output.Write)) + case <-flushRequested: + logError(a.flushOnce(output, ticker, output.Write)) + case <-output.BatchReady: + // Favor the ticker over batch ready + select { + case <-ticker.Elapsed(): + logError(a.flushOnce(output, ticker, output.Write)) + default: + logError(a.flushOnce(output, ticker, output.WriteBatch)) + } + } + } +} + +// flushOnce runs the output's Write function once, logging a warning each +// interval it fails to complete before. +func (a *Agent) flushOnce( + output *models.RunningOutput, + ticker Ticker, + writeFunc func() error, +) error { + done := make(chan error) + go func() { + done <- writeFunc() + }() + + for { + select { + case err := <-done: + output.LogBufferStatus() + return err + case <-ticker.Elapsed(): + log.Printf("W! [agent] [%q] did not complete within its flush interval", + output.LogName()) + output.LogBufferStatus() + } + } +} + +// Test runs the inputs, processors and aggregators for a single gather and +// writes the metrics to stdout. +func (a *Agent) Test(ctx context.Context, wait time.Duration) error { + src := make(chan telegraf.Metric, 100) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + s := influx.NewSerializer() + s.SetFieldSortOrder(influx.SortFields) + + for metric := range src { + octets, err := s.Serialize(metric) + if err == nil { + fmt.Print("> ", string(octets)) + } + metric.Reject() + } + }() + + err := a.test(ctx, wait, src) + if err != nil { + return err + } + + wg.Wait() + + if models.GlobalGatherErrors.Get() != 0 { + return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) + } + return nil +} + +// Test runs the agent and performs a single gather sending output to the +// outputF. After gathering pauses for the wait duration to allow service +// inputs to run. +func (a *Agent) test(ctx context.Context, wait time.Duration, outputC chan<- telegraf.Metric) error { + log.Printf("D! 
[agent] Initializing plugins") + err := a.initPlugins() + if err != nil { + return err + } + + startTime := time.Now() + + next := outputC + + var apu []*processorUnit + var au *aggregatorUnit + if len(a.Config.Aggregators) != 0 { + procC := next + if len(a.Config.AggProcessors) != 0 { + procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) + if err != nil { + return err + } + } + + next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) + if err != nil { + return err + } + } + + var pu []*processorUnit + if len(a.Config.Processors) != 0 { + next, pu, err = a.startProcessors(next, a.Config.Processors) + if err != nil { + return err + } + } + + iu, err := a.testStartInputs(next, a.Config.Inputs) + if err != nil { + return err + } + + var wg sync.WaitGroup + + if au != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(apu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + err = a.runAggregators(startTime, au) + if err != nil { + log.Printf("E! [agent] Error running aggregators: %v", err) + } + }() + } + + if pu != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(pu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + } + + wg.Add(1) + go func() { + defer wg.Done() + err = a.testRunInputs(ctx, wait, iu) + if err != nil { + log.Printf("E! [agent] Error running inputs: %v", err) + } + }() + + wg.Wait() + + log.Printf("D! [agent] Stopped Successfully") + + return nil +} + +// Once runs the full agent for a single gather. +func (a *Agent) Once(ctx context.Context, wait time.Duration) error { + err := a.once(ctx, wait) + if err != nil { + return err + } + + if models.GlobalGatherErrors.Get() != 0 { + return fmt.Errorf("input plugins recorded %d errors", models.GlobalGatherErrors.Get()) + } + + unsent := 0 + for _, output := range a.Config.Outputs { + unsent += output.BufferLength() + } + if unsent != 0 { + return fmt.Errorf("output plugins unable to send %d metrics", unsent) + } + return nil +} + +// On runs the agent and performs a single gather sending output to the +// outputF. After gathering pauses for the wait duration to allow service +// inputs to run. +func (a *Agent) once(ctx context.Context, wait time.Duration) error { + log.Printf("D! [agent] Initializing plugins") + err := a.initPlugins() + if err != nil { + return err + } + + startTime := time.Now() + + log.Printf("D! [agent] Connecting outputs") + next, ou, err := a.startOutputs(ctx, a.Config.Outputs) + if err != nil { + return err + } + + var apu []*processorUnit + var au *aggregatorUnit + if len(a.Config.Aggregators) != 0 { + procC := next + if len(a.Config.AggProcessors) != 0 { + procC, apu, err = a.startProcessors(next, a.Config.AggProcessors) + if err != nil { + return err + } + } + + next, au, err = a.startAggregators(procC, next, a.Config.Aggregators) + if err != nil { + return err + } + } + + var pu []*processorUnit + if len(a.Config.Processors) != 0 { + next, pu, err = a.startProcessors(next, a.Config.Processors) + if err != nil { + return err + } + } + + iu, err := a.testStartInputs(next, a.Config.Inputs) + if err != nil { + return err + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err = a.runOutputs(ou) + if err != nil { + log.Printf("E! 
[agent] Error running outputs: %v", err) + } + }() + + if au != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(apu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + err = a.runAggregators(startTime, au) + if err != nil { + log.Printf("E! [agent] Error running aggregators: %v", err) + } + }() + } + + if pu != nil { + wg.Add(1) + go func() { + defer wg.Done() + err = a.runProcessors(pu) + if err != nil { + log.Printf("E! [agent] Error running processors: %v", err) + } + }() + } + + wg.Add(1) + go func() { + defer wg.Done() + err = a.testRunInputs(ctx, wait, iu) + if err != nil { + log.Printf("E! [agent] Error running inputs: %v", err) + } + }() + + wg.Wait() + + log.Printf("D! [agent] Stopped Successfully") + + return nil +} + +// Returns the rounding precision for metrics. +func (a *Agent) Precision() time.Duration { + precision := a.Config.Agent.Precision.Duration + interval := a.Config.Agent.Interval.Duration + + if precision > 0 { + return precision + } + + switch { + case interval >= time.Second: + return time.Second + case interval >= time.Millisecond: + return time.Millisecond + case interval >= time.Microsecond: + return time.Microsecond + default: + return time.Nanosecond + } +} + +// panicRecover displays an error if an input panics. +func panicRecover(input *models.RunningInput) { + if err := recover(); err != nil { + trace := make([]byte, 2048) + runtime.Stack(trace, true) + log.Printf("E! FATAL: [%s] panicked: %s, Stack:\n%s", + input.LogName(), err, trace) + log.Println("E! PLEASE REPORT THIS PANIC ON GITHUB with " + + "stack trace, configuration, and OS information: " + + "https://github.com/influxdata/telegraf/issues/new/choose") + } +} diff --git a/agent/agent_posix.go b/agent/agent_posix.go new file mode 100644 index 000000000..09552cac0 --- /dev/null +++ b/agent/agent_posix.go @@ -0,0 +1,19 @@ +// +build !windows + +package agent + +import ( + "os" + "os/signal" + "syscall" +) + +const flushSignal = syscall.SIGUSR1 + +func watchForFlushSignal(flushRequested chan os.Signal) { + signal.Notify(flushRequested, flushSignal) +} + +func stopListeningForFlushSignal(flushRequested chan os.Signal) { + defer signal.Stop(flushRequested) +} diff --git a/agent/agent_test.go b/agent/agent_test.go index a5920ce1c..9cc631b17 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -2,15 +2,13 @@ package agent import ( "testing" + "time" - "github.com/influxdata/telegraf/internal/config" - - // needing to load the plugins + "github.com/influxdata/telegraf/config" _ "github.com/influxdata/telegraf/plugins/inputs/all" - // needing to load the outputs _ "github.com/influxdata/telegraf/plugins/outputs/all" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAgent_OmitHostname(t *testing.T) { @@ -24,35 +22,35 @@ func TestAgent_OmitHostname(t *testing.T) { func TestAgent_LoadPlugin(t *testing.T) { c := config.NewConfig() c.InputFilters = []string{"mysql"} - err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err := c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ := NewAgent(c) assert.Equal(t, 1, len(a.Config.Inputs)) c = config.NewConfig() c.InputFilters = []string{"foo"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 0, len(a.Config.Inputs)) 
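On POSIX systems the flush loop also listens for SIGUSR1 (registered in agent_posix.go above), so an operator or supervisor can force an immediate flush. A hedged sketch of triggering that from another Go process; `readPidFile` is a hypothetical helper and the pid-file path is an assumption, not something Telegraf mandates:

```go
// +build !windows

package main

import (
	"io/ioutil"
	"log"
	"os"
	"strconv"
	"strings"
	"syscall"
)

// readPidFile is a hypothetical helper that reads a process id from disk.
func readPidFile(path string) (int, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(b)))
}

func main() {
	pid, err := readPidFile("/var/run/telegraf.pid") // assumed location
	if err != nil {
		log.Fatal(err)
	}
	proc, err := os.FindProcess(pid)
	if err != nil {
		log.Fatal(err)
	}
	// SIGUSR1 is picked up by flushLoop via the flushRequested channel.
	if err := proc.Signal(syscall.SIGUSR1); err != nil {
		log.Fatal(err)
	}
}
```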
c = config.NewConfig() c.InputFilters = []string{"mysql", "foo"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 1, len(a.Config.Inputs)) c = config.NewConfig() c.InputFilters = []string{"mysql", "redis"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Inputs)) c = config.NewConfig() c.InputFilters = []string{"mysql", "foo", "redis", "bar"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Inputs)) @@ -61,42 +59,42 @@ func TestAgent_LoadPlugin(t *testing.T) { func TestAgent_LoadOutput(t *testing.T) { c := config.NewConfig() c.OutputFilters = []string{"influxdb"} - err := c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err := c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ := NewAgent(c) assert.Equal(t, 2, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"kafka"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 1, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"foo"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 0, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"influxdb", "foo"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 2, len(a.Config.Outputs)) c = config.NewConfig() c.OutputFilters = []string{"influxdb", "kafka"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) assert.Equal(t, 3, len(c.Outputs)) a, _ = NewAgent(c) @@ -104,8 +102,67 @@ func TestAgent_LoadOutput(t *testing.T) { c = config.NewConfig() c.OutputFilters = []string{"influxdb", "foo", "kafka", "bar"} - err = c.LoadConfig("../internal/config/testdata/telegraf-agent.toml") + err = c.LoadConfig("../config/testdata/telegraf-agent.toml") assert.NoError(t, err) a, _ = NewAgent(c) assert.Equal(t, 3, len(a.Config.Outputs)) } + +func TestWindow(t *testing.T) { + parse := func(s string) time.Time { + tm, err := time.Parse(time.RFC3339, s) + if err != nil { + panic(err) + } + return tm + } + + tests := []struct { + name string + start time.Time + roundInterval bool + period time.Duration + since time.Time + until time.Time + }{ + { + name: "round with exact alignment", + start: parse("2018-03-27T00:00:00Z"), + roundInterval: true, + period: 30 * time.Second, + since: parse("2018-03-27T00:00:00Z"), + until: parse("2018-03-27T00:00:30Z"), + }, + { + 
name: "round with alignment needed", + start: parse("2018-03-27T00:00:05Z"), + roundInterval: true, + period: 30 * time.Second, + since: parse("2018-03-27T00:00:00Z"), + until: parse("2018-03-27T00:00:30Z"), + }, + { + name: "no round with exact alignment", + start: parse("2018-03-27T00:00:00Z"), + roundInterval: false, + period: 30 * time.Second, + since: parse("2018-03-27T00:00:00Z"), + until: parse("2018-03-27T00:00:30Z"), + }, + { + name: "no found with alignment needed", + start: parse("2018-03-27T00:00:05Z"), + roundInterval: false, + period: 30 * time.Second, + since: parse("2018-03-27T00:00:05Z"), + until: parse("2018-03-27T00:00:35Z"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + since, until := updateWindow(tt.start, tt.roundInterval, tt.period) + require.Equal(t, tt.since, since, "since") + require.Equal(t, tt.until, until, "until") + }) + } +} diff --git a/agent/agent_windows.go b/agent/agent_windows.go new file mode 100644 index 000000000..94ed9d006 --- /dev/null +++ b/agent/agent_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package agent + +import "os" + +func watchForFlushSignal(flushRequested chan os.Signal) { + // not supported +} + +func stopListeningForFlushSignal(flushRequested chan os.Signal) { + // not supported +} diff --git a/agent/tick.go b/agent/tick.go new file mode 100644 index 000000000..93e3a3d76 --- /dev/null +++ b/agent/tick.go @@ -0,0 +1,268 @@ +package agent + +import ( + "context" + "sync" + "time" + + "github.com/benbjohnson/clock" + "github.com/influxdata/telegraf/internal" +) + +type empty struct{} + +type Ticker interface { + Elapsed() <-chan time.Time + Stop() +} + +// AlignedTicker delivers ticks at aligned times plus an optional jitter. Each +// tick is realigned to avoid drift and handle changes to the system clock. +// +// The ticks may have an jitter duration applied to them as an random offset to +// the interval. However the overall pace of is that of the interval, so on +// average you will have one collection each interval. +// +// The first tick is emitted at the next alignment. +// +// Ticks are dropped for slow consumers. +// +// The implementation currently does not recalculate until the next tick with +// no maximum sleep, when using large intervals alignment is not corrected +// until the next tick. 
+type AlignedTicker struct { + interval time.Duration + jitter time.Duration + ch chan time.Time + cancel context.CancelFunc + wg sync.WaitGroup +} + +func NewAlignedTicker(now time.Time, interval, jitter time.Duration) *AlignedTicker { + return newAlignedTicker(now, interval, jitter, clock.New()) +} + +func newAlignedTicker(now time.Time, interval, jitter time.Duration, clock clock.Clock) *AlignedTicker { + ctx, cancel := context.WithCancel(context.Background()) + t := &AlignedTicker{ + interval: interval, + jitter: jitter, + ch: make(chan time.Time, 1), + cancel: cancel, + } + + d := t.next(now) + timer := clock.Timer(d) + + t.wg.Add(1) + go func() { + defer t.wg.Done() + t.run(ctx, timer) + }() + + return t +} + +func (t *AlignedTicker) next(now time.Time) time.Duration { + next := internal.AlignTime(now, t.interval) + d := next.Sub(now) + if d == 0 { + d = t.interval + } + d += internal.RandomDuration(t.jitter) + return d +} + +func (t *AlignedTicker) run(ctx context.Context, timer *clock.Timer) { + for { + select { + case <-ctx.Done(): + timer.Stop() + return + case now := <-timer.C: + select { + case t.ch <- now: + default: + } + + d := t.next(now) + timer.Reset(d) + } + } +} + +func (t *AlignedTicker) Elapsed() <-chan time.Time { + return t.ch +} + +func (t *AlignedTicker) Stop() { + t.cancel() + t.wg.Wait() +} + +// UnalignedTicker delivers ticks at regular but unaligned intervals. No +// effort is made to avoid drift. +// +// The ticks may have an jitter duration applied to them as an random offset to +// the interval. However the overall pace of is that of the interval, so on +// average you will have one collection each interval. +// +// The first tick is emitted immediately. +// +// Ticks are dropped for slow consumers. +type UnalignedTicker struct { + interval time.Duration + jitter time.Duration + ch chan time.Time + cancel context.CancelFunc + wg sync.WaitGroup +} + +func NewUnalignedTicker(interval, jitter time.Duration) *UnalignedTicker { + return newUnalignedTicker(interval, jitter, clock.New()) +} + +func newUnalignedTicker(interval, jitter time.Duration, clock clock.Clock) *UnalignedTicker { + ctx, cancel := context.WithCancel(context.Background()) + t := &UnalignedTicker{ + interval: interval, + jitter: jitter, + ch: make(chan time.Time, 1), + cancel: cancel, + } + + ticker := clock.Ticker(t.interval) + t.ch <- clock.Now() + + t.wg.Add(1) + go func() { + defer t.wg.Done() + t.run(ctx, ticker, clock) + }() + + return t +} + +func sleep(ctx context.Context, duration time.Duration, clock clock.Clock) error { + if duration == 0 { + return nil + } + + t := clock.Timer(duration) + select { + case <-t.C: + return nil + case <-ctx.Done(): + t.Stop() + return ctx.Err() + } +} + +func (t *UnalignedTicker) run(ctx context.Context, ticker *clock.Ticker, clock clock.Clock) { + for { + select { + case <-ctx.Done(): + ticker.Stop() + return + case <-ticker.C: + jitter := internal.RandomDuration(t.jitter) + err := sleep(ctx, jitter, clock) + if err != nil { + ticker.Stop() + return + } + select { + case t.ch <- clock.Now(): + default: + } + } + } +} + +func (t *UnalignedTicker) InjectTick() { + t.ch <- time.Now() +} + +func (t *UnalignedTicker) Elapsed() <-chan time.Time { + return t.ch +} + +func (t *UnalignedTicker) Stop() { + t.cancel() + t.wg.Wait() +} + +// RollingTicker delivers ticks at regular but unaligned intervals. 
+// +// Because the next interval is scheduled based on the interval + jitter, you +// are guaranteed at least interval seconds without missing a tick and ticks +// will be evenly scheduled over time. +// +// On average you will have one collection each interval + (jitter/2). +// +// The first tick is emitted after interval+jitter seconds. +// +// Ticks are dropped for slow consumers. +type RollingTicker struct { + interval time.Duration + jitter time.Duration + ch chan time.Time + cancel context.CancelFunc + wg sync.WaitGroup +} + +func NewRollingTicker(interval, jitter time.Duration) *RollingTicker { + return newRollingTicker(interval, jitter, clock.New()) +} + +func newRollingTicker(interval, jitter time.Duration, clock clock.Clock) *RollingTicker { + ctx, cancel := context.WithCancel(context.Background()) + t := &RollingTicker{ + interval: interval, + jitter: jitter, + ch: make(chan time.Time, 1), + cancel: cancel, + } + + d := t.next() + timer := clock.Timer(d) + + t.wg.Add(1) + go func() { + defer t.wg.Done() + t.run(ctx, timer) + }() + + return t +} + +func (t *RollingTicker) next() time.Duration { + return t.interval + internal.RandomDuration(t.jitter) +} + +func (t *RollingTicker) run(ctx context.Context, timer *clock.Timer) { + for { + select { + case <-ctx.Done(): + timer.Stop() + return + case now := <-timer.C: + select { + case t.ch <- now: + default: + } + + d := t.next() + timer.Reset(d) + } + } +} + +func (t *RollingTicker) Elapsed() <-chan time.Time { + return t.ch +} + +func (t *RollingTicker) Stop() { + t.cancel() + t.wg.Wait() +} diff --git a/agent/tick_test.go b/agent/tick_test.go new file mode 100644 index 000000000..6e9755ceb --- /dev/null +++ b/agent/tick_test.go @@ -0,0 +1,251 @@ +package agent + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/stretchr/testify/require" +) + +var format = "2006-01-02T15:04:05.999Z07:00" + +func TestAlignedTicker(t *testing.T) { + interval := 10 * time.Second + jitter := 0 * time.Second + + clock := clock.NewMock() + since := clock.Now() + until := since.Add(60 * time.Second) + + ticker := newAlignedTicker(since, interval, jitter, clock) + + expected := []time.Time{ + time.Unix(10, 0).UTC(), + time.Unix(20, 0).UTC(), + time.Unix(30, 0).UTC(), + time.Unix(40, 0).UTC(), + time.Unix(50, 0).UTC(), + time.Unix(60, 0).UTC(), + } + + actual := []time.Time{} + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + actual = append(actual, tm.UTC()) + default: + } + clock.Add(10 * time.Second) + } + + require.Equal(t, expected, actual) +} + +func TestAlignedTickerJitter(t *testing.T) { + interval := 10 * time.Second + jitter := 5 * time.Second + + clock := clock.NewMock() + since := clock.Now() + until := since.Add(60 * time.Second) + + ticker := newAlignedTicker(since, interval, jitter, clock) + + last := since + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + require.True(t, tm.Sub(last) <= 15*time.Second) + require.True(t, tm.Sub(last) >= 5*time.Second) + last = last.Add(interval) + default: + } + clock.Add(5 * time.Second) + } +} + +func TestAlignedTickerMissedTick(t *testing.T) { + interval := 10 * time.Second + jitter := 0 * time.Second + + clock := clock.NewMock() + since := clock.Now() + + ticker := newAlignedTicker(since, interval, jitter, clock) + + clock.Add(25 * time.Second) + tm := <-ticker.Elapsed() + require.Equal(t, time.Unix(10, 0).UTC(), tm.UTC()) + clock.Add(5 * time.Second) + tm = <-ticker.Elapsed() + require.Equal(t, 
time.Unix(30, 0).UTC(), tm.UTC()) +} + +func TestUnalignedTicker(t *testing.T) { + interval := 10 * time.Second + jitter := 0 * time.Second + + clock := clock.NewMock() + clock.Add(1 * time.Second) + since := clock.Now() + until := since.Add(60 * time.Second) + + ticker := newUnalignedTicker(interval, jitter, clock) + + expected := []time.Time{ + time.Unix(1, 0).UTC(), + time.Unix(11, 0).UTC(), + time.Unix(21, 0).UTC(), + time.Unix(31, 0).UTC(), + time.Unix(41, 0).UTC(), + time.Unix(51, 0).UTC(), + time.Unix(61, 0).UTC(), + } + + actual := []time.Time{} + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + actual = append(actual, tm.UTC()) + default: + } + clock.Add(10 * time.Second) + } + + require.Equal(t, expected, actual) +} + +func TestRollingTicker(t *testing.T) { + interval := 10 * time.Second + jitter := 0 * time.Second + + clock := clock.NewMock() + clock.Add(1 * time.Second) + since := clock.Now() + until := since.Add(60 * time.Second) + + ticker := newUnalignedTicker(interval, jitter, clock) + + expected := []time.Time{ + time.Unix(1, 0).UTC(), + time.Unix(11, 0).UTC(), + time.Unix(21, 0).UTC(), + time.Unix(31, 0).UTC(), + time.Unix(41, 0).UTC(), + time.Unix(51, 0).UTC(), + time.Unix(61, 0).UTC(), + } + + actual := []time.Time{} + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + actual = append(actual, tm.UTC()) + default: + } + clock.Add(10 * time.Second) + } + + require.Equal(t, expected, actual) +} + +// Simulates running the Ticker for an hour and displays stats about the +// operation. +func TestAlignedTickerDistribution(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + interval := 10 * time.Second + jitter := 5 * time.Second + + clock := clock.NewMock() + since := clock.Now() + + ticker := newAlignedTicker(since, interval, jitter, clock) + dist := simulatedDist(ticker, clock) + printDist(dist) + require.True(t, 350 < dist.Count) + require.True(t, 9 < dist.Mean() && dist.Mean() < 11) +} + +// Simulates running the Ticker for an hour and displays stats about the +// operation. +func TestUnalignedTickerDistribution(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + interval := 10 * time.Second + jitter := 5 * time.Second + + clock := clock.NewMock() + + ticker := newUnalignedTicker(interval, jitter, clock) + dist := simulatedDist(ticker, clock) + printDist(dist) + require.True(t, 350 < dist.Count) + require.True(t, 9 < dist.Mean() && dist.Mean() < 11) +} + +// Simulates running the Ticker for an hour and displays stats about the +// operation. 
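The ticker tests above all share one pattern: advance a benbjohnson/clock mock in fixed steps and drain Elapsed() without blocking. The same loop as a reusable sketch; `collectTicks` is a hypothetical helper, not part of the test suite:

```go
package agent

import (
	"time"

	"github.com/benbjohnson/clock"
)

// collectTicks advances the mock clock in increments of step for a total of d
// and records every tick the Ticker emits along the way.
func collectTicks(ticker Ticker, clk *clock.Mock, d, step time.Duration) []time.Time {
	var ticks []time.Time
	until := clk.Now().Add(d)
	for !clk.Now().After(until) {
		select {
		case tm := <-ticker.Elapsed():
			ticks = append(ticks, tm.UTC())
		default:
		}
		clk.Add(step)
	}
	return ticks
}
```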
+func TestRollingTickerDistribution(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + interval := 10 * time.Second + jitter := 5 * time.Second + + clock := clock.NewMock() + + ticker := newRollingTicker(interval, jitter, clock) + dist := simulatedDist(ticker, clock) + printDist(dist) + require.True(t, 275 < dist.Count) + require.True(t, 12 < dist.Mean() && 13 > dist.Mean()) +} + +type Distribution struct { + Buckets [60]int + Count int + Waittime float64 +} + +func (d *Distribution) Mean() float64 { + return d.Waittime / float64(d.Count) +} + +func printDist(dist Distribution) { + for i, count := range dist.Buckets { + fmt.Printf("%2d %s\n", i, strings.Repeat("x", count)) + } + fmt.Printf("Average interval: %f\n", dist.Mean()) + fmt.Printf("Count: %d\n", dist.Count) +} + +func simulatedDist(ticker Ticker, clock *clock.Mock) Distribution { + since := clock.Now() + until := since.Add(1 * time.Hour) + + var dist Distribution + + last := clock.Now() + for !clock.Now().After(until) { + select { + case tm := <-ticker.Elapsed(): + dist.Buckets[tm.Second()] += 1 + dist.Count++ + dist.Waittime += tm.Sub(last).Seconds() + last = tm + default: + clock.Add(1 * time.Second) + } + } + + return dist +} diff --git a/aggregator.go b/aggregator.go index 48aa8e4bf..f168b04d0 100644 --- a/aggregator.go +++ b/aggregator.go @@ -5,11 +5,7 @@ package telegraf // Add, Push, and Reset can not be called concurrently, so locking is not // required when implementing an Aggregator plugin. type Aggregator interface { - // SampleConfig returns the default configuration of the Input. - SampleConfig() string - - // Description returns a one-sentence description on the Input. - Description() string + PluginDescriber // Add the metric to the aggregator. 
Add(in Metric) diff --git a/appveyor.yml b/appveyor.yml index 76a5ab067..b454c8dc8 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,34 +1,35 @@ version: "{build}" +image: Visual Studio 2019 + cache: - - C:\Cache + - C:\gopath\pkg\mod -> go.sum + - C:\ProgramData\chocolatey\bin -> appveyor.yml + - C:\ProgramData\chocolatey\lib -> appveyor.yml clone_folder: C:\gopath\src\github.com\influxdata\telegraf environment: GOPATH: C:\gopath +stack: go 1.14 + platform: x64 install: - - IF NOT EXIST "C:\Cache" mkdir C:\Cache - - IF NOT EXIST "C:\Cache\go1.10.1.msi" curl -o "C:\Cache\go1.10.1.msi" https://storage.googleapis.com/golang/go1.10.1.windows-amd64.msi - - IF NOT EXIST "C:\Cache\gnuwin32-bin.zip" curl -o "C:\Cache\gnuwin32-bin.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-bin.zip - - IF NOT EXIST "C:\Cache\gnuwin32-dep.zip" curl -o "C:\Cache\gnuwin32-dep.zip" https://dl.influxdata.com/telegraf/ci/make-3.81-dep.zip - - IF EXIST "C:\Go" rmdir /S /Q C:\Go - - msiexec.exe /i "C:\Cache\go1.10.1.msi" /quiet - - 7z x "C:\Cache\gnuwin32-bin.zip" -oC:\GnuWin32 -y - - 7z x "C:\Cache\gnuwin32-dep.zip" -oC:\GnuWin32 -y + - choco install make + - cd "%GOPATH%\src\github.com\influxdata\telegraf" + - git config --system core.longpaths true - go version - go env - - git config --system core.longpaths true build_script: - - cmd: C:\GnuWin32\bin\make deps - - cmd: C:\GnuWin32\bin\make telegraf + - make deps + - make telegraf test_script: - - cmd: C:\GnuWin32\bin\make test-windows + - make check + - make test-windows artifacts: - path: telegraf.exe diff --git a/cmd/telegraf/telegraf.go b/cmd/telegraf/telegraf.go index 57ff846cf..7e0b4ec1c 100644 --- a/cmd/telegraf/telegraf.go +++ b/cmd/telegraf/telegraf.go @@ -1,6 +1,8 @@ package main import ( + "context" + "errors" "flag" "fmt" "log" @@ -8,13 +10,15 @@ import ( _ "net/http/pprof" // Comment this line to disable pprof endpoint. "os" "os/signal" - "runtime" + "sort" "strings" "syscall" + "time" "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/config" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/config" + "github.com/influxdata/telegraf/internal/goplugin" "github.com/influxdata/telegraf/logger" _ "github.com/influxdata/telegraf/plugins/aggregators/all" "github.com/influxdata/telegraf/plugins/inputs" @@ -22,23 +26,26 @@ import ( "github.com/influxdata/telegraf/plugins/outputs" _ "github.com/influxdata/telegraf/plugins/outputs/all" _ "github.com/influxdata/telegraf/plugins/processors/all" - "github.com/kardianos/service" ) +// If you update these, update usage.go and usage_windows.go var fDebug = flag.Bool("debug", false, "turn on debug logging") var pprofAddr = flag.String("pprof-addr", "", "pprof address to listen on, not activate pprof if empty") var fQuiet = flag.Bool("quiet", false, "run in quiet mode") -var fTest = flag.Bool("test", false, "gather metrics, print them out, and exit") +var fTest = flag.Bool("test", false, "enable test mode: gather metrics, print them out, and exit. 
Note: Test mode only runs inputs, not processors, aggregators, or outputs") +var fTestWait = flag.Int("test-wait", 0, "wait up to this many seconds for service inputs to complete in test mode") var fConfig = flag.String("config", "", "configuration file to load") var fConfigDirectory = flag.String("config-directory", "", "directory containing additional *.conf files") -var fVersion = flag.Bool("version", false, "display the version") +var fVersion = flag.Bool("version", false, "display the version and exit") var fSampleConfig = flag.Bool("sample-config", false, "print out full sample configuration") var fPidfile = flag.String("pidfile", "", "file to write our pid to") +var fSectionFilters = flag.String("section-filter", "", + "filter the sections to print, separator is ':'. Valid values are 'agent', 'global_tags', 'outputs', 'processors', 'aggregators' and 'inputs'") var fInputFilters = flag.String("input-filter", "", "filter the inputs to enable, separator is :") var fInputList = flag.Bool("input-list", false, @@ -54,30 +61,23 @@ var fProcessorFilters = flag.String("processor-filter", "", var fUsage = flag.String("usage", "", "print usage for a plugin, ie, 'telegraf --usage mysql'") var fService = flag.String("service", "", - "operate on the service") + "operate on the service (windows only)") +var fServiceName = flag.String("service-name", "telegraf", "service name (windows only)") +var fServiceDisplayName = flag.String("service-display-name", "Telegraf Data Collector Service", "service display name (windows only)") var fRunAsConsole = flag.Bool("console", false, "run as console application (windows only)") +var fPlugins = flag.String("plugin-directory", "", + "path to directory containing external plugins") +var fRunOnce = flag.Bool("once", false, "run one gather and exit") var ( - nextVersion = "1.8.0" - version string - commit string - branch string + version string + commit string + branch string ) -func init() { - // If commit or branch are not set, make that clear. - if commit == "" { - commit = "unknown" - } - if branch == "" { - branch = "unknown" - } -} - var stop chan struct{} func reloadLoop( - stop chan struct{}, inputFilters []string, outputFilters []string, aggregatorFilters []string, @@ -88,148 +88,151 @@ func reloadLoop( for <-reload { reload <- false - // If no other options are specified, load the config file and run. - c := config.NewConfig() - c.OutputFilters = outputFilters - c.InputFilters = inputFilters - err := c.LoadConfig(*fConfig) - if err != nil { - log.Fatal("E! " + err.Error()) - } + ctx, cancel := context.WithCancel(context.Background()) - if *fConfigDirectory != "" { - err = c.LoadDirectory(*fConfigDirectory) - if err != nil { - log.Fatal("E! " + err.Error()) - } - } - if !*fTest && len(c.Outputs) == 0 { - log.Fatalf("E! Error: no outputs found, did you provide a valid config file?") - } - if len(c.Inputs) == 0 { - log.Fatalf("E! Error: no inputs found, did you provide a valid config file?") - } - - if int64(c.Agent.Interval.Duration) <= 0 { - log.Fatalf("E! Agent interval must be positive, found %s", - c.Agent.Interval.Duration) - } - - if int64(c.Agent.FlushInterval.Duration) <= 0 { - log.Fatalf("E! Agent flush_interval must be positive; found %s", - c.Agent.Interval.Duration) - } - - ag, err := agent.NewAgent(c) - if err != nil { - log.Fatal("E! 
" + err.Error()) - } - - // Setup logging - logger.SetupLogging( - ag.Config.Agent.Debug || *fDebug, - ag.Config.Agent.Quiet || *fQuiet, - ag.Config.Agent.Logfile, - ) - - if *fTest { - err = ag.Test() - if err != nil { - log.Fatal("E! " + err.Error()) - } - os.Exit(0) - } - - err = ag.Connect() - if err != nil { - log.Fatal("E! " + err.Error()) - } - - shutdown := make(chan struct{}) - signals := make(chan os.Signal) - signal.Notify(signals, os.Interrupt, syscall.SIGHUP, syscall.SIGTERM) + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt, syscall.SIGHUP, + syscall.SIGTERM, syscall.SIGINT) go func() { select { case sig := <-signals: - if sig == os.Interrupt || sig == syscall.SIGTERM { - close(shutdown) - } if sig == syscall.SIGHUP { - log.Printf("I! Reloading Telegraf config\n") + log.Printf("I! Reloading Telegraf config") <-reload reload <- true - close(shutdown) } + cancel() case <-stop: - close(shutdown) + cancel() } }() - log.Printf("I! Starting Telegraf %s\n", displayVersion()) - log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) - log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) - log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) - log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) - log.Printf("I! Tags enabled: %s", c.ListTags()) - - if *fPidfile != "" { - f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - log.Printf("E! Unable to create pidfile: %s", err) - } else { - fmt.Fprintf(f, "%d\n", os.Getpid()) - - f.Close() - - defer func() { - err := os.Remove(*fPidfile) - if err != nil { - log.Printf("E! Unable to remove pidfile: %s", err) - } - }() - } + err := runAgent(ctx, inputFilters, outputFilters) + if err != nil && err != context.Canceled { + log.Fatalf("E! [telegraf] Error running agent: %v", err) } - - ag.Run(shutdown) } } +func runAgent(ctx context.Context, + inputFilters []string, + outputFilters []string, +) error { + log.Printf("I! Starting Telegraf %s", version) + + // If no other options are specified, load the config file and run. + c := config.NewConfig() + c.OutputFilters = outputFilters + c.InputFilters = inputFilters + err := c.LoadConfig(*fConfig) + if err != nil { + return err + } + + if *fConfigDirectory != "" { + err = c.LoadDirectory(*fConfigDirectory) + if err != nil { + return err + } + } + if !*fTest && len(c.Outputs) == 0 { + return errors.New("Error: no outputs found, did you provide a valid config file?") + } + if *fPlugins == "" && len(c.Inputs) == 0 { + return errors.New("Error: no inputs found, did you provide a valid config file?") + } + + if int64(c.Agent.Interval.Duration) <= 0 { + return fmt.Errorf("Agent interval must be positive, found %s", + c.Agent.Interval.Duration) + } + + if int64(c.Agent.FlushInterval.Duration) <= 0 { + return fmt.Errorf("Agent flush_interval must be positive; found %s", + c.Agent.Interval.Duration) + } + + ag, err := agent.NewAgent(c) + if err != nil { + return err + } + + // Setup logging as configured. 
+ logConfig := logger.LogConfig{ + Debug: ag.Config.Agent.Debug || *fDebug, + Quiet: ag.Config.Agent.Quiet || *fQuiet, + LogTarget: ag.Config.Agent.LogTarget, + Logfile: ag.Config.Agent.Logfile, + RotationInterval: ag.Config.Agent.LogfileRotationInterval, + RotationMaxSize: ag.Config.Agent.LogfileRotationMaxSize, + RotationMaxArchives: ag.Config.Agent.LogfileRotationMaxArchives, + } + + logger.SetupLogging(logConfig) + + if *fRunOnce { + wait := time.Duration(*fTestWait) * time.Second + return ag.Once(ctx, wait) + } + + if *fTest || *fTestWait != 0 { + wait := time.Duration(*fTestWait) * time.Second + return ag.Test(ctx, wait) + } + + log.Printf("I! Loaded inputs: %s", strings.Join(c.InputNames(), " ")) + log.Printf("I! Loaded aggregators: %s", strings.Join(c.AggregatorNames(), " ")) + log.Printf("I! Loaded processors: %s", strings.Join(c.ProcessorNames(), " ")) + log.Printf("I! Loaded outputs: %s", strings.Join(c.OutputNames(), " ")) + log.Printf("I! Tags enabled: %s", c.ListTags()) + + if *fPidfile != "" { + f, err := os.OpenFile(*fPidfile, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Printf("E! Unable to create pidfile: %s", err) + } else { + fmt.Fprintf(f, "%d\n", os.Getpid()) + + f.Close() + + defer func() { + err := os.Remove(*fPidfile) + if err != nil { + log.Printf("E! Unable to remove pidfile: %s", err) + } + }() + } + } + + return ag.Run(ctx) +} + func usageExit(rc int) { fmt.Println(internal.Usage) os.Exit(rc) } -type program struct { - inputFilters []string - outputFilters []string - aggregatorFilters []string - processorFilters []string -} +func formatFullVersion() string { + var parts = []string{"Telegraf"} -func (p *program) Start(s service.Service) error { - go p.run() - return nil -} -func (p *program) run() { - stop = make(chan struct{}) - reloadLoop( - stop, - p.inputFilters, - p.outputFilters, - p.aggregatorFilters, - p.processorFilters, - ) -} -func (p *program) Stop(s service.Service) error { - close(stop) - return nil -} - -func displayVersion() string { - if version == "" { - return fmt.Sprintf("v%s~%s", nextVersion, commit) + if version != "" { + parts = append(parts, version) + } else { + parts = append(parts, "unknown") } - return "v" + version + + if branch != "" || commit != "" { + if branch == "" { + branch = "unknown" + } + if commit == "" { + commit = "unknown" + } + git := fmt.Sprintf("(git: %s %s)", branch, commit) + parts = append(parts, git) + } + + return strings.Join(parts, " ") } func main() { @@ -237,7 +240,10 @@ func main() { flag.Parse() args := flag.Args() - inputFilters, outputFilters := []string{}, []string{} + sectionFilters, inputFilters, outputFilters := []string{}, []string{}, []string{} + if *fSectionFilters != "" { + sectionFilters = strings.Split(":"+strings.TrimSpace(*fSectionFilters)+":", ":") + } if *fInputFilters != "" { inputFilters = strings.Split(":"+strings.TrimSpace(*fInputFilters)+":", ":") } @@ -253,6 +259,16 @@ func main() { processorFilters = strings.Split(":"+strings.TrimSpace(*fProcessorFilters)+":", ":") } + logger.SetupLogging(logger.LogConfig{}) + + // Load external plugins, if requested. + if *fPlugins != "" { + log.Printf("I! Loading external plugins from: %s", *fPlugins) + if err := goplugin.LoadExternalPlugins(*fPlugins); err != nil { + log.Fatal("E! 
" + err.Error()) + } + } + if *pprofAddr != "" { go func() { pprofHostPort := *pprofAddr @@ -273,10 +289,11 @@ func main() { if len(args) > 0 { switch args[0] { case "version": - fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit) + fmt.Println(formatFullVersion()) return case "config": config.PrintSampleConfig( + sectionFilters, inputFilters, outputFilters, aggregatorFilters, @@ -289,22 +306,33 @@ func main() { // switch for flags which just do something and exit immediately switch { case *fOutputList: - fmt.Println("Available Output Plugins:") - for k, _ := range outputs.Outputs { + fmt.Println("Available Output Plugins: ") + names := make([]string, 0, len(outputs.Outputs)) + for k := range outputs.Outputs { + names = append(names, k) + } + sort.Strings(names) + for _, k := range names { fmt.Printf(" %s\n", k) } return case *fInputList: fmt.Println("Available Input Plugins:") - for k, _ := range inputs.Inputs { + names := make([]string, 0, len(inputs.Inputs)) + for k := range inputs.Inputs { + names = append(names, k) + } + sort.Strings(names) + for _, k := range names { fmt.Printf(" %s\n", k) } return case *fVersion: - fmt.Printf("Telegraf %s (git: %s %s)\n", displayVersion(), branch, commit) + fmt.Println(formatFullVersion()) return case *fSampleConfig: config.PrintSampleConfig( + sectionFilters, inputFilters, outputFilters, aggregatorFilters, @@ -320,53 +348,20 @@ func main() { return } - if runtime.GOOS == "windows" && !(*fRunAsConsole) { - svcConfig := &service.Config{ - Name: "telegraf", - DisplayName: "Telegraf Data Collector Service", - Description: "Collects data using a series of plugins and publishes it to" + - "another series of plugins.", - Arguments: []string{"--config", "C:\\Program Files\\Telegraf\\telegraf.conf"}, - } - - prg := &program{ - inputFilters: inputFilters, - outputFilters: outputFilters, - aggregatorFilters: aggregatorFilters, - processorFilters: processorFilters, - } - s, err := service.New(prg, svcConfig) - if err != nil { - log.Fatal("E! " + err.Error()) - } - // Handle the --service flag here to prevent any issues with tooling that - // may not have an interactive session, e.g. installing from Ansible. - if *fService != "" { - if *fConfig != "" { - (*svcConfig).Arguments = []string{"--config", *fConfig} - } - if *fConfigDirectory != "" { - (*svcConfig).Arguments = append((*svcConfig).Arguments, "--config-directory", *fConfigDirectory) - } - err := service.Control(s, *fService) - if err != nil { - log.Fatal("E! " + err.Error()) - } - os.Exit(0) - } else { - err = s.Run() - if err != nil { - log.Println("E! 
" + err.Error()) - } - } - } else { - stop = make(chan struct{}) - reloadLoop( - stop, - inputFilters, - outputFilters, - aggregatorFilters, - processorFilters, - ) + shortVersion := version + if shortVersion == "" { + shortVersion = "unknown" } + + // Configure version + if err := internal.SetVersion(shortVersion); err != nil { + log.Println("Telegraf version already configured to: " + internal.Version()) + } + + run( + inputFilters, + outputFilters, + aggregatorFilters, + processorFilters, + ) } diff --git a/cmd/telegraf/telegraf_posix.go b/cmd/telegraf/telegraf_posix.go new file mode 100644 index 000000000..ca28622f1 --- /dev/null +++ b/cmd/telegraf/telegraf_posix.go @@ -0,0 +1,13 @@ +// +build !windows + +package main + +func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { + stop = make(chan struct{}) + reloadLoop( + inputFilters, + outputFilters, + aggregatorFilters, + processorFilters, + ) +} diff --git a/cmd/telegraf/telegraf_windows.go b/cmd/telegraf/telegraf_windows.go new file mode 100644 index 000000000..830e6eaa4 --- /dev/null +++ b/cmd/telegraf/telegraf_windows.go @@ -0,0 +1,124 @@ +// +build windows + +package main + +import ( + "log" + "os" + "runtime" + + "github.com/influxdata/telegraf/logger" + "github.com/kardianos/service" +) + +func run(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { + if runtime.GOOS == "windows" && windowsRunAsService() { + runAsWindowsService( + inputFilters, + outputFilters, + aggregatorFilters, + processorFilters, + ) + } else { + stop = make(chan struct{}) + reloadLoop( + inputFilters, + outputFilters, + aggregatorFilters, + processorFilters, + ) + } +} + +type program struct { + inputFilters []string + outputFilters []string + aggregatorFilters []string + processorFilters []string +} + +func (p *program) Start(s service.Service) error { + go p.run() + return nil +} +func (p *program) run() { + stop = make(chan struct{}) + reloadLoop( + p.inputFilters, + p.outputFilters, + p.aggregatorFilters, + p.processorFilters, + ) +} +func (p *program) Stop(s service.Service) error { + close(stop) + return nil +} + +func runAsWindowsService(inputFilters, outputFilters, aggregatorFilters, processorFilters []string) { + programFiles := os.Getenv("ProgramFiles") + if programFiles == "" { // Should never happen + programFiles = "C:\\Program Files" + } + svcConfig := &service.Config{ + Name: *fServiceName, + DisplayName: *fServiceDisplayName, + Description: "Collects data using a series of plugins and publishes it to " + + "another series of plugins.", + Arguments: []string{"--config", programFiles + "\\Telegraf\\telegraf.conf"}, + } + + prg := &program{ + inputFilters: inputFilters, + outputFilters: outputFilters, + aggregatorFilters: aggregatorFilters, + processorFilters: processorFilters, + } + s, err := service.New(prg, svcConfig) + if err != nil { + log.Fatal("E! " + err.Error()) + } + // Handle the --service flag here to prevent any issues with tooling that + // may not have an interactive session, e.g. installing from Ansible. 
+ if *fService != "" { + if *fConfig != "" { + svcConfig.Arguments = []string{"--config", *fConfig} + } + if *fConfigDirectory != "" { + svcConfig.Arguments = append(svcConfig.Arguments, "--config-directory", *fConfigDirectory) + } + //set servicename to service cmd line, to have a custom name after relaunch as a service + svcConfig.Arguments = append(svcConfig.Arguments, "--service-name", *fServiceName) + + err := service.Control(s, *fService) + if err != nil { + log.Fatal("E! " + err.Error()) + } + os.Exit(0) + } else { + winlogger, err := s.Logger(nil) + if err == nil { + //When in service mode, register eventlog target andd setup default logging to eventlog + logger.RegisterEventLogger(winlogger) + logger.SetupLogging(logger.LogConfig{LogTarget: logger.LogTargetEventlog}) + } + err = s.Run() + + if err != nil { + log.Println("E! " + err.Error()) + } + } +} + +// Return true if Telegraf should create a Windows service. +func windowsRunAsService() bool { + if *fService != "" { + return true + } + + if *fRunAsConsole { + return false + } + + return !service.Interactive() +} diff --git a/internal/config/aws/credentials.go b/config/aws/credentials.go similarity index 79% rename from internal/config/aws/credentials.go rename to config/aws/credentials.go index b1f57fceb..1e4f91b13 100644 --- a/internal/config/aws/credentials.go +++ b/config/aws/credentials.go @@ -9,13 +9,14 @@ import ( ) type CredentialConfig struct { - Region string - AccessKey string - SecretKey string - RoleARN string - Profile string - Filename string - Token string + Region string + AccessKey string + SecretKey string + RoleARN string + Profile string + Filename string + Token string + EndpointURL string } func (c *CredentialConfig) Credentials() client.ConfigProvider { @@ -28,7 +29,8 @@ func (c *CredentialConfig) Credentials() client.ConfigProvider { func (c *CredentialConfig) rootCredentials() client.ConfigProvider { config := &aws.Config{ - Region: aws.String(c.Region), + Region: aws.String(c.Region), + Endpoint: &c.EndpointURL, } if c.AccessKey != "" || c.SecretKey != "" { config.Credentials = credentials.NewStaticCredentials(c.AccessKey, c.SecretKey, c.Token) @@ -42,7 +44,8 @@ func (c *CredentialConfig) rootCredentials() client.ConfigProvider { func (c *CredentialConfig) assumeCredentials() client.ConfigProvider { rootCredentials := c.rootCredentials() config := &aws.Config{ - Region: aws.String(c.Region), + Region: aws.String(c.Region), + Endpoint: &c.EndpointURL, } config.Credentials = stscreds.NewCredentials(rootCredentials, c.RoleARN) return session.New(config) diff --git a/internal/config/config.go b/config/config.go similarity index 59% rename from internal/config/config.go rename to config/config.go index 8a31c271e..bca178cb0 100644 --- a/internal/config/config.go +++ b/config/config.go @@ -7,9 +7,10 @@ import ( "io/ioutil" "log" "math" + "net/http" + "net/url" "os" "path/filepath" - "regexp" "runtime" "sort" @@ -19,19 +20,22 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/internal/models" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/aggregators" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/influxdata/toml" "github.com/influxdata/toml/ast" ) var ( + // Default sections + 
sectionDefaults = []string{"global_tags", "agent", "outputs", + "processors", "aggregators", "inputs"} + // Default input plugins inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel", "processes", "disk", "diskio"} @@ -40,7 +44,7 @@ var ( outputDefaults = []string{"influxdb"} // envVarRe is a regex to find environment variables in the config file - envVarRe = regexp.MustCompile(`\$\w+`) + envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`) envVarEscaper = strings.NewReplacer( `"`, `\"`, @@ -61,22 +65,26 @@ type Config struct { Outputs []*models.RunningOutput Aggregators []*models.RunningAggregator // Processors have a slice wrapper type because they need to be sorted - Processors models.RunningProcessors + Processors models.RunningProcessors + AggProcessors models.RunningProcessors } func NewConfig() *Config { c := &Config{ // Agent defaults: Agent: &AgentConfig{ - Interval: internal.Duration{Duration: 10 * time.Second}, - RoundInterval: true, - FlushInterval: internal.Duration{Duration: 10 * time.Second}, + Interval: internal.Duration{Duration: 10 * time.Second}, + RoundInterval: true, + FlushInterval: internal.Duration{Duration: 10 * time.Second}, + LogTarget: "file", + LogfileRotationMaxArchives: 5, }, Tags: make(map[string]string), Inputs: make([]*models.RunningInput, 0), Outputs: make([]*models.RunningOutput, 0), Processors: make([]*models.RunningProcessor, 0), + AggProcessors: make([]*models.RunningProcessor, 0), InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } @@ -136,13 +144,32 @@ type AgentConfig struct { UTC bool `toml:"utc"` // Debug is the option for running in debug mode - Debug bool - - // Logfile specifies the file to send logs to - Logfile string + Debug bool `toml:"debug"` // Quiet is the option for running in quiet mode - Quiet bool + Quiet bool `toml:"quiet"` + + // Log target controls the destination for logs and can be one of "file", + // "stderr" or, on Windows, "eventlog". When set to "file", the output file + // is determined by the "logfile" setting. + LogTarget string `toml:"logtarget"` + + // Name of the file to be logged to when using the "file" logtarget. If set to + // the empty string then logs are written to stderr. + Logfile string `toml:"logfile"` + + // The file will be rotated after the time interval specified. When set + // to 0 no time based rotation is performed. + LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"` + + // The logfile will be rotated when it becomes larger than the specified + // size. When set to 0 no size based rotation is performed. + LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"` + + // Maximum number of rotated archives to keep, any older logs are deleted. + // If set to -1, no archives are removed. 
+ LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"` + Hostname string OmitHostname bool } @@ -151,7 +178,7 @@ type AgentConfig struct { func (c *Config) InputNames() []string { var name []string for _, input := range c.Inputs { - name = append(name, input.Name()) + name = append(name, input.Config.Name) } return name } @@ -160,7 +187,7 @@ func (c *Config) InputNames() []string { func (c *Config) AggregatorNames() []string { var name []string for _, aggregator := range c.Aggregators { - name = append(name, aggregator.Name()) + name = append(name, aggregator.Config.Name) } return name } @@ -169,7 +196,7 @@ func (c *Config) AggregatorNames() []string { func (c *Config) ProcessorNames() []string { var name []string for _, processor := range c.Processors { - name = append(name, processor.Name) + name = append(name, processor.Config.Name) } return name } @@ -178,7 +205,7 @@ func (c *Config) ProcessorNames() []string { func (c *Config) OutputNames() []string { var name []string for _, output := range c.Outputs { - name = append(name, output.Name) + name = append(name, output.Config.Name) } return name } @@ -208,11 +235,12 @@ var header = `# Telegraf Configuration # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. # -# Environment variables can be used anywhere in this config file, simply prepend -# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), -# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) - +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) +` +var globalTagsConfig = ` # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 @@ -220,7 +248,8 @@ var header = `# Telegraf Configuration ## Environment variables can be used as tags, and throughout the config file # user = "$USER" - +` +var agentConfig = ` # Configuration for telegraf agent [agent] ## Default data collection interval for all inputs @@ -234,10 +263,9 @@ var header = `# Telegraf Configuration ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -246,8 +274,8 @@ var header = `# Telegraf Configuration ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter flush_interval = "10s" ## Jitter the flush interval by a random amount. This is primarily to avoid ## large write spikes for users running a large number of telegraf instances. 
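The updated envVarRe above accepts both "${VAR}" and bare "$VAR" references, and the parseConfig loop later in this patch substitutes whichever capture group matched. A minimal standalone sketch of that behaviour follows; the substituteEnvVars helper, the INFLUX_HOST variable, and the sample line are illustrative only and are not part of the patch itself.

package main

import (
	"bytes"
	"fmt"
	"os"
	"regexp"
	"strings"
)

// Same pattern as the updated envVarRe: group 1 matches ${VAR}, group 2 matches $VAR.
var envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`)

// substituteEnvVars mirrors the replacement loop in parseConfig: for every
// match, pick whichever capture group participated, look the name up in the
// environment, and splice the value into the config bytes.
func substituteEnvVars(contents []byte) []byte {
	for _, match := range envVarRe.FindAllSubmatch(contents, -1) {
		var name []byte
		if match[1] != nil {
			name = match[1] // ${VAR} form
		} else {
			name = match[2] // $VAR form
		}
		if val, ok := os.LookupEnv(strings.TrimPrefix(string(name), "$")); ok {
			contents = bytes.Replace(contents, match[0], []byte(val), 1)
		}
	}
	return contents
}

func main() {
	os.Setenv("INFLUX_HOST", "localhost")
	line := []byte(`urls = ["http://${INFLUX_HOST}:8086"]`)
	fmt.Println(string(substituteEnvVars(line)))
	// Output: urls = ["http://localhost:8086"]
}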
@@ -263,119 +291,169 @@ var header = `# Telegraf Configuration ## Valid time units are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = false +` +var outputHeader = ` ############################################################################### # OUTPUT PLUGINS # ############################################################################### + ` var processorHeader = ` - ############################################################################### # PROCESSOR PLUGINS # ############################################################################### + ` var aggregatorHeader = ` - ############################################################################### # AGGREGATOR PLUGINS # ############################################################################### + ` var inputHeader = ` - ############################################################################### # INPUT PLUGINS # ############################################################################### + ` var serviceInputHeader = ` - ############################################################################### # SERVICE INPUT PLUGINS # ############################################################################### + ` // PrintSampleConfig prints the sample config func PrintSampleConfig( + sectionFilters []string, inputFilters []string, outputFilters []string, aggregatorFilters []string, processorFilters []string, ) { + // print headers fmt.Printf(header) + if len(sectionFilters) == 0 { + sectionFilters = sectionDefaults + } + printFilteredGlobalSections(sectionFilters) + // print output plugins - if len(outputFilters) != 0 { - printFilteredOutputs(outputFilters, false) - } else { - printFilteredOutputs(outputDefaults, false) - // Print non-default outputs, commented - var pnames []string - for pname := range outputs.Outputs { - if !sliceContains(pname, outputDefaults) { - pnames = append(pnames, pname) + if sliceContains("outputs", sectionFilters) { + if len(outputFilters) != 0 { + if len(outputFilters) >= 3 && outputFilters[1] != "none" { + fmt.Printf(outputHeader) } + 
printFilteredOutputs(outputFilters, false) + } else { + fmt.Printf(outputHeader) + printFilteredOutputs(outputDefaults, false) + // Print non-default outputs, commented + var pnames []string + for pname := range outputs.Outputs { + if !sliceContains(pname, outputDefaults) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + printFilteredOutputs(pnames, true) } - sort.Strings(pnames) - printFilteredOutputs(pnames, true) } // print processor plugins - fmt.Printf(processorHeader) - if len(processorFilters) != 0 { - printFilteredProcessors(processorFilters, false) - } else { - pnames := []string{} - for pname := range processors.Processors { - pnames = append(pnames, pname) + if sliceContains("processors", sectionFilters) { + if len(processorFilters) != 0 { + if len(processorFilters) >= 3 && processorFilters[1] != "none" { + fmt.Printf(processorHeader) + } + printFilteredProcessors(processorFilters, false) + } else { + fmt.Printf(processorHeader) + pnames := []string{} + for pname := range processors.Processors { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + printFilteredProcessors(pnames, true) } - sort.Strings(pnames) - printFilteredProcessors(pnames, true) } - // pring aggregator plugins - fmt.Printf(aggregatorHeader) - if len(aggregatorFilters) != 0 { - printFilteredAggregators(aggregatorFilters, false) - } else { - pnames := []string{} - for pname := range aggregators.Aggregators { - pnames = append(pnames, pname) + // print aggregator plugins + if sliceContains("aggregators", sectionFilters) { + if len(aggregatorFilters) != 0 { + if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" { + fmt.Printf(aggregatorHeader) + } + printFilteredAggregators(aggregatorFilters, false) + } else { + fmt.Printf(aggregatorHeader) + pnames := []string{} + for pname := range aggregators.Aggregators { + pnames = append(pnames, pname) + } + sort.Strings(pnames) + printFilteredAggregators(pnames, true) } - sort.Strings(pnames) - printFilteredAggregators(pnames, true) } // print input plugins - fmt.Printf(inputHeader) - if len(inputFilters) != 0 { - printFilteredInputs(inputFilters, false) - } else { - printFilteredInputs(inputDefaults, false) - // Print non-default inputs, commented - var pnames []string - for pname := range inputs.Inputs { - if !sliceContains(pname, inputDefaults) { - pnames = append(pnames, pname) + if sliceContains("inputs", sectionFilters) { + if len(inputFilters) != 0 { + if len(inputFilters) >= 3 && inputFilters[1] != "none" { + fmt.Printf(inputHeader) } + printFilteredInputs(inputFilters, false) + } else { + fmt.Printf(inputHeader) + printFilteredInputs(inputDefaults, false) + // Print non-default inputs, commented + var pnames []string + for pname := range inputs.Inputs { + if !sliceContains(pname, inputDefaults) { + pnames = append(pnames, pname) + } + } + sort.Strings(pnames) + printFilteredInputs(pnames, true) } - sort.Strings(pnames) - printFilteredInputs(pnames, true) } } @@ -450,6 +528,7 @@ func printFilteredInputs(inputFilters []string, commented bool) { return } sort.Strings(servInputNames) + fmt.Printf(serviceInputHeader) for _, name := range servInputNames { printConfig(name, servInputs[name], "inputs", commented) @@ -474,12 +553,17 @@ func printFilteredOutputs(outputFilters []string, commented bool) { } } -type printer interface { - Description() string - SampleConfig() string +func printFilteredGlobalSections(sectionFilters []string) { + if sliceContains("global_tags", sectionFilters) { + fmt.Printf(globalTagsConfig) + } + + if 
sliceContains("agent", sectionFilters) { + fmt.Printf(agentConfig) + } } -func printConfig(name string, p printer, op string, commented bool) { +func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) { comment := "" if commented { comment = "# " @@ -569,7 +653,11 @@ func getDefaultConfigPath() (string, error) { homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf") etcfile := "/etc/telegraf/telegraf.conf" if runtime.GOOS == "windows" { - etcfile = `C:\Program Files\Telegraf\telegraf.conf` + programFiles := os.Getenv("ProgramFiles") + if programFiles == "" { // Should never happen + programFiles = `C:\Program Files` + } + etcfile = programFiles + `\Telegraf\telegraf.conf` } for _, path := range []string{envfile, homefile, etcfile} { if _, err := os.Stat(path); err == nil { @@ -591,9 +679,22 @@ func (c *Config) LoadConfig(path string) error { return err } } - tbl, err := parseFile(path) + data, err := loadConfig(path) if err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error loading config file %s: %w", path, err) + } + + if err = c.LoadConfigData(data); err != nil { + return fmt.Errorf("Error loading config file %s: %w", path, err) + } + return nil +} + +// LoadConfigData loads TOML-formatted config data +func (c *Config) LoadConfigData(data []byte) error { + tbl, err := parseConfig(data) + if err != nil { + return fmt.Errorf("Error parsing data: %s", err) } // Parse tags tables first: @@ -601,11 +702,10 @@ func (c *Config) LoadConfig(path string) error { if val, ok := tbl.Fields[tableName]; ok { subTable, ok := val.(*ast.Table) if !ok { - return fmt.Errorf("%s: invalid configuration", path) + return fmt.Errorf("invalid configuration, bad table name %q", tableName) } if err = toml.UnmarshalTable(subTable, c.Tags); err != nil { - log.Printf("E! Could not parse [global_tags] config\n") - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("error parsing table name %q: %w", tableName, err) } } } @@ -614,19 +714,31 @@ func (c *Config) LoadConfig(path string) error { if val, ok := tbl.Fields["agent"]; ok { subTable, ok := val.(*ast.Table) if !ok { - return fmt.Errorf("%s: invalid configuration", path) + return fmt.Errorf("invalid configuration, error parsing agent table") } if err = toml.UnmarshalTable(subTable, c.Agent); err != nil { - log.Printf("E! 
Could not parse [agent] config\n") - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("error parsing agent table: %w", err) } } + if !c.Agent.OmitHostname { + if c.Agent.Hostname == "" { + hostname, err := os.Hostname() + if err != nil { + return err + } + + c.Agent.Hostname = hostname + } + + c.Tags["host"] = c.Agent.Hostname + } + // Parse all the rest of the plugins: for name, val := range tbl.Fields { subTable, ok := val.(*ast.Table) if !ok { - return fmt.Errorf("%s: invalid configuration", path) + return fmt.Errorf("invalid configuration, error parsing field %q as table", name) } switch name { @@ -637,17 +749,17 @@ func (c *Config) LoadConfig(path string) error { // legacy [outputs.influxdb] support case *ast.Table: if err = c.addOutput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addOutput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s array, %s", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s, file %s", - pluginName, path) + return fmt.Errorf("Unsupported config format: %s", + pluginName) } } case "inputs", "plugins": @@ -656,17 +768,17 @@ func (c *Config) LoadConfig(path string) error { // legacy [inputs.cpu] support case *ast.Table: if err = c.addInput(pluginName, pluginSubTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addInput(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s, file %s", - pluginName, path) + return fmt.Errorf("Unsupported config format: %s", + pluginName) } } case "processors": @@ -675,12 +787,12 @@ func (c *Config) LoadConfig(path string) error { case []*ast.Table: for _, t := range pluginSubTable { if err = c.addProcessor(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s, file %s", - pluginName, path) + return fmt.Errorf("Unsupported config format: %s", + pluginName) } } case "aggregators": @@ -689,19 +801,19 @@ func (c *Config) LoadConfig(path string) error { case []*ast.Table: for _, t := range pluginSubTable { if err = c.addAggregator(pluginName, t); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", pluginName, err) } } default: - return fmt.Errorf("Unsupported config format: %s, file %s", - pluginName, path) + return fmt.Errorf("Unsupported config format: %s", + pluginName) } } // Assume it's an input input for legacy config file support if no other // identifiers are present default: if err = c.addInput(name, subTable); err != nil { - return fmt.Errorf("Error parsing %s, %s", path, err) + return fmt.Errorf("Error parsing %s, %s", name, err) } } } @@ -709,6 +821,7 @@ func (c *Config) LoadConfig(path string) error { if len(c.Processors) > 1 { sort.Sort(c.Processors) } + return nil } @@ -724,23 +837,70 @@ func escapeEnv(value string) string { return envVarEscaper.Replace(value) } -// parseFile loads a TOML configuration from 
a provided path and -// returns the AST produced from the TOML parser. When loading the file, it -// will find environment variables and replace them. -func parseFile(fpath string) (*ast.Table, error) { - contents, err := ioutil.ReadFile(fpath) +func loadConfig(config string) ([]byte, error) { + u, err := url.Parse(config) if err != nil { return nil, err } - // ugh windows why + + switch u.Scheme { + case "https", "http": + return fetchConfig(u) + default: + // If it isn't a https scheme, try it as a file. + } + return ioutil.ReadFile(config) + +} + +func fetchConfig(u *url.URL) ([]byte, error) { + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + + if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists { + req.Header.Add("Authorization", "Token "+v) + } + req.Header.Add("Accept", "application/toml") + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to retrieve remote config: %s", resp.Status) + } + + defer resp.Body.Close() + return ioutil.ReadAll(resp.Body) +} + +// parseConfig loads a TOML configuration from a provided path and +// returns the AST produced from the TOML parser. When loading the file, it +// will find environment variables and replace them. +func parseConfig(contents []byte) (*ast.Table, error) { contents = trimBOM(contents) - env_vars := envVarRe.FindAll(contents, -1) - for _, env_var := range env_vars { + parameters := envVarRe.FindAllSubmatch(contents, -1) + for _, parameter := range parameters { + if len(parameter) != 3 { + continue + } + + var env_var []byte + if parameter[1] != nil { + env_var = parameter[1] + } else if parameter[2] != nil { + env_var = parameter[2] + } else { + continue + } + env_val, ok := os.LookupEnv(strings.TrimPrefix(string(env_var), "$")) if ok { env_val = escapeEnv(env_val) - contents = bytes.Replace(contents, env_var, []byte(env_val), 1) + contents = bytes.Replace(contents, parameter[0], []byte(env_val), 1) } } @@ -772,25 +932,48 @@ func (c *Config) addProcessor(name string, table *ast.Table) error { if !ok { return fmt.Errorf("Undefined but requested processor: %s", name) } - processor := creator() processorConfig, err := buildProcessor(name, table) if err != nil { return err } - if err := toml.UnmarshalTable(table, processor); err != nil { + rf, err := c.newRunningProcessor(creator, processorConfig, name, table) + if err != nil { return err } + c.Processors = append(c.Processors, rf) - rf := &models.RunningProcessor{ - Name: name, - Processor: processor, - Config: processorConfig, + // save a copy for the aggregator + rf, err = c.newRunningProcessor(creator, processorConfig, name, table) + if err != nil { + return err + } + c.AggProcessors = append(c.AggProcessors, rf) + + return nil +} + +func (c *Config) newRunningProcessor( + creator processors.StreamingCreator, + processorConfig *models.ProcessorConfig, + name string, + table *ast.Table, +) (*models.RunningProcessor, error) { + processor := creator() + + if p, ok := processor.(unwrappable); ok { + if err := toml.UnmarshalTable(table, p.Unwrap()); err != nil { + return nil, err + } + } else { + if err := toml.UnmarshalTable(table, processor); err != nil { + return nil, err + } } - c.Processors = append(c.Processors, rf) - return nil + rf := models.NewRunningProcessor(processor, processorConfig) + return rf, nil } func (c *Config) addOutput(name string, table *ast.Table) error { @@ -855,6 +1038,17 @@ func (c *Config) addInput(name string, 
table *ast.Table) error { t.SetParser(parser) } + switch t := input.(type) { + case parsers.ParserFuncInput: + config, err := getParserConfig(name, table) + if err != nil { + return err + } + t.SetParserFunc(func() (parsers.Parser, error) { + return parsers.NewParser(config) + }) + } + pluginConfig, err := buildInput(name, table) if err != nil { return err @@ -865,6 +1059,7 @@ func (c *Config) addInput(name string, table *ast.Table) error { } rp := models.NewRunningInput(input, pluginConfig) + rp.SetDefaultTags(c.Tags) c.Inputs = append(c.Inputs, rp) return nil } @@ -873,18 +1068,11 @@ func (c *Config) addInput(name string, table *ast.Table) error { // builds the filter and returns a // models.AggregatorConfig to be inserted into models.RunningAggregator func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { - unsupportedFields := []string{"tagexclude", "taginclude"} - for _, field := range unsupportedFields { - if _, ok := tbl.Fields[field]; ok { - return nil, fmt.Errorf("%s is not supported for aggregator plugins (%s).", - field, name) - } - } - conf := &models.AggregatorConfig{ Name: name, Delay: time.Millisecond * 100, Period: time.Second * 30, + Grace: time.Second * 0, } if node, ok := tbl.Fields["period"]; ok { @@ -913,6 +1101,18 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err } } + if node, ok := tbl.Fields["grace"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + dur, err := time.ParseDuration(str.Value) + if err != nil { + return nil, err + } + + conf.Grace = dur + } + } + } if node, ok := tbl.Fields["drop_original"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if b, ok := kv.Value.(*ast.Boolean); ok { @@ -949,6 +1149,14 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err } } + if node, ok := tbl.Fields["alias"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + conf.Alias = str.Value + } + } + } + conf.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { @@ -960,10 +1168,12 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err delete(tbl.Fields, "period") delete(tbl.Fields, "delay") + delete(tbl.Fields, "grace") delete(tbl.Fields, "drop_original") delete(tbl.Fields, "name_prefix") delete(tbl.Fields, "name_suffix") delete(tbl.Fields, "name_override") + delete(tbl.Fields, "alias") delete(tbl.Fields, "tags") var err error conf.Filter, err = buildFilter(tbl) @@ -978,13 +1188,6 @@ func buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, err // models.ProcessorConfig to be inserted into models.RunningProcessor func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { conf := &models.ProcessorConfig{Name: name} - unsupportedFields := []string{"tagexclude", "taginclude", "fielddrop", "fieldpass"} - for _, field := range unsupportedFields { - if _, ok := tbl.Fields[field]; ok { - return nil, fmt.Errorf("%s is not supported for processor plugins (%s).", - field, name) - } - } if node, ok := tbl.Fields["order"]; ok { if kv, ok := node.(*ast.KeyValue); ok { @@ -998,6 +1201,15 @@ func buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error } } + if node, ok := tbl.Fields["alias"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + conf.Alias = str.Value + } + } + } + + delete(tbl.Fields, "alias") 
delete(tbl.Fields, "order") var err error conf.Filter, err = buildFilter(tbl) @@ -1186,6 +1398,14 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { } } + if node, ok := tbl.Fields["alias"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + cp.Alias = str.Value + } + } + } + cp.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { @@ -1198,6 +1418,7 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { delete(tbl.Fields, "name_prefix") delete(tbl.Fields, "name_suffix") delete(tbl.Fields, "name_override") + delete(tbl.Fields, "alias") delete(tbl.Fields, "interval") delete(tbl.Fields, "tags") var err error @@ -1212,7 +1433,17 @@ func buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) { // a parsers.Parser object, and creates it, which can then be added onto // an Input object. func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { - c := &parsers.Config{} + config, err := getParserConfig(name, tbl) + if err != nil { + return nil, err + } + return parsers.NewParser(config) +} + +func getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { + c := &parsers.Config{ + JSONStrict: true, + } if node, ok := tbl.Fields["data_format"]; ok { if kv, ok := node.(*ast.KeyValue); ok { @@ -1261,6 +1492,70 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { } } + if node, ok := tbl.Fields["json_string_fields"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.JSONStringFields = append(c.JSONStringFields, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["json_name_key"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.JSONNameKey = str.Value + } + } + } + + if node, ok := tbl.Fields["json_query"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.JSONQuery = str.Value + } + } + } + + if node, ok := tbl.Fields["json_time_key"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.JSONTimeKey = str.Value + } + } + } + + if node, ok := tbl.Fields["json_time_format"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.JSONTimeFormat = str.Value + } + } + } + + if node, ok := tbl.Fields["json_timezone"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.JSONTimezone = str.Value + } + } + } + + if node, ok := tbl.Fields["json_strict"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.JSONStrict, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + if node, ok := tbl.Fields["data_type"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { @@ -1285,6 +1580,14 @@ func buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { } } + if node, ok := tbl.Fields["collectd_parse_multivalue"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.CollectdSplit = str.Value + } + } + } + if node, ok := tbl.Fields["collectd_typesdb"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if ary, ok := kv.Value.(*ast.Array); ok { @@ -1338,23 +1641,250 @@ func buildParser(name string, tbl 
*ast.Table) (parsers.Parser, error) { } } + //for grok data_format + if node, ok := tbl.Fields["grok_named_patterns"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.GrokNamedPatterns = append(c.GrokNamedPatterns, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["grok_patterns"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.GrokPatterns = append(c.GrokPatterns, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["grok_custom_patterns"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.GrokCustomPatterns = str.Value + } + } + } + + if node, ok := tbl.Fields["grok_custom_pattern_files"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.GrokCustomPatternFiles = append(c.GrokCustomPatternFiles, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["grok_timezone"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.GrokTimezone = str.Value + } + } + } + + if node, ok := tbl.Fields["grok_unique_timestamp"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.GrokUniqueTimestamp = str.Value + } + } + } + + //for csv parser + if node, ok := tbl.Fields["csv_column_names"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.CSVColumnNames = append(c.CSVColumnNames, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["csv_column_types"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.CSVColumnTypes = append(c.CSVColumnTypes, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["csv_tag_columns"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.CSVTagColumns = append(c.CSVTagColumns, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["csv_delimiter"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.CSVDelimiter = str.Value + } + } + } + + if node, ok := tbl.Fields["csv_comment"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.CSVComment = str.Value + } + } + } + + if node, ok := tbl.Fields["csv_measurement_column"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.CSVMeasurementColumn = str.Value + } + } + } + + if node, ok := tbl.Fields["csv_timestamp_column"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.CSVTimestampColumn = str.Value + } + } + } + + if node, ok := tbl.Fields["csv_timestamp_format"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.CSVTimestampFormat = str.Value + } + } + } + + if node, ok := tbl.Fields["csv_header_row_count"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err 
:= integer.Int() + if err != nil { + return nil, err + } + c.CSVHeaderRowCount = int(v) + } + } + } + + if node, ok := tbl.Fields["csv_skip_rows"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err := integer.Int() + if err != nil { + return nil, err + } + c.CSVSkipRows = int(v) + } + } + } + + if node, ok := tbl.Fields["csv_skip_columns"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err := integer.Int() + if err != nil { + return nil, err + } + c.CSVSkipColumns = int(v) + } + } + } + + if node, ok := tbl.Fields["csv_trim_space"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.Boolean); ok { + //for config with no quotes + val, err := strconv.ParseBool(str.Value) + c.CSVTrimSpace = val + if err != nil { + return nil, fmt.Errorf("E! parsing to bool: %v", err) + } + } + } + } + + if node, ok := tbl.Fields["form_urlencoded_tag_keys"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.FormUrlencodedTagKeys = append(c.FormUrlencodedTagKeys, str.Value) + } + } + } + } + } + c.MetricName = name delete(tbl.Fields, "data_format") delete(tbl.Fields, "separator") delete(tbl.Fields, "templates") delete(tbl.Fields, "tag_keys") + delete(tbl.Fields, "json_name_key") + delete(tbl.Fields, "json_query") + delete(tbl.Fields, "json_string_fields") + delete(tbl.Fields, "json_time_format") + delete(tbl.Fields, "json_time_key") + delete(tbl.Fields, "json_timezone") + delete(tbl.Fields, "json_strict") delete(tbl.Fields, "data_type") delete(tbl.Fields, "collectd_auth_file") delete(tbl.Fields, "collectd_security_level") delete(tbl.Fields, "collectd_typesdb") + delete(tbl.Fields, "collectd_parse_multivalue") delete(tbl.Fields, "dropwizard_metric_registry_path") delete(tbl.Fields, "dropwizard_time_path") delete(tbl.Fields, "dropwizard_time_format") delete(tbl.Fields, "dropwizard_tags_path") delete(tbl.Fields, "dropwizard_tag_paths") + delete(tbl.Fields, "grok_named_patterns") + delete(tbl.Fields, "grok_patterns") + delete(tbl.Fields, "grok_custom_patterns") + delete(tbl.Fields, "grok_custom_pattern_files") + delete(tbl.Fields, "grok_timezone") + delete(tbl.Fields, "grok_unique_timestamp") + delete(tbl.Fields, "csv_column_names") + delete(tbl.Fields, "csv_column_types") + delete(tbl.Fields, "csv_comment") + delete(tbl.Fields, "csv_delimiter") + delete(tbl.Fields, "csv_field_columns") + delete(tbl.Fields, "csv_header_row_count") + delete(tbl.Fields, "csv_measurement_column") + delete(tbl.Fields, "csv_skip_columns") + delete(tbl.Fields, "csv_skip_rows") + delete(tbl.Fields, "csv_tag_columns") + delete(tbl.Fields, "csv_timestamp_column") + delete(tbl.Fields, "csv_timestamp_format") + delete(tbl.Fields, "csv_trim_space") + delete(tbl.Fields, "form_urlencoded_tag_keys") - return parsers.NewParser(c) + return c, nil } // buildSerializer grabs the necessary entries from the ast.Table for creating @@ -1391,6 +1921,18 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["templates"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.Templates = append(c.Templates, str.Value) + } + } + } + } + } + if node, ok := tbl.Fields["influx_max_line_bytes"]; ok { if kv, ok := 
node.(*ast.KeyValue); ok { if integer, ok := kv.Value.(*ast.Integer); ok { @@ -1439,6 +1981,14 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["graphite_separator"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + c.GraphiteSeparator = str.Value + } + } + } + if node, ok := tbl.Fields["json_timestamp_units"]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { @@ -1455,14 +2005,107 @@ func buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error } } + if node, ok := tbl.Fields["splunkmetric_hec_routing"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.HecRouting, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + + if node, ok := tbl.Fields["splunkmetric_multimetric"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.SplunkmetricMultiMetric, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + + if node, ok := tbl.Fields["wavefront_source_override"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if ary, ok := kv.Value.(*ast.Array); ok { + for _, elem := range ary.Value { + if str, ok := elem.(*ast.String); ok { + c.WavefrontSourceOverride = append(c.WavefrontSourceOverride, str.Value) + } + } + } + } + } + + if node, ok := tbl.Fields["wavefront_use_strict"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.WavefrontUseStrict, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + + if node, ok := tbl.Fields["prometheus_export_timestamp"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.PrometheusExportTimestamp, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + + if node, ok := tbl.Fields["prometheus_sort_metrics"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.PrometheusSortMetrics, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + + if node, ok := tbl.Fields["prometheus_string_as_label"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if b, ok := kv.Value.(*ast.Boolean); ok { + var err error + c.PrometheusStringAsLabel, err = b.Boolean() + if err != nil { + return nil, err + } + } + } + } + delete(tbl.Fields, "influx_max_line_bytes") delete(tbl.Fields, "influx_sort_fields") delete(tbl.Fields, "influx_uint_support") delete(tbl.Fields, "graphite_tag_support") + delete(tbl.Fields, "graphite_separator") delete(tbl.Fields, "data_format") delete(tbl.Fields, "prefix") delete(tbl.Fields, "template") + delete(tbl.Fields, "templates") delete(tbl.Fields, "json_timestamp_units") + delete(tbl.Fields, "splunkmetric_hec_routing") + delete(tbl.Fields, "splunkmetric_multimetric") + delete(tbl.Fields, "wavefront_source_override") + delete(tbl.Fields, "wavefront_use_strict") + delete(tbl.Fields, "prometheus_export_timestamp") + delete(tbl.Fields, "prometheus_sort_metrics") + delete(tbl.Fields, "prometheus_string_as_label") return serializers.NewSerializer(c) } @@ -1479,6 +2122,8 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { Name: name, Filter: filter, } + + // TODO // Outputs don't support FieldDrop/FieldPass, so set to NameDrop/NamePass if len(oc.Filter.FieldDrop) > 0 { oc.Filter.NameDrop = 
oc.Filter.FieldDrop @@ -1486,5 +2131,104 @@ func buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { if len(oc.Filter.FieldPass) > 0 { oc.Filter.NamePass = oc.Filter.FieldPass } + + if node, ok := tbl.Fields["flush_interval"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + dur, err := time.ParseDuration(str.Value) + if err != nil { + return nil, err + } + + oc.FlushInterval = dur + } + } + } + + if node, ok := tbl.Fields["flush_jitter"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + dur, err := time.ParseDuration(str.Value) + if err != nil { + return nil, err + } + oc.FlushJitter = new(time.Duration) + *oc.FlushJitter = dur + } + } + } + + if node, ok := tbl.Fields["metric_buffer_limit"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err := integer.Int() + if err != nil { + return nil, err + } + oc.MetricBufferLimit = int(v) + } + } + } + + if node, ok := tbl.Fields["metric_batch_size"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if integer, ok := kv.Value.(*ast.Integer); ok { + v, err := integer.Int() + if err != nil { + return nil, err + } + oc.MetricBatchSize = int(v) + } + } + } + + if node, ok := tbl.Fields["alias"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + oc.Alias = str.Value + } + } + } + + if node, ok := tbl.Fields["name_override"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + oc.NameOverride = str.Value + } + } + } + + if node, ok := tbl.Fields["name_suffix"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + oc.NameSuffix = str.Value + } + } + } + + if node, ok := tbl.Fields["name_prefix"]; ok { + if kv, ok := node.(*ast.KeyValue); ok { + if str, ok := kv.Value.(*ast.String); ok { + oc.NamePrefix = str.Value + } + } + } + + delete(tbl.Fields, "flush_interval") + delete(tbl.Fields, "flush_jitter") + delete(tbl.Fields, "metric_buffer_limit") + delete(tbl.Fields, "metric_batch_size") + delete(tbl.Fields, "alias") + delete(tbl.Fields, "name_override") + delete(tbl.Fields, "name_suffix") + delete(tbl.Fields, "name_prefix") + return oc, nil } + +// unwrappable lets you retrieve the original telegraf.Processor from the +// StreamingProcessor. This is necessary because the toml Unmarshaller won't +// look inside composed types. 
+type unwrappable interface { + Unwrap() telegraf.Processor +} diff --git a/internal/config/config_test.go b/config/config_test.go similarity index 56% rename from internal/config/config_test.go rename to config/config_test.go index 3498d815d..6c5e3662a 100644 --- a/internal/config/config_test.go +++ b/config/config_test.go @@ -5,14 +5,17 @@ import ( "testing" "time" - "github.com/influxdata/telegraf/internal/models" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/exec" + "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" "github.com/influxdata/telegraf/plugins/inputs/memcached" "github.com/influxdata/telegraf/plugins/inputs/procstat" + httpOut "github.com/influxdata/telegraf/plugins/outputs/http" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { @@ -28,17 +31,17 @@ func TestConfig_LoadSingleInputWithEnvVars(t *testing.T) { filter := models.Filter{ NameDrop: []string{"metricname2"}, - NamePass: []string{"metricname1"}, + NamePass: []string{"metricname1", "ip_192.168.1.1_name"}, FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, TagDrop: []models.TagFilter{ - models.TagFilter{ + { Name: "badtag", Filter: []string{"othertag"}, }, }, TagPass: []models.TagFilter{ - models.TagFilter{ + { Name: "goodtag", Filter: []string{"mytag"}, }, @@ -71,13 +74,13 @@ func TestConfig_LoadSingleInput(t *testing.T) { FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, TagDrop: []models.TagFilter{ - models.TagFilter{ + { Name: "badtag", Filter: []string{"othertag"}, }, }, TagPass: []models.TagFilter{ - models.TagFilter{ + { Name: "goodtag", Filter: []string{"mytag"}, }, @@ -117,13 +120,13 @@ func TestConfig_LoadDirectory(t *testing.T) { FieldDrop: []string{"other", "stuff"}, FieldPass: []string{"some", "strings"}, TagDrop: []models.TagFilter{ - models.TagFilter{ + { Name: "badtag", Filter: []string{"othertag"}, }, }, TagPass: []models.TagFilter{ - models.TagFilter{ + { Name: "goodtag", Filter: []string{"mytag"}, }, @@ -143,7 +146,11 @@ func TestConfig_LoadDirectory(t *testing.T) { "Testdata did not produce correct memcached metadata.") ex := inputs.Inputs["exec"]().(*exec.Exec) - p, err := parsers.NewJSONParser("exec", nil, nil) + p, err := parsers.NewParser(&parsers.Config{ + MetricName: "exec", + DataFormat: "json", + JSONStrict: true, + }) assert.NoError(t, err) ex.SetParser(p) ex.Command = "/usr/bin/myothercollector --foo=bar" @@ -152,6 +159,11 @@ func TestConfig_LoadDirectory(t *testing.T) { MeasurementSuffix: "_myothercollector", } eConfig.Tags = make(map[string]string) + + exec := c.Inputs[1].Input.(*exec.Exec) + require.NotNil(t, exec.Log) + exec.Log = nil + assert.Equal(t, ex, c.Inputs[1].Input, "Merged Testdata did not produce a correct exec struct.") assert.Equal(t, eConfig, c.Inputs[1].Config, @@ -174,3 +186,74 @@ func TestConfig_LoadDirectory(t *testing.T) { assert.Equal(t, pConfig, c.Inputs[3].Config, "Merged Testdata did not produce correct procstat metadata.") } + +func TestConfig_LoadSpecialTypes(t *testing.T) { + c := NewConfig() + err := c.LoadConfig("./testdata/special_types.toml") + assert.NoError(t, err) + require.Equal(t, 1, len(c.Inputs)) + + inputHTTPListener, ok := c.Inputs[0].Input.(*http_listener_v2.HTTPListenerV2) + assert.Equal(t, true, ok) + // 
Tests telegraf duration parsing. + assert.Equal(t, internal.Duration{Duration: time.Second}, inputHTTPListener.WriteTimeout) + // Tests telegraf size parsing. + assert.Equal(t, internal.Size{Size: 1024 * 1024}, inputHTTPListener.MaxBodySize) + // Tests toml multiline basic strings. + assert.Equal(t, "/path/to/my/cert\n", inputHTTPListener.TLSCert) +} + +func TestConfig_FieldNotDefined(t *testing.T) { + c := NewConfig() + err := c.LoadConfig("./testdata/invalid_field.toml") + require.Error(t, err, "invalid field name") + assert.Equal(t, "Error loading config file ./testdata/invalid_field.toml: Error parsing http_listener_v2, line 2: field corresponding to `not_a_field' is not defined in http_listener_v2.HTTPListenerV2", err.Error()) + +} + +func TestConfig_WrongFieldType(t *testing.T) { + c := NewConfig() + err := c.LoadConfig("./testdata/wrong_field_type.toml") + require.Error(t, err, "invalid field type") + assert.Equal(t, "Error loading config file ./testdata/wrong_field_type.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Port) cannot unmarshal TOML string into int", err.Error()) + + c = NewConfig() + err = c.LoadConfig("./testdata/wrong_field_type2.toml") + require.Error(t, err, "invalid field type2") + assert.Equal(t, "Error loading config file ./testdata/wrong_field_type2.toml: Error parsing http_listener_v2, line 2: (http_listener_v2.HTTPListenerV2.Methods) cannot unmarshal TOML string into []string", err.Error()) +} + +func TestConfig_InlineTables(t *testing.T) { + // #4098 + c := NewConfig() + err := c.LoadConfig("./testdata/inline_table.toml") + assert.NoError(t, err) + require.Equal(t, 2, len(c.Outputs)) + + outputHTTP, ok := c.Outputs[1].Output.(*httpOut.HTTP) + assert.Equal(t, true, ok) + assert.Equal(t, map[string]string{"Authorization": "Token $TOKEN", "Content-Type": "application/json"}, outputHTTP.Headers) + assert.Equal(t, []string{"org_id"}, c.Outputs[0].Config.Filter.TagInclude) +} + +func TestConfig_SliceComment(t *testing.T) { + t.Skipf("Skipping until #3642 is resolved") + + c := NewConfig() + err := c.LoadConfig("./testdata/slice_comment.toml") + assert.NoError(t, err) + require.Equal(t, 1, len(c.Outputs)) + + outputHTTP, ok := c.Outputs[0].Output.(*httpOut.HTTP) + assert.Equal(t, []string{"test"}, outputHTTP.Scopes) + assert.Equal(t, true, ok) +} + +func TestConfig_BadOrdering(t *testing.T) { + // #3444: when not using inline tables, care has to be taken so subsequent configuration + // doesn't become part of the table. This is not a bug, but TOML syntax. 
+ c := NewConfig() + err := c.LoadConfig("./testdata/non_slice_slice.toml") + require.Error(t, err, "bad ordering") + assert.Equal(t, "Error loading config file ./testdata/non_slice_slice.toml: Error parsing http array, line 4: cannot unmarshal TOML array into string (need slice)", err.Error()) +} diff --git a/config/testdata/inline_table.toml b/config/testdata/inline_table.toml new file mode 100644 index 000000000..525fdce17 --- /dev/null +++ b/config/testdata/inline_table.toml @@ -0,0 +1,7 @@ +[[outputs.http]] + headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" } + taginclude = ["org_id"] + +[[outputs.http]] + headers = { Authorization = "Token $TOKEN",Content-Type = "application/json" } + taginclude = ["org_id"] diff --git a/config/testdata/invalid_field.toml b/config/testdata/invalid_field.toml new file mode 100644 index 000000000..4c718d7bb --- /dev/null +++ b/config/testdata/invalid_field.toml @@ -0,0 +1,2 @@ +[[inputs.http_listener_v2]] + not_a_field = true diff --git a/config/testdata/non_slice_slice.toml b/config/testdata/non_slice_slice.toml new file mode 100644 index 000000000..f92edcc0b --- /dev/null +++ b/config/testdata/non_slice_slice.toml @@ -0,0 +1,4 @@ +[[outputs.http]] + [outputs.http.headers] + Content-Type = "application/json" + taginclude = ["org_id"] diff --git a/internal/config/testdata/single_plugin.toml b/config/testdata/single_plugin.toml similarity index 100% rename from internal/config/testdata/single_plugin.toml rename to config/testdata/single_plugin.toml diff --git a/internal/config/testdata/single_plugin_env_vars.toml b/config/testdata/single_plugin_env_vars.toml similarity index 83% rename from internal/config/testdata/single_plugin_env_vars.toml rename to config/testdata/single_plugin_env_vars.toml index 6600a77b3..b1f71ea8a 100644 --- a/internal/config/testdata/single_plugin_env_vars.toml +++ b/config/testdata/single_plugin_env_vars.toml @@ -1,6 +1,6 @@ [[inputs.memcached]] servers = ["$MY_TEST_SERVER"] - namepass = ["metricname1"] + namepass = ["metricname1", "ip_${MY_TEST_SERVER}_name"] namedrop = ["metricname2"] fieldpass = ["some", "strings"] fielddrop = ["other", "stuff"] diff --git a/config/testdata/slice_comment.toml b/config/testdata/slice_comment.toml new file mode 100644 index 000000000..1177e5f89 --- /dev/null +++ b/config/testdata/slice_comment.toml @@ -0,0 +1,5 @@ +[[outputs.http]] + scopes = [ + # comment + "test" # comment + ] diff --git a/config/testdata/special_types.toml b/config/testdata/special_types.toml new file mode 100644 index 000000000..24b73ae45 --- /dev/null +++ b/config/testdata/special_types.toml @@ -0,0 +1,9 @@ +[[inputs.http_listener_v2]] + write_timeout = "1s" + max_body_size = "1MiB" + tls_cert = """ +/path/to/my/cert +""" + tls_key = ''' +/path/to/my/key +''' diff --git a/internal/config/testdata/subconfig/exec.conf b/config/testdata/subconfig/exec.conf similarity index 100% rename from internal/config/testdata/subconfig/exec.conf rename to config/testdata/subconfig/exec.conf diff --git a/internal/config/testdata/subconfig/memcached.conf b/config/testdata/subconfig/memcached.conf similarity index 100% rename from internal/config/testdata/subconfig/memcached.conf rename to config/testdata/subconfig/memcached.conf diff --git a/internal/config/testdata/subconfig/procstat.conf b/config/testdata/subconfig/procstat.conf similarity index 100% rename from internal/config/testdata/subconfig/procstat.conf rename to config/testdata/subconfig/procstat.conf diff --git 
a/internal/config/testdata/telegraf-agent.toml b/config/testdata/telegraf-agent.toml similarity index 99% rename from internal/config/testdata/telegraf-agent.toml rename to config/testdata/telegraf-agent.toml index 9da79605f..f71b98206 100644 --- a/internal/config/testdata/telegraf-agent.toml +++ b/config/testdata/telegraf-agent.toml @@ -256,7 +256,7 @@ # specify address via a url matching: # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full] # or a simple string: - # host=localhost user=pqotest password=... sslmode=... dbname=app_production + # host=localhost user=pqgotest password=... sslmode=... dbname=app_production # # All connection parameters are optional. By default, the host is localhost # and the user is the currently running user. For localhost, we default diff --git a/config/testdata/wrong_field_type.toml b/config/testdata/wrong_field_type.toml new file mode 100644 index 000000000..237176e7e --- /dev/null +++ b/config/testdata/wrong_field_type.toml @@ -0,0 +1,2 @@ +[[inputs.http_listener_v2]] + port = "80" diff --git a/config/testdata/wrong_field_type2.toml b/config/testdata/wrong_field_type2.toml new file mode 100644 index 000000000..6f3def792 --- /dev/null +++ b/config/testdata/wrong_field_type2.toml @@ -0,0 +1,2 @@ +[[inputs.http_listener_v2]] + methods = "POST" diff --git a/config/types.go b/config/types.go new file mode 100644 index 000000000..5703c8411 --- /dev/null +++ b/config/types.go @@ -0,0 +1,88 @@ +package config + +import ( + "bytes" + "strconv" + "time" + + "github.com/alecthomas/units" +) + +// Duration is a time.Duration +type Duration time.Duration + +// Size is an int64 +type Size int64 + +// Number is a float +type Number float64 + +// UnmarshalTOML parses the duration from the TOML config file +func (d Duration) UnmarshalTOML(b []byte) error { + var err error + b = bytes.Trim(b, `'`) + + // see if we can directly convert it + dur, err := time.ParseDuration(string(b)) + if err == nil { + d = Duration(dur) + return nil + } + + // Parse string duration, ie, "1s" + if uq, err := strconv.Unquote(string(b)); err == nil && len(uq) > 0 { + dur, err := time.ParseDuration(uq) + if err == nil { + d = Duration(dur) + return nil + } + } + + // First try parsing as integer seconds + sI, err := strconv.ParseInt(string(b), 10, 64) + if err == nil { + dur := time.Second * time.Duration(sI) + d = Duration(dur) + return nil + } + // Second try parsing as float seconds + sF, err := strconv.ParseFloat(string(b), 64) + if err == nil { + dur := time.Second * time.Duration(sF) + d = Duration(dur) + return nil + } + + return nil +} + +func (s Size) UnmarshalTOML(b []byte) error { + var err error + b = bytes.Trim(b, `'`) + + val, err := strconv.ParseInt(string(b), 10, 64) + if err == nil { + s = Size(val) + return nil + } + uq, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + val, err = units.ParseStrictBytes(uq) + if err != nil { + return err + } + s = Size(val) + return nil +} + +func (n Number) UnmarshalTOML(b []byte) error { + value, err := strconv.ParseFloat(string(b), 64) + if err != nil { + return err + } + + n = Number(value) + return nil +} diff --git a/docker-compose.yml b/docker-compose.yml index 822d7fff1..eb96fc2bf 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -17,19 +17,20 @@ services: - KAFKA_ADVERTISED_HOST_NAME=localhost - KAFKA_ADVERTISED_PORT=9092 - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 - - KAFKA_CREATE_TOPICS="test:1:1" + - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 - JAVA_OPTS="-Xms256m 
-Xmx256m" ports: - "9092:9092" depends_on: - zookeeper elasticsearch: - image: elasticsearch:5 + image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0 environment: - - JAVA_OPTS="-Xms256m -Xmx256m" + - "ES_JAVA_OPTS=-Xms256m -Xmx256m" + - discovery.type=single-node + - xpack.security.enabled=false ports: - "9200:9200" - - "9300:9300" mysql: image: mysql environment: @@ -38,10 +39,19 @@ services: - "3306:3306" memcached: image: memcached - ports: + ports: - "11211:11211" + pgbouncer: + image: mbentley/ubuntu-pgbouncer + environment: + - PG_ENV_POSTGRESQL_USER=pgbouncer + - PG_ENV_POSTGRESQL_PASS=pgbouncer + ports: + - "6432:6432" postgres: image: postgres:alpine + environment: + - POSTGRES_HOST_AUTH_METHOD=trust ports: - "5432:5432" rabbitmq: @@ -83,11 +93,10 @@ services: ports: - "4200:4200" - "4230:4230" - - "5432:5432" + - "6543:5432" command: - crate - -Cnetwork.host=0.0.0.0 - -Ctransport.host=localhost - - -Clicense.enterprise=false environment: - CRATE_HEAP_SIZE=128m diff --git a/docs/AGGREGATORS.md b/docs/AGGREGATORS.md new file mode 100644 index 000000000..a5930a3e0 --- /dev/null +++ b/docs/AGGREGATORS.md @@ -0,0 +1,132 @@ +### Aggregator Plugins + +This section is for developers who want to create a new aggregator plugin. + +### Aggregator Plugin Guidelines + +* A aggregator must conform to the [telegraf.Aggregator][] interface. +* Aggregators should call `aggregators.Add` in their `init` function to + register themselves. See below for a quick example. +* To be available within Telegraf itself, plugins must add themselves to the + `github.com/influxdata/telegraf/plugins/aggregators/all/all.go` file. +- The `SampleConfig` function should return valid toml that describes how the + plugin can be configured. This is included in `telegraf config`. Please + consult the [SampleConfig][] page for the latest style guidelines. +* The `Description` function should say in one line what this aggregator does. +* The Aggregator plugin will need to keep caches of metrics that have passed + through it. This should be done using the builtin `HashID()` function of + each metric. +* When the `Reset()` function is called, all caches should be cleared. +- Follow the recommended [CodeStyle][]. + +### Aggregator Plugin Example + +```go +package min + +// min.go + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +type Min struct { + // caches for metric fields, names, and tags + fieldCache map[uint64]map[string]float64 + nameCache map[uint64]string + tagCache map[uint64]map[string]string +} + +func NewMin() telegraf.Aggregator { + m := &Min{} + m.Reset() + return m +} + +var sampleConfig = ` + ## period is the flush & clear interval of the aggregator. + period = "30s" + ## If true drop_original will drop the original metrics and + ## only send aggregates. + drop_original = false +` + +func (m *Min) Init() error { + return nil +} + +func (m *Min) SampleConfig() string { + return sampleConfig +} + +func (m *Min) Description() string { + return "Keep the aggregate min of each metric passing through." 
+} + +func (m *Min) Add(in telegraf.Metric) { + id := in.HashID() + if _, ok := m.nameCache[id]; !ok { + // hit an uncached metric, create caches for first time: + m.nameCache[id] = in.Name() + m.tagCache[id] = in.Tags() + m.fieldCache[id] = make(map[string]float64) + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + m.fieldCache[id][k] = fv + } + } + } else { + for k, v := range in.Fields() { + if fv, ok := convert(v); ok { + if _, ok := m.fieldCache[id][k]; !ok { + // hit an uncached field of a cached metric + m.fieldCache[id][k] = fv + continue + } + if fv < m.fieldCache[id][k] { + // set new minimum + m.fieldCache[id][k] = fv + } + } + } + } +} + +func (m *Min) Push(acc telegraf.Accumulator) { + for id, _ := range m.nameCache { + fields := map[string]interface{}{} + for k, v := range m.fieldCache[id] { + fields[k+"_min"] = v + } + acc.AddFields(m.nameCache[id], fields, m.tagCache[id]) + } +} + +func (m *Min) Reset() { + m.fieldCache = make(map[uint64]map[string]float64) + m.nameCache = make(map[uint64]string) + m.tagCache = make(map[uint64]map[string]string) +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case float64: + return v, true + case int64: + return float64(v), true + default: + return 0, false + } +} + +func init() { + aggregators.Add("min", func() telegraf.Aggregator { + return NewMin() + }) +} +``` + +[telegraf.Aggregator]: https://godoc.org/github.com/influxdata/telegraf#Aggregator +[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig +[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle diff --git a/docs/AGGREGATORS_AND_PROCESSORS.md b/docs/AGGREGATORS_AND_PROCESSORS.md index ffa9c8f7e..7be34aed5 100644 --- a/docs/AGGREGATORS_AND_PROCESSORS.md +++ b/docs/AGGREGATORS_AND_PROCESSORS.md @@ -44,13 +44,15 @@ to control which metrics are passed through a processor or aggregator. If a metric is filtered out the metric bypasses the plugin and is passed downstream to the next plugin. -**Processor** plugins process metrics as they pass through and immediately emit +### Processor +Processor plugins process metrics as they pass through and immediately emit results based on the values they process. For example, this could be printing all metrics or adding a tag to all metrics that pass through. -**Aggregator** plugins, on the other hand, are a bit more complicated. Aggregators +### Aggregator +Aggregator plugins, on the other hand, are a bit more complicated. Aggregators are typically for emitting new _aggregate_ metrics, such as a running mean, -minimum, maximum, quantiles, or standard deviation. For this reason, all _aggregator_ +minimum, maximum, or standard deviation. For this reason, all _aggregator_ plugins are configured with a `period`. The `period` is the size of the window of metrics that each _aggregate_ represents. In other words, the emitted _aggregate_ metric will be the aggregated value of the past `period` seconds. @@ -58,7 +60,8 @@ Since many users will only care about their aggregates and not every single metr gathered, there is also a `drop_original` argument, which tells Telegraf to only emit the aggregates and not the original metrics. -**NOTE** That since aggregators only aggregate metrics within their period, that -historical data is not supported. In other words, if your metric timestamp is more -than `now() - period` in the past, it will not be aggregated. 
If this is a feature -that you need, please comment on this [github issue](https://github.com/influxdata/telegraf/issues/1992) +Since aggregates are created for each measurement, field, and unique tag combination +the plugin receives, you can make use of `taginclude` to group +aggregates by specific tags only. + +**Note:** Aggregator plugins only aggregate metrics within their periods (`now() - period`). Data with a timestamp earlier than `now() - period` cannot be included. diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 39825376d..ca0b3946d 100644 --- a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -1,33 +1,25 @@ -# Telegraf Configuration +# Configuration -You can see the latest config file with all available plugins here: -[telegraf.conf](https://github.com/influxdata/telegraf/blob/master/etc/telegraf.conf) +Telegraf's configuration file is written using [TOML][] and is composed of +three sections: [global tags][], [agent][] settings, and [plugins][]. -## Generating a Configuration File +View the default [telegraf.conf][] config file with all available plugins. -A default Telegraf config file can be auto-generated by telegraf: +### Generating a Configuration File -``` +A default config file can be generated by telegraf: +```sh telegraf config > telegraf.conf ``` To generate a file with specific inputs and outputs, you can use the --input-filter and --output-filter flags: -``` +```sh telegraf --input-filter cpu:mem:net:swap --output-filter influxdb:kafka config ``` -## Environment Variables - -Environment variables can be used anywhere in the config file, simply prepend -them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), -for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) - -When using the `.deb` or `.rpm` packages, you can define environment variables -in the `/etc/default/telegraf` file. - -## Configuration file locations +### Configuration Loading The location of the configuration file can be set via the `--config` command line flag. @@ -40,180 +32,431 @@ On most systems, the default locations are `/etc/telegraf/telegraf.conf` for the main configuration file and `/etc/telegraf/telegraf.d` for the directory of configuration files. -# Global Tags +### Environment Variables -Global tags can be specified in the `[global_tags]` section of the config file -in key="value" format. All metrics being gathered on this host will be tagged -with the tags specified here. +Environment variables can be used anywhere in the config file, simply surround +them with `${}`. Replacement occurs before file parsing. For strings +the variable must be within quotes, e.g., `"${STR_VAR}"`, for numbers and booleans +they should be unquoted, e.g., `${INT_VAR}`, `${BOOL_VAR}`. -## Agent Configuration +When using the `.deb` or `.rpm` packages, you can define environment variables +in the `/etc/default/telegraf` file. -Telegraf has a few options you can configure under the `[agent]` section of the -config. +**Example**: -* **interval**: Default data collection interval for all inputs -* **round_interval**: Rounds collection interval to 'interval' -ie, if interval="10s" then always collect on :00, :10, :20, etc. -* **metric_batch_size**: Telegraf will send metrics to output in batch of at -most metric_batch_size metrics. -* **metric_buffer_limit**: Telegraf will cache metric_buffer_limit metrics -for each output, and will flush this buffer on a successful write. 
-This should be a multiple of metric_batch_size and could not be less -than 2 times metric_batch_size. -* **collection_jitter**: Collection jitter is used to jitter -the collection by a random amount. -Each plugin will sleep for a random time within jitter before collecting. -This can be used to avoid many plugins querying things like sysfs at the -same time, which can have a measurable effect on the system. -* **flush_interval**: Default data flushing interval for all outputs. -You should not set this below -interval. Maximum flush_interval will be flush_interval + flush_jitter -* **flush_jitter**: Jitter the flush interval by a random amount. -This is primarily to avoid -large write spikes for users running a large number of telegraf instances. -ie, a jitter of 5s and flush_interval 10s means flushes will happen every 10-15s. -* **precision**: - By default or when set to "0s", precision will be set to the same - timestamp order as the collection interval, with the maximum being 1s. - Precision will NOT be used for service inputs. It is up to each individual - service input to set the timestamp at the appropriate precision. - Valid time units are "ns", "us" (or "µs"), "ms", "s". +`/etc/default/telegraf`: +``` +USER="alice" +INFLUX_URL="http://localhost:8086" +INFLUX_SKIP_DATABASE_CREATION="true" +INFLUX_PASSWORD="monkey123" +``` -* **logfile**: Specify the log file name. The empty string means to log to stderr. -* **debug**: Run telegraf in debug mode. -* **quiet**: Run telegraf in quiet mode (error messages only). -* **hostname**: Override default hostname, if empty use os.Hostname(). -* **omit_hostname**: If true, do no set the "host" tag in the telegraf agent. +`/etc/telegraf.conf`: +```toml +[global_tags] + user = "${USER}" -## Input Configuration +[[inputs.mem]] -The following config parameters are available for all inputs: +[[outputs.influxdb]] + urls = ["${INFLUX_URL}"] + skip_database_creation = ${INFLUX_SKIP_DATABASE_CREATION} + password = "${INFLUX_PASSWORD}" +``` -* **interval**: How often to gather this metric. Normal plugins use a single -global interval, but if one particular input should be run less or more often, -you can configure that here. -* **name_override**: Override the base name of the measurement. -(Default is the name of the input). -* **name_prefix**: Specifies a prefix to attach to the measurement name. -* **name_suffix**: Specifies a suffix to attach to the measurement name. -* **tags**: A map of tags to apply to a specific input's measurements. +The above files will produce the following effective configuration file to be +parsed: +```toml +[global_tags] + user = "alice" -The [measurement filtering](#measurement-filtering) parameters can be used to -limit what metrics are emitted from the input plugin. +[[outputs.influxdb]] + urls = "http://localhost:8086" + skip_database_creation = true + password = "monkey123" +``` -## Output Configuration +### Intervals -The [measurement filtering](#measurement-filtering) parameters can be used to -limit what metrics are emitted from the output plugin. +Intervals are durations of time and can be specified for supporting settings by +combining an integer value and time unit as a string value. Valid time units are +`ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. +```toml +[agent] + interval = "10s" +``` -## Aggregator Configuration +### Global Tags -The following config parameters are available for all aggregators: - -* **period**: The period on which to flush & clear each aggregator. 
All metrics -that are sent with timestamps outside of this period will be ignored by the -aggregator. -* **delay**: The delay before each aggregator is flushed. This is to control -how long for aggregators to wait before receiving metrics from input plugins, -in the case that aggregators are flushing and inputs are gathering on the -same interval. -* **drop_original**: If true, the original metric will be dropped by the -aggregator and will not get sent to the output plugins. -* **name_override**: Override the base name of the measurement. -(Default is the name of the input). -* **name_prefix**: Specifies a prefix to attach to the measurement name. -* **name_suffix**: Specifies a suffix to attach to the measurement name. -* **tags**: A map of tags to apply to a specific input's measurements. - -The [measurement filtering](#measurement-filtering) parameters can be used to -limit what metrics are handled by the aggregator. Excluded metrics are passed -downstream to the next aggregator. - -## Processor Configuration - -The following config parameters are available for all processors: - -* **order**: This is the order in which the processor(s) get executed. If this -is not specified then processor execution order will be random. - -The [measurement filtering](#measurement-filtering) parameters can be used -to limit what metrics are handled by the processor. Excluded metrics are -passed downstream to the next processor. - -#### Measurement Filtering - -Filters can be configured per input, output, processor, or aggregator, -see below for examples. - -* **namepass**: -An array of glob pattern strings. Only points whose measurement name matches -a pattern in this list are emitted. -* **namedrop**: -The inverse of `namepass`. If a match is found the point is discarded. This -is tested on points after they have passed the `namepass` test. -* **fieldpass**: -An array of glob pattern strings. Only fields whose field key matches a -pattern in this list are emitted. -* **fielddrop**: -The inverse of `fieldpass`. Fields with a field key matching one of the -patterns will be discarded from the point. This is tested on points after -they have passed the `fieldpass` test. -* **tagpass**: -A table mapping tag keys to arrays of glob pattern strings. Only points -that contain a tag key in the table and a tag value matching one of its -patterns is emitted. -* **tagdrop**: -The inverse of `tagpass`. If a match is found the point is discarded. This -is tested on points after they have passed the `tagpass` test. -* **taginclude**: -An array of glob pattern strings. Only tags with a tag key matching one of -the patterns are emitted. In contrast to `tagpass`, which will pass an entire -point based on its tag, `taginclude` removes all non matching tags from the -point. This filter can be used on both inputs & outputs, but it is -_recommended_ to be used on inputs, as it is more efficient to filter out tags -at the ingestion point. -* **tagexclude**: -The inverse of `taginclude`. Tags with a tag key matching one of the patterns -will be discarded from the point. - -**NOTE** Due to the way TOML is parsed, `tagpass` and `tagdrop` parameters -must be defined at the _end_ of the plugin definition, otherwise subsequent -plugin config options will be interpreted as part of the tagpass/tagdrop -tables. - -#### Input Configuration Examples - -This is a full working config that will output CPU data to an InfluxDB instance -at 192.168.59.103:8086, tagging measurements with dc="denver-1". 
It will output -measurements at a 10s interval and will collect per-cpu data, dropping any -fields which begin with `time_`. +Global tags can be specified in the `[global_tags]` table in key="value" +format. All metrics that are gathered will be tagged with the tags specified. ```toml [global_tags] - dc = "denver-1" + dc = "us-east-1" +``` -[agent] - interval = "10s" +### Agent -# OUTPUTS -[[outputs.influxdb]] - url = "http://192.168.59.103:8086" # required. - database = "telegraf" # required. +The agent table configures Telegraf and the defaults used across all plugins. + +- **interval**: Default data collection [interval][] for all inputs. + +- **round_interval**: Rounds collection interval to [interval][] + ie, if interval="10s" then always collect on :00, :10, :20, etc. + +- **metric_batch_size**: + Telegraf will send metrics to outputs in batches of at most + metric_batch_size metrics. + This controls the size of writes that Telegraf sends to output plugins. + +- **metric_buffer_limit**: + Maximum number of unwritten metrics per output. Increasing this value + allows for longer periods of output downtime without dropping metrics at the + cost of higher maximum memory usage. + +- **collection_jitter**: + Collection jitter is used to jitter the collection by a random [interval][]. + Each plugin will sleep for a random time within jitter before collecting. + This can be used to avoid many plugins querying things like sysfs at the + same time, which can have a measurable effect on the system. + +- **flush_interval**: + Default flushing [interval][] for all outputs. Maximum flush_interval will be + flush_interval + flush_jitter. + +- **flush_jitter**: + Default flush jitter for all outputs. This jitters the flush [interval][] + by a random amount. This is primarily to avoid large write spikes for users + running a large number of telegraf instances. ie, a jitter of 5s and interval + 10s means flushes will happen every 10-15s. + + +- **precision**: + Collected metrics are rounded to the precision specified as an [interval][]. + + Precision will NOT be used for service inputs. It is up to each individual + service input to set the timestamp at the appropriate precision. + +- **debug**: + Log at debug level. + +- **quiet**: + Log only error level messages. + +- **logtarget**: + Log target controls the destination for logs and can be one of "file", + "stderr" or, on Windows, "eventlog". When set to "file", the output file is + determined by the "logfile" setting. + +- **logfile**: + Name of the file to be logged to when using the "file" logtarget. If set to + the empty string then logs are written to stderr. + + +- **logfile_rotation_interval**: + The logfile will be rotated after the time interval specified. When set to + 0 no time based rotation is performed. + +- **logfile_rotation_max_size**: + The logfile will be rotated when it becomes larger than the specified size. + When set to 0 no size based rotation is performed. + +- **logfile_rotation_max_archives**: + Maximum number of rotated archives to keep, any older logs are deleted. If + set to -1, no archives are removed. + +- **hostname**: + Override default hostname, if empty use os.Hostname() +- **omit_hostname**: + If set to true, do no set the "host" tag in the telegraf agent. + +### Plugins + +Telegraf plugins are divided into 4 types: [inputs][], [outputs][], +[processors][], and [aggregators][]. + +Unlike the `global_tags` and `agent` tables, any plugin can be defined +multiple times and each instance will run independently. 
This allows you to +have plugins defined with differing configurations as needed within a single +Telegraf process. + +Each plugin has a unique set of configuration options, reference the +sample configuration for details. Additionally, several options are available +on any plugin depending on its type. + +### Input Plugins + +Input plugins gather and create metrics. They support both polling and event +driven operation. + +Parameters that can be used with any input plugin: + +- **alias**: Name an instance of a plugin. +- **interval**: How often to gather this metric. Normal plugins use a single + global interval, but if one particular input should be run less or more + often, you can configure that here. +- **name_override**: Override the base name of the measurement. (Default is + the name of the input). +- **name_prefix**: Specifies a prefix to attach to the measurement name. +- **name_suffix**: Specifies a suffix to attach to the measurement name. +- **tags**: A map of tags to apply to a specific input's measurements. + +The [metric filtering][] parameters can be used to limit what metrics are +emitted from the input plugin. + +#### Examples + +Use the name_suffix parameter to emit measurements with the name `cpu_total`: +```toml +[[inputs.cpu]] + name_suffix = "_total" + percpu = false + totalcpu = true +``` + +Use the name_override parameter to emit measurements with the name `foobar`: +```toml +[[inputs.cpu]] + name_override = "foobar" + percpu = false + totalcpu = true +``` + +Emit measurements with two additional tags: `tag1=foo` and `tag2=bar` + +> **NOTE**: With TOML, order matters. Parameters belong to the last defined +> table header, place `[inputs.cpu.tags]` table at the _end_ of the plugin +> definition. +```toml +[[inputs.cpu]] + percpu = false + totalcpu = true + [inputs.cpu.tags] + tag1 = "foo" + tag2 = "bar" +``` + +Utilize `name_override`, `name_prefix`, or `name_suffix` config options to +avoid measurement collisions when defining multiple plugins: +```toml +[[inputs.cpu]] + percpu = false + totalcpu = true -# INPUTS [[inputs.cpu]] percpu = true totalcpu = false - # filter all fields beginning with 'time_' - fielddrop = ["time_*"] + name_override = "percpu_usage" + fielddrop = ["cpu_time*"] ``` -#### Input Config: tagpass and tagdrop +### Output Plugins -**NOTE** `tagpass` and `tagdrop` parameters must be defined at the _end_ of -the plugin definition, otherwise subsequent plugin config options will be -interpreted as part of the tagpass/tagdrop map. +Output plugins write metrics to a location. Outputs commonly write to +databases, network services, and messaging systems. +Parameters that can be used with any output plugin: + +- **alias**: Name an instance of a plugin. +- **flush_interval**: The maximum time between flushes. Use this setting to + override the agent `flush_interval` on a per plugin basis. +- **flush_jitter**: The amount of time to jitter the flush interval. Use this + setting to override the agent `flush_jitter` on a per plugin basis. +- **metric_batch_size**: The maximum number of metrics to send at once. Use + this setting to override the agent `metric_batch_size` on a per plugin basis. +- **metric_buffer_limit**: The maximum number of unsent metrics to buffer. + Use this setting to override the agent `metric_buffer_limit` on a per plugin + basis. +- **name_override**: Override the original name of the measurement. +- **name_prefix**: Specifies a prefix to attach to the measurement name. 
+- **name_suffix**: Specifies a suffix to attach to the measurement name. + +The [metric filtering][] parameters can be used to limit what metrics are +emitted from the output plugin. + +#### Examples + +Override flush parameters for a single output: +```toml +[agent] + flush_interval = "10s" + flush_jitter = "5s" + metric_batch_size = 1000 + +[[outputs.influxdb]] + urls = [ "http://example.org:8086" ] + database = "telegraf" + +[[outputs.file]] + files = [ "stdout" ] + flush_interval = "1s" + flush_jitter = "1s" + metric_batch_size = 10 +``` + +### Processor Plugins + +Processor plugins perform processing tasks on metrics and are commonly used to +rename or apply transformations to metrics. Processors are applied after the +input plugins and before any aggregator plugins. + +Parameters that can be used with any processor plugin: + +- **alias**: Name an instance of a plugin. +- **order**: The order in which the processor(s) are executed. If this is not + specified then processor execution order will be random. + +The [metric filtering][] parameters can be used to limit what metrics are +handled by the processor. Excluded metrics are passed downstream to the next +processor. + +#### Examples + +If the order processors are applied matters you must set order on all involved +processors: +```toml +[[processors.rename]] + order = 1 + [[processors.rename.replace]] + tag = "path" + dest = "resource" + +[[processors.strings]] + order = 2 + [[processors.strings.trim_prefix]] + tag = "resource" + prefix = "/api/" +``` + +### Aggregator Plugins + +Aggregator plugins produce new metrics after examining metrics over a time +period, as the name suggests they are commonly used to produce new aggregates +such as mean/max/min metrics. Aggregators operate on metrics after any +processors have been applied. + +Parameters that can be used with any aggregator plugin: + +- **alias**: Name an instance of a plugin. +- **period**: The period on which to flush & clear each aggregator. All + metrics that are sent with timestamps outside of this period will be ignored + by the aggregator. +- **delay**: The delay before each aggregator is flushed. This is to control + how long for aggregators to wait before receiving metrics from input + plugins, in the case that aggregators are flushing and inputs are gathering + on the same interval. +- **grace**: The duration when the metrics will still be aggregated + by the plugin, even though they're outside of the aggregation period. This + is needed in a situation when the agent is expected to receive late metrics + and it's acceptable to roll them up into next aggregation period. +- **drop_original**: If true, the original metric will be dropped by the + aggregator and will not get sent to the output plugins. +- **name_override**: Override the base name of the measurement. (Default is + the name of the input). +- **name_prefix**: Specifies a prefix to attach to the measurement name. +- **name_suffix**: Specifies a suffix to attach to the measurement name. +- **tags**: A map of tags to apply to a specific input's measurements. + +The [metric filtering][] parameters can be used to limit what metrics are +handled by the aggregator. Excluded metrics are passed downstream to the next +aggregator. + +#### Examples + +Collect and emit the min/max of the system load1 metric every 30s, dropping +the originals. +```toml +[[inputs.system]] + fieldpass = ["load1"] # collects system load1 metric. + +[[aggregators.minmax]] + period = "30s" # send & clear the aggregate every 30s. 
+ drop_original = true # drop the original metrics. + +[[outputs.file]] + files = ["stdout"] +``` + +Collect and emit the min/max of the swap metrics every 30s, dropping the +originals. The aggregator will not be applied to the system load metrics due +to the `namepass` parameter. +```toml +[[inputs.swap]] + +[[inputs.system]] + fieldpass = ["load1"] # collects system load1 metric. + +[[aggregators.minmax]] + period = "30s" # send & clear the aggregate every 30s. + drop_original = true # drop the original metrics. + namepass = ["swap"] # only "pass" swap metrics through the aggregator. + +[[outputs.file]] + files = ["stdout"] +``` + + +### Metric Filtering + +Metric filtering can be configured per plugin on any input, output, processor, +and aggregator plugin. Filters fall under two categories: Selectors and +Modifiers. + +#### Selectors + +Selector filters include or exclude entire metrics. When a metric is excluded +from a Input or an Output plugin, the metric is dropped. If a metric is +excluded from a Processor or Aggregator plugin, it is skips the plugin and is +sent onwards to the next stage of processing. + +- **namepass**: +An array of glob pattern strings. Only metrics whose measurement name matches +a pattern in this list are emitted. + +- **namedrop**: +The inverse of `namepass`. If a match is found the metric is discarded. This +is tested on metrics after they have passed the `namepass` test. + +- **tagpass**: +A table mapping tag keys to arrays of glob pattern strings. Only metrics +that contain a tag key in the table and a tag value matching one of its +patterns is emitted. + +- **tagdrop**: +The inverse of `tagpass`. If a match is found the metric is discarded. This +is tested on metrics after they have passed the `tagpass` test. + +#### Modifiers + +Modifier filters remove tags and fields from a metric. If all fields are +removed the metric is removed. + +- **fieldpass**: +An array of glob pattern strings. Only fields whose field key matches a +pattern in this list are emitted. + +- **fielddrop**: +The inverse of `fieldpass`. Fields with a field key matching one of the +patterns will be discarded from the metric. This is tested on metrics after +they have passed the `fieldpass` test. + +- **taginclude**: +An array of glob pattern strings. Only tags with a tag key matching one of +the patterns are emitted. In contrast to `tagpass`, which will pass an entire +metric based on its tag, `taginclude` removes all non matching tags from the +metric. Any tag can be filtered including global tags and the agent `host` +tag. + +- **tagexclude**: +The inverse of `taginclude`. Tags with a tag key matching one of the patterns +will be discarded from the metric. Any tag can be filtered including global +tags and the agent `host` tag. + +#### Filtering Examples + +##### Using tagpass and tagdrop: ```toml [[inputs.cpu]] percpu = true @@ -231,10 +474,22 @@ interpreted as part of the tagpass/tagdrop map. 
fstype = [ "ext4", "xfs" ] # Globs can also be used on the tag values path = [ "/opt", "/home*" ] + +[[inputs.win_perf_counters]] + [[inputs.win_perf_counters.object]] + ObjectName = "Network Interface" + Instances = ["*"] + Counters = [ + "Bytes Received/sec", + "Bytes Sent/sec" + ] + Measurement = "win_net" + # Don't send metrics where the Windows interface name (instance) begins with isatap or Local + [inputs.win_perf_counters.tagdrop] + instance = ["isatap*", "Local*"] ``` -#### Input Config: fieldpass and fielddrop - +##### Using fieldpass and fielddrop: ```toml # Drop all metrics for guest & steal CPU usage [[inputs.cpu]] @@ -247,8 +502,7 @@ interpreted as part of the tagpass/tagdrop map. fieldpass = ["inodes*"] ``` -#### Input Config: namepass and namedrop - +##### Using namepass and namedrop: ```toml # Drop all metrics about containers for kubelet [[inputs.prometheus]] @@ -261,8 +515,7 @@ interpreted as part of the tagpass/tagdrop map. namepass = ["rest_client_*"] ``` -#### Input Config: taginclude and tagexclude - +##### Using taginclude and tagexclude: ```toml # Only include the "cpu" tag in the measurements for the cpu plugin. [[inputs.cpu]] @@ -275,64 +528,7 @@ interpreted as part of the tagpass/tagdrop map. tagexclude = ["fstype"] ``` -#### Input config: prefix, suffix, and override - -This plugin will emit measurements with the name `cpu_total` - -```toml -[[inputs.cpu]] - name_suffix = "_total" - percpu = false - totalcpu = true -``` - -This will emit measurements with the name `foobar` - -```toml -[[inputs.cpu]] - name_override = "foobar" - percpu = false - totalcpu = true -``` - -#### Input config: tags - -This plugin will emit measurements with two additional tags: `tag1=foo` and -`tag2=bar` - -NOTE: Order matters, the `[inputs.cpu.tags]` table must be at the _end_ of the -plugin definition. - -```toml -[[inputs.cpu]] - percpu = false - totalcpu = true - [inputs.cpu.tags] - tag1 = "foo" - tag2 = "bar" -``` - -#### Multiple inputs of the same type - -Additional inputs (or outputs) of the same type can be specified, -just define more instances in the config file. It is highly recommended that -you utilize `name_override`, `name_prefix`, or `name_suffix` config options -to avoid measurement collisions: - -```toml -[[inputs.cpu]] - percpu = false - totalcpu = true - -[[inputs.cpu]] - percpu = true - totalcpu = false - name_override = "percpu_usage" - fielddrop = ["cpu_time*"] -``` - -#### Output Configuration Examples: - +##### Metrics can be routed to different outputs using the metric name and tags: ```toml [[outputs.influxdb]] urls = [ "http://localhost:8086" ] @@ -354,50 +550,43 @@ to avoid measurement collisions: cpu = ["cpu0"] ``` -#### Aggregator Configuration Examples: +##### Routing metrics to different outputs based on the input. -This will collect and emit the min/max of the system load1 metric every -30s, dropping the originals. +Metrics are tagged with `influxdb_database` in the input, which is then used to +select the output. The tag is removed in the outputs before writing. ```toml -[[inputs.system]] - fieldpass = ["load1"] # collects system load1 metric. +[[outputs.influxdb]] + urls = ["http://influxdb.example.com"] + database = "db_default" + [outputs.influxdb.tagdrop] + influxdb_database = ["*"] -[[aggregators.minmax]] - period = "30s" # send & clear the aggregate every 30s. - drop_original = true # drop the original metrics. 
+[[outputs.influxdb]] + urls = ["http://influxdb.example.com"] + database = "db_other" + tagexclude = ["influxdb_database"] + [outputs.influxdb.tagpass] + influxdb_database = ["other"] -[[outputs.file]] - files = ["stdout"] +[[inputs.disk]] + [inputs.disk.tags] + influxdb_database = "other" ``` -This will collect and emit the min/max of the swap metrics every -30s, dropping the originals. The aggregator will not be applied -to the system load metrics due to the `namepass` parameter. +### Transport Layer Security (TLS) -```toml -[[inputs.swap]] +Reference the detailed [TLS][] documentation. -[[inputs.system]] - fieldpass = ["load1"] # collects system load1 metric. - -[[aggregators.minmax]] - period = "30s" # send & clear the aggregate every 30s. - drop_original = true # drop the original metrics. - namepass = ["swap"] # only "pass" swap metrics through the aggregator. - -[[outputs.file]] - files = ["stdout"] -``` - -#### Processor Configuration Examples: - -Print only the metrics with `cpu` as the measurement name, all metrics are -passed to the output: -```toml -[[processors.printer]] - namepass = "cpu" - -[[outputs.file]] - files = ["/tmp/metrics.out"] -``` +[TOML]: https://github.com/toml-lang/toml#toml +[global tags]: #global-tags +[interval]: #intervals +[agent]: #agent +[plugins]: #plugins +[inputs]: #input-plugins +[outputs]: #output-plugins +[processors]: #processor-plugins +[aggregators]: #aggregator-plugins +[metric filtering]: #metric-filtering +[telegraf.conf]: /etc/telegraf.conf +[TLS]: /docs/TLS.md diff --git a/docs/DATA_FORMATS_INPUT.md b/docs/DATA_FORMATS_INPUT.md index c1192e72b..b71650168 100644 --- a/docs/DATA_FORMATS_INPUT.md +++ b/docs/DATA_FORMATS_INPUT.md @@ -1,37 +1,24 @@ -# Telegraf Input Data Formats +# Input Data Formats -Telegraf is able to parse the following input data formats into metrics: +Telegraf contains many general purpose plugins that support parsing input data +using a configurable parser into [metrics][]. This allows, for example, the +`kafka_consumer` input plugin to process messages in either InfluxDB Line +Protocol or in JSON format. -1. [InfluxDB Line Protocol](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx) -1. [JSON](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#json) -1. [Graphite](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite) -1. [Value](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#value), ie: 45 or "booyah" -1. [Nagios](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#nagios) (exec input only) -1. [Collectd](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#collectd) -1. [Dropwizard](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#dropwizard) +- [InfluxDB Line Protocol](/plugins/parsers/influx) +- [Collectd](/plugins/parsers/collectd) +- [CSV](/plugins/parsers/csv) +- [Dropwizard](/plugins/parsers/dropwizard) +- [Graphite](/plugins/parsers/graphite) +- [Grok](/plugins/parsers/grok) +- [JSON](/plugins/parsers/json) +- [Logfmt](/plugins/parsers/logfmt) +- [Nagios](/plugins/parsers/nagios) +- [Value](/plugins/parsers/value), ie: 45 or "booyah" +- [Wavefront](/plugins/parsers/wavefront) -Telegraf metrics, like InfluxDB -[points](https://docs.influxdata.com/influxdb/v0.10/write_protocols/line/), -are a combination of four basic parts: - -1. Measurement Name -1. Tags -1. Fields -1. 
Timestamp - -These four parts are easily defined when using InfluxDB line-protocol as a -data format. But there are other data formats that users may want to use which -require more advanced configuration to create usable Telegraf metrics. - -Plugins such as `exec` and `kafka_consumer` parse textual data. Up until now, -these plugins were statically configured to parse just a single -data format. `exec` mostly only supported parsing JSON, and `kafka_consumer` only -supported data in InfluxDB line-protocol. - -But now we are normalizing the parsing of various data formats across all -plugins that can support it. You will be able to identify a plugin that supports -different data formats by the presence of a `data_format` config option, for -example, in the exec plugin: +Any input plugin containing the `data_format` option can use it to select the +desired parser: ```toml [[inputs.exec]] @@ -46,610 +33,6 @@ example, in the exec plugin: ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" - - ## Additional configuration options go here ``` -Each data_format has an additional set of configuration options available, which -I'll go over below. - -# Influx: - -There are no additional configuration options for InfluxDB line-protocol. The -metrics are parsed directly into Telegraf metrics. - -#### Influx Configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "influx" -``` - -# JSON: - -The JSON data format flattens JSON into metric _fields_. -NOTE: Only numerical values are converted to fields, and they are converted -into a float. strings are ignored unless specified as a tag_key (see below). - -So for example, this JSON: - -```json -{ - "a": 5, - "b": { - "c": 6 - }, - "ignored": "I'm a string" -} -``` - -Would get translated into _fields_ of a measurement: - -``` -myjsonmetric a=5,b_c=6 -``` - -The _measurement_ _name_ is usually the name of the plugin, -but can be overridden using the `name_override` config option. - -#### JSON Configuration: - -The JSON data format supports specifying "tag keys". If specified, keys -will be searched for in the root-level of the JSON blob. If the key(s) exist, -they will be applied as tags to the Telegraf metrics. - -For example, if you had this configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "json" - - ## List of tag names to extract from top-level of JSON server response - tag_keys = [ - "my_tag_1", - "my_tag_2" - ] -``` - -with this JSON output from a command: - -```json -{ - "a": 5, - "b": { - "c": 6 - }, - "my_tag_1": "foo" -} -``` - -Your Telegraf metrics would get tagged with "my_tag_1" - -``` -exec_mycollector,my_tag_1=foo a=5,b_c=6 -``` - -If the JSON data is an array, then each element of the array is parsed with the configured settings. -Each resulting metric will be output with the same timestamp. - -For example, if the following configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/usr/bin/mycollector --foo=bar"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "json" - - ## List of tag names to extract from top-level of JSON server response - tag_keys = [ - "my_tag_1", - "my_tag_2" - ] -``` - -with this JSON output from a command: - -```json -[ - { - "a": 5, - "b": { - "c": 6 - }, - "my_tag_1": "foo", - "my_tag_2": "baz" - }, - { - "a": 7, - "b": { - "c": 8 - }, - "my_tag_1": "bar", - "my_tag_2": "baz" - } -] -``` - -Your Telegraf metrics would get tagged with "my_tag_1" and "my_tag_2" - -``` -exec_mycollector,my_tag_1=foo,my_tag_2=baz a=5,b_c=6 -exec_mycollector,my_tag_1=bar,my_tag_2=baz a=7,b_c=8 -``` - -# Value: - -The "value" data format translates single values into Telegraf metrics. This -is done by assigning a measurement name and setting a single field ("value") -as the parsed metric. - -#### Value Configuration: - -You **must** tell Telegraf what type of metric to collect by using the -`data_type` configuration option. Available options are: - -1. integer -2. float or long -3. string -4. boolean - -**Note:** It is also recommended that you set `name_override` to a measurement -name that makes sense for your metric, otherwise it will just be set to the -name of the plugin. - -```toml -[[inputs.exec]] - ## Commands array - commands = ["cat /proc/sys/kernel/random/entropy_avail"] - - ## override the default metric name of "exec" - name_override = "entropy_available" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "value" - data_type = "integer" # required -``` - -# Graphite: - -The Graphite data format translates graphite _dot_ buckets directly into -telegraf measurement names, with a single value field, and without any tags. -By default, the separator is left as ".", but this can be changed using the -"separator" argument. For more advanced options, -Telegraf supports specifying "templates" to translate -graphite buckets into Telegraf metrics. - -Templates are of the form: - -``` -"host.mytag.mytag.measurement.measurement.field*" -``` - -Where the following keywords exist: - -1. `measurement`: specifies that this section of the graphite bucket corresponds -to the measurement name. This can be specified multiple times. -2. 
`field`: specifies that this section of the graphite bucket corresponds -to the field name. This can be specified multiple times. -3. `measurement*`: specifies that all remaining elements of the graphite bucket -correspond to the measurement name. -4. `field*`: specifies that all remaining elements of the graphite bucket -correspond to the field name. - -Any part of the template that is not a keyword is treated as a tag key. This -can also be specified multiple times. - -NOTE: `field*` cannot be used in conjunction with `measurement*`! - -#### Measurement & Tag Templates: - -The most basic template is to specify a single transformation to apply to all -incoming metrics. So the following template: - -```toml -templates = [ - "region.region.measurement*" -] -``` - -would result in the following Graphite -> Telegraf transformation. - -``` -us.west.cpu.load 100 -=> cpu.load,region=us.west value=100 -``` - -Multiple templates can also be specified, but these should be differentiated -using _filters_ (see below for more details) - -```toml -templates = [ - "*.*.* region.region.measurement", # <- all 3-part measurements will match this one. - "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one. -] -``` - -#### Field Templates: - -The field keyword tells Telegraf to give the metric that field name. -So the following template: - -```toml -separator = "_" -templates = [ - "measurement.measurement.field.field.region" -] -``` - -would result in the following Graphite -> Telegraf transformation. - -``` -cpu.usage.idle.percent.eu-east 100 -=> cpu_usage,region=eu-east idle_percent=100 -``` - -The field key can also be derived from all remaining elements of the graphite -bucket by specifying `field*`: - -```toml -separator = "_" -templates = [ - "measurement.measurement.region.field*" -] -``` - -which would result in the following Graphite -> Telegraf transformation. - -``` -cpu.usage.eu-east.idle.percentage 100 -=> cpu_usage,region=eu-east idle_percentage=100 -``` - -#### Filter Templates: - -Users can also filter the template(s) to use based on the name of the bucket, -using glob matching, like so: - -```toml -templates = [ - "cpu.* measurement.measurement.region", - "mem.* measurement.measurement.host" -] -``` - -which would result in the following transformation: - -``` -cpu.load.eu-east 100 -=> cpu_load,region=eu-east value=100 - -mem.cached.localhost 256 -=> mem_cached,host=localhost value=256 -``` - -#### Adding Tags: - -Additional tags can be added to a metric that don't exist on the received metric. -You can add additional tags by specifying them after the pattern. -Tags have the same format as the line protocol. -Multiple tags are separated by commas. - -```toml -templates = [ - "measurement.measurement.field.region datacenter=1a" -] -``` - -would result in the following Graphite -> Telegraf transformation. - -``` -cpu.usage.idle.eu-east 100 -=> cpu_usage,region=eu-east,datacenter=1a idle=100 -``` - -There are many more options available, -[More details can be found here](https://github.com/influxdata/influxdb/tree/master/services/graphite#templates) - -#### Graphite Configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. 
- ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "graphite" - - ## This string will be used to join the matched values. - separator = "_" - - ## Each template line requires a template pattern. It can have an optional - ## filter before the template and separated by spaces. It can also have optional extra - ## tags following the template. Multiple tags should be separated by commas and no spaces - ## similar to the line protocol format. There can be only one default template. - ## Templates support below format: - ## 1. filter + template - ## 2. filter + template + extra tag(s) - ## 3. filter + template with field key - ## 4. default template - templates = [ - "*.app env.service.resource.measurement", - "stats.* .host.measurement* region=eu-east,agent=sensu", - "stats2.* .host.measurement.field", - "measurement*" - ] -``` - -# Nagios: - -There are no additional configuration options for Nagios line-protocol. The -metrics are parsed directly into Telegraf metrics. - -Note: Nagios Input Data Formats is only supported in `exec` input plugin. - -#### Nagios Configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"] - - ## measurement name suffix (for separating different commands) - name_suffix = "_mycollector" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "nagios" -``` - -# Collectd: - -The collectd format parses the collectd binary network protocol. Tags are -created for host, instance, type, and type instance. All collectd values are -added as float64 fields. - -For more information about the binary network protocol see -[here](https://collectd.org/wiki/index.php/Binary_protocol). - -You can control the cryptographic settings with parser options. Create an -authentication file and set `collectd_auth_file` to the path of the file, then -set the desired security level in `collectd_security_level`. - -Additional information including client setup can be found -[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup). - -You can also change the path to the typesdb or add additional typesdb using -`collectd_typesdb`. - -#### Collectd Configuration: - -```toml -[[inputs.socket_listener]] - service_address = "udp://127.0.0.1:25826" - name_prefix = "collectd_" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "collectd" - - ## Authentication file for cryptographic security levels - collectd_auth_file = "/etc/collectd/auth_file" - ## One of none (default), sign, or encrypt - collectd_security_level = "encrypt" - ## Path of to TypesDB specifications - collectd_typesdb = ["/usr/share/collectd/types.db"] -``` - -# Dropwizard: - -The dropwizard format can parse the JSON representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overriden by defining custom [measurement & tag templates](./DATA_FORMATS_INPUT.md#measurement--tag-templates). 
All field value types are supported, `string`, `number` and `boolean`. - -A typical JSON of a dropwizard metric registry: - -```json -{ - "version": "3.0.0", - "counters" : { - "measurement,tag1=green" : { - "count" : 1 - } - }, - "meters" : { - "measurement" : { - "count" : 1, - "m15_rate" : 1.0, - "m1_rate" : 1.0, - "m5_rate" : 1.0, - "mean_rate" : 1.0, - "units" : "events/second" - } - }, - "gauges" : { - "measurement" : { - "value" : 1 - } - }, - "histograms" : { - "measurement" : { - "count" : 1, - "max" : 1.0, - "mean" : 1.0, - "min" : 1.0, - "p50" : 1.0, - "p75" : 1.0, - "p95" : 1.0, - "p98" : 1.0, - "p99" : 1.0, - "p999" : 1.0, - "stddev" : 1.0 - } - }, - "timers" : { - "measurement" : { - "count" : 1, - "max" : 1.0, - "mean" : 1.0, - "min" : 1.0, - "p50" : 1.0, - "p75" : 1.0, - "p95" : 1.0, - "p98" : 1.0, - "p99" : 1.0, - "p999" : 1.0, - "stddev" : 1.0, - "m15_rate" : 1.0, - "m1_rate" : 1.0, - "m5_rate" : 1.0, - "mean_rate" : 1.0, - "duration_units" : "seconds", - "rate_units" : "calls/second" - } - } -} -``` - -Would get translated into 4 different measurements: - -``` -measurement,metric_type=counter,tag1=green count=1 -measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 -measurement,metric_type=gauge value=1 -measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0 -measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 -``` - -You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field. -Eg. to parse the following JSON document: - -```json -{ - "time" : "2017-02-22T14:33:03.662+02:00", - "tags" : { - "tag1" : "green", - "tag2" : "yellow" - }, - "metrics" : { - "counters" : { - "measurement" : { - "count" : 1 - } - }, - "meters" : {}, - "gauges" : {}, - "histograms" : {}, - "timers" : {} - } -} -``` -and translate it into: - -``` -measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000 -``` - -you simply need to use the following additional configuration properties: - -```toml -dropwizard_metric_registry_path = "metrics" -dropwizard_time_path = "time" -dropwizard_time_format = "2006-01-02T15:04:05Z07:00" -dropwizard_tags_path = "tags" -## tag paths per tag are supported too, eg. -#[inputs.yourinput.dropwizard_tag_paths] -# tag1 = "tags.tag1" -# tag2 = "tags.tag2" -``` - - -For more information about the dropwizard json format see -[here](http://metrics.dropwizard.io/3.1.0/manual/json/). - -#### Dropwizard Configuration: - -```toml -[[inputs.exec]] - ## Commands array - commands = ["curl http://localhost:8080/sys/metrics"] - timeout = "5s" - - ## Data format to consume. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md - data_format = "dropwizard" - - ## Used by the templating engine to join matched values when cardinality is > 1 - separator = "_" - - ## Each template line requires a template pattern. It can have an optional - ## filter before the template and separated by spaces. It can also have optional extra - ## tags following the template. Multiple tags should be separated by commas and no spaces - ## similar to the line protocol format. There can be only one default template. - ## Templates support below format: - ## 1. 
filter + template - ## 2. filter + template + extra tag(s) - ## 3. filter + template with field key - ## 4. default template - ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>) - templates = [] - - ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) - ## to locate the metric registry within the JSON document - # dropwizard_metric_registry_path = "metrics" - - ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) - ## to locate the default time of the measurements within the JSON document - # dropwizard_time_path = "time" - # dropwizard_time_format = "2006-01-02T15:04:05Z07:00" - - ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) - ## to locate the tags map within the JSON document - # dropwizard_tags_path = "tags" - - ## You may even use tag paths per tag - # [inputs.exec.dropwizard_tag_paths] - # tag1 = "tags.tag1" - # tag2 = "tags.tag2" - -``` \ No newline at end of file +[metrics]: /docs/METRICS.md diff --git a/docs/DATA_FORMATS_OUTPUT.md b/docs/DATA_FORMATS_OUTPUT.md index f4e41c254..a8650b250 100644 --- a/docs/DATA_FORMATS_OUTPUT.md +++ b/docs/DATA_FORMATS_OUTPUT.md @@ -4,12 +4,17 @@ In addition to output specific data formats, Telegraf supports a set of standard data formats that may be selected from when configuring many output plugins. -1. [InfluxDB Line Protocol](#influx) -1. [JSON](#json) -1. [Graphite](#graphite) +1. [InfluxDB Line Protocol](/plugins/serializers/influx) +1. [Carbon2](/plugins/serializers/carbon2) +1. [Graphite](/plugins/serializers/graphite) +1. [JSON](/plugins/serializers/json) +1. [Prometheus](/plugins/serializers/prometheus) +1. [SplunkMetric](/plugins/serializers/splunkmetric) +1. [Wavefront](/plugins/serializers/wavefront) You will be able to identify the plugins with support by the presence of a `data_format` config option, for example, in the `file` output plugin: + ```toml [[outputs.file]] ## Files to write to, "stdout" is a specially handled file. @@ -21,191 +26,3 @@ You will be able to identify the plugins with support by the presence of a ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` - -## Influx - -The `influx` data format outputs metrics using -[InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/). -This is the recommended format unless another format is required for -interoperability. - -### Influx Configuration -```toml -[[outputs.file]] - ## Files to write to, "stdout" is a specially handled file. - files = ["stdout", "/tmp/metrics.out"] - - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "influx" - - ## Maximum line length in bytes. Useful only for debugging. - # influx_max_line_bytes = 0 - - ## When true, fields will be output in ascending lexical order. Enabling - ## this option will result in decreased performance and is only recommended - ## when you need predictable ordering while debugging. - # influx_sort_fields = false - - ## When true, Telegraf will output unsigned integers as unsigned values, - ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned - ## integer values. 
Enabling this option will result in field type errors if - ## existing data has been written. - # influx_uint_support = false -``` - -## Graphite - -The Graphite data format is translated from Telegraf Metrics using either the -template pattern or tag support method. You can select between the two -methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support -method is used, otherwise the [`template` pattern](#template-pattern) is used. - -#### Template Pattern - -The `template` option describes how Telegraf traslates metrics into _dot_ -buckets. The default template is: - -``` -template = "host.tags.measurement.field" -``` - -In the above template, we have four parts: - -1. _host_ is a tag key. This can be any tag key that is in the Telegraf -metric(s). If the key doesn't exist, it will be ignored. If it does exist, the -tag value will be filled in. -1. _tags_ is a special keyword that outputs all remaining tag values, separated -by dots and in alphabetical order (by tag key). These will be filled after all -tag keys are filled. -1. _measurement_ is a special keyword that outputs the measurement name. -1. _field_ is a special keyword that outputs the field name. - -**Example Conversion**: - -``` -cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758 -=> -tars.cpu-total.us-east-1.cpu.usage_user 0.89 1455320690 -tars.cpu-total.us-east-1.cpu.usage_idle 98.09 1455320690 -``` - -Fields with string values will be skipped. Boolean fields will be converted -to 1 (true) or 0 (false). - -#### Graphite Tag Support - -When the `graphite_tag_support` option is enabled, the template pattern is not -used. Instead, tags are encoded using -[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html) -added in Graphite 1.1. The `metric_path` is a combination of the optional -`prefix` option, measurement name, and field name. - -The tag `name` is reserved by Graphite, any conflicting tags and will be encoded as `_name`. - -**Example Conversion**: -``` -cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758 -=> -cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 -cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 -``` - -### Graphite Configuration - -```toml -[[outputs.file]] - ## Files to write to, "stdout" is a specially handled file. - files = ["stdout", "/tmp/metrics.out"] - - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "graphite" - - ## Prefix added to each graphite bucket - prefix = "telegraf" - ## Graphite template pattern - template = "host.tags.measurement.field" - - ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later. - # graphite_tag_support = false -``` - -## JSON - -The JSON output data format output for a single metric is in the -form: -```json -{ - "fields": { - "field_1": 30, - "field_2": 4, - "field_N": 59, - "n_images": 660 - }, - "name": "docker", - "tags": { - "host": "raynor" - }, - "timestamp": 1458229140 -} -``` - -When an output plugin needs to emit multiple metrics at one time, it may use -the batch format. The use of batch format is determined by the plugin, -reference the documentation for the specific plugin. 
-```json -{ - "metrics": [ - { - "fields": { - "field_1": 30, - "field_2": 4, - "field_N": 59, - "n_images": 660 - }, - "name": "docker", - "tags": { - "host": "raynor" - }, - "timestamp": 1458229140 - }, - { - "fields": { - "field_1": 30, - "field_2": 4, - "field_N": 59, - "n_images": 660 - }, - "name": "docker", - "tags": { - "host": "raynor" - }, - "timestamp": 1458229140 - } - ] -} -``` - -### JSON Configuration - -```toml -[[outputs.file]] - ## Files to write to, "stdout" is a specially handled file. - files = ["stdout", "/tmp/metrics.out"] - - ## Data format to output. - ## Each data format has its own unique set of configuration options, read - ## more about them here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md - data_format = "json" - - ## The resolution to use for the metric timestamp. Must be a duration string - ## such as "1ns", "1us", "1ms", "10ms", "1s". Durations are truncated to - ## the power of 10 less than the specified units. - json_timestamp_units = "1s" -``` diff --git a/docs/FAQ.md b/docs/FAQ.md index 1d1c490aa..8819ee657 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -10,10 +10,13 @@ docker run --name telegraf -v /etc:/hostfs/etc:ro -v /proc:/hostfs/proc:ro -v /sys:/hostfs/sys:ro - -v /var/run/utmp:/var/run/utmp:ro + -v /var:/hostfs/var:ro + -v /run:/hostfs/run:ro -e HOST_ETC=/hostfs/etc -e HOST_PROC=/hostfs/proc -e HOST_SYS=/hostfs/sys + -e HOST_VAR=/hostfs/var + -e HOST_RUN=/hostfs/run -e HOST_MOUNT_PREFIX=/hostfs telegraf ``` @@ -40,6 +43,33 @@ If running as a service add the environment variable to `/etc/default/telegraf`: GODEBUG=netdns=cgo ``` +### Q: How can I manage series cardinality? + +High [series cardinality][], when not properly managed, can cause high load on +your database. Telegraf attempts to avoid creating series with high +cardinality, but some monitoring workloads such as tracking containers are +inherently high cardinality. These workloads can still be monitored, but care +must be taken to manage cardinality growth. + +You can use the following techniques to avoid cardinality issues: + +- Use [metric filtering][] options to exclude unneeded measurements and tags. +- Write to a database with an appropriate [retention policy][]. +- Limit series cardinality in your database using the + [max-series-per-database][] and [max-values-per-tag][] settings. +- Consider using the [Time Series Index][tsi]. +- Monitor your databases using the [show cardinality][] commands. +- Consult the [InfluxDB documentation][influx docs] for the most up-to-date techniques. + +[series cardinality]: https://docs.influxdata.com/influxdb/v1.7/concepts/glossary/#series-cardinality +[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering +[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ +[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 +[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 +[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ +[show cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality +[influx docs]: https://docs.influxdata.com/influxdb/latest/ + ### Q: When will the next version be released? 
The latest release date estimate can be viewed on the diff --git a/docs/INPUTS.md b/docs/INPUTS.md new file mode 100644 index 000000000..f8e906f31 --- /dev/null +++ b/docs/INPUTS.md @@ -0,0 +1,149 @@ +### Input Plugins + +This section is for developers who want to create new collection inputs. +Telegraf is entirely plugin driven. This interface allows operators to +pick and choose what is gathered and makes it easy for developers +to create new ways of generating metrics. + +Plugin authorship is kept as simple as possible to encourage people to develop +and submit new inputs. + +### Input Plugin Guidelines + +- A plugin must conform to the [telegraf.Input][] interface. +- Input Plugins should call `inputs.Add` in their `init` function to register + themselves. See below for a quick example. +- Input Plugins must be added to the + `github.com/influxdata/telegraf/plugins/inputs/all/all.go` file. +- The `SampleConfig` function should return valid toml that describes how the + plugin can be configured. This is included in `telegraf config`. Please + consult the [SampleConfig][] page for the latest style + guidelines. +- The `Description` function should say in one line what this plugin does. +- Follow the recommended [CodeStyle][]. + +Let's say you've written a plugin that emits metrics about processes on the +current host. + +### Input Plugin Example + +```go +package simple + +// simple.go + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Simple struct { + Ok bool `toml:"ok"` +} + +func (s *Simple) Description() string { + return "a demo plugin" +} + +func (s *Simple) SampleConfig() string { + return ` + ## Indicate if everything is fine + ok = true +` +} + +func (s *Simple) Init() error { + return nil +} + +func (s *Simple) Gather(acc telegraf.Accumulator) error { + if s.Ok { + acc.AddFields("state", map[string]interface{}{"value": "pretty good"}, nil) + } else { + acc.AddFields("state", map[string]interface{}{"value": "not great"}, nil) + } + + return nil +} + +func init() { + inputs.Add("simple", func() telegraf.Input { return &Simple{} }) +} +``` + +### Development + +* Run `make static` followed by `make plugin-[pluginName]` to spin up a docker + dev environment using docker-compose. +* ***[Optional]*** When developing a plugin, add a `dev` directory with a + `docker-compose.yml` and `telegraf.conf` as well as any other supporting + files, where sensible. + +### Typed Metrics + +In addition to the `AddFields` function, the accumulator also supports +functions to add typed metrics: `AddGauge`, `AddCounter`, etc. Metric types +are ignored by the InfluxDB output, but can be used for other outputs, such as +[prometheus][prom metric types]. + +### Data Formats + +Some input plugins, such as the [exec][] plugin, can accept any of the supported +[input data formats][]. + +In order to enable this, you must specify a `SetParser(parser parsers.Parser)` +function on the plugin object (see the exec plugin for an example), as well as +defining `parser` as a field of the object. + +You can then utilize the parser internally in your plugin, parsing data as you +see fit. Telegraf's configuration layer will take care of instantiating and +creating the `Parser` object. + +Add the following to the `SampleConfig()`: + +```toml + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + +### Service Input Plugins + +This section is for developers who want to create new "service" collection +inputs. A service plugin differs from a regular plugin in that it operates a +background service while Telegraf is running. One example would be the +`statsd` plugin, which operates a statsd server. + +Service Input Plugins are substantially more complicated than a regular +plugin, as they will require threads and locks to verify data integrity. +Service Input Plugins should be avoided unless there is no way to create their +behavior with a regular plugin. + +To create a Service Input implement the [telegraf.ServiceInput][] interface. + +### Metric Tracking + +Metric Tracking provides a system to be notified when metrics have been +successfully written to their outputs or otherwise discarded. This allows +inputs to be created that function as reliable queue consumers. + +To get started with metric tracking begin by calling `WithTracking` on the +[telegraf.Accumulator][]. Add metrics using the `AddTrackingMetricGroup` +function on the returned [telegraf.TrackingAccumulator][] and store the +`TrackingID`. The `Delivered()` channel will return a type with information +about the final delivery status of the metric group. + +Check the [amqp_consumer][] for an example implementation. + +[exec]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec +[amqp_consumer]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/amqp_consumer +[prom metric types]: https://prometheus.io/docs/concepts/metric_types/ +[input data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig +[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[telegraf.Input]: https://godoc.org/github.com/influxdata/telegraf#Input +[telegraf.ServiceInput]: https://godoc.org/github.com/influxdata/telegraf#ServiceInput +[telegraf.Accumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator +[telegraf.TrackingAccumulator]: https://godoc.org/github.com/influxdata/telegraf#Accumulator diff --git a/docs/LICENSE_OF_DEPENDENCIES.md b/docs/LICENSE_OF_DEPENDENCIES.md index 2d215984b..9e19d74d7 100644 --- a/docs/LICENSE_OF_DEPENDENCIES.md +++ b/docs/LICENSE_OF_DEPENDENCIES.md @@ -3,110 +3,170 @@ When distributed in a binary form, Telegraf may contain portions of the following works: -- code.cloudfoundry.org/clock [APACHE](https://github.com/cloudfoundry/clock/blob/master/LICENSE) -- collectd.org [MIT](https://github.com/collectd/go-collectd/blob/master/LICENSE) -- github.com/aerospike/aerospike-client-go [APACHE](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) -- github.com/amir/raidman [PUBLIC DOMAIN](https://github.com/amir/raidman/blob/master/UNLICENSE) -- github.com/armon/go-metrics [MIT](https://github.com/armon/go-metrics/blob/master/LICENSE) -- github.com/aws/aws-sdk-go [APACHE](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) -- github.com/beorn7/perks [MIT](https://github.com/beorn7/perks/blob/master/LICENSE) -- github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE) -- github.com/bsm/sarama-cluster [MIT](https://github.com/bsm/sarama-cluster/blob/master/LICENSE) -- github.com/cenkalti/backoff 
[MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE) -- github.com/chuckpreslar/rcon [MIT](https://github.com/chuckpreslar/rcon#license) -- github.com/couchbase/go-couchbase [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) -- github.com/couchbase/gomemcached [MIT](https://github.com/couchbase/gomemcached/blob/master/LICENSE) -- github.com/couchbase/goutils [MIT](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) -- github.com/dancannon/gorethink [APACHE](https://github.com/dancannon/gorethink/blob/master/LICENSE) -- github.com/davecgh/go-spew [ISC](https://github.com/davecgh/go-spew/blob/master/LICENSE) -- github.com/docker/docker [APACHE](https://github.com/docker/docker/blob/master/LICENSE) -- github.com/docker/cli [APACHE](https://github.com/docker/cli/blob/master/LICENSE) -- github.com/eapache/go-resiliency [MIT](https://github.com/eapache/go-resiliency/blob/master/LICENSE) -- github.com/eapache/go-xerial-snappy [MIT](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) -- github.com/eapache/queue [MIT](https://github.com/eapache/queue/blob/master/LICENSE) -- github.com/eclipse/paho.mqtt.golang [ECLIPSE](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) -- github.com/fsnotify/fsnotify [BSD](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) -- github.com/fsouza/go-dockerclient [BSD](https://github.com/fsouza/go-dockerclient/blob/master/LICENSE) -- github.com/gobwas/glob [MIT](https://github.com/gobwas/glob/blob/master/LICENSE) -- github.com/google/go-cmp [BSD](https://github.com/google/go-cmp/blob/master/LICENSE) -- github.com/gogo/protobuf [BSD](https://github.com/gogo/protobuf/blob/master/LICENSE) -- github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE) -- github.com/golang/snappy [BSD](https://github.com/golang/snappy/blob/master/LICENSE) -- github.com/go-logfmt/logfmt [MIT](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) -- github.com/gorilla/mux [BSD](https://github.com/gorilla/mux/blob/master/LICENSE) -- github.com/go-ini/ini [APACHE](https://github.com/go-ini/ini/blob/master/LICENSE) -- github.com/go-ole/go-ole [MPL](http://mattn.mit-license.org/2013) -- github.com/go-sql-driver/mysql [MPL](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) -- github.com/hailocab/go-hostpool [MIT](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) -- github.com/hashicorp/consul [MPL](https://github.com/hashicorp/consul/blob/master/LICENSE) -- github.com/hashicorp/go-msgpack [BSD](https://github.com/hashicorp/go-msgpack/blob/master/LICENSE) -- github.com/hashicorp/raft-boltdb [MPL](https://github.com/hashicorp/raft-boltdb/blob/master/LICENSE) -- github.com/hashicorp/raft [MPL](https://github.com/hashicorp/raft/blob/master/LICENSE) -- github.com/influxdata/tail [MIT](https://github.com/influxdata/tail/blob/master/LICENSE.txt) -- github.com/influxdata/toml [MIT](https://github.com/influxdata/toml/blob/master/LICENSE) -- github.com/influxdata/go-syslog [MIT](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) -- github.com/influxdata/wlog [MIT](https://github.com/influxdata/wlog/blob/master/LICENSE) -- github.com/jackc/pgx [MIT](https://github.com/jackc/pgx/blob/master/LICENSE) -- github.com/jmespath/go-jmespath [APACHE](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) -- github.com/kardianos/osext [BSD](https://github.com/kardianos/osext/blob/master/LICENSE) -- github.com/kardianos/service 
[ZLIB](https://github.com/kardianos/service/blob/master/LICENSE) (License not named but matches word for word with ZLib) -- github.com/kballard/go-shellquote [MIT](https://github.com/kballard/go-shellquote/blob/master/LICENSE) -- github.com/lib/pq [MIT](https://github.com/lib/pq/blob/master/LICENSE.md) -- github.com/matttproud/golang_protobuf_extensions [APACHE](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) -- github.com/Microsoft/ApplicationInsights-Go [APACHE](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) -- github.com/Microsoft/go-winio [MIT](https://github.com/Microsoft/go-winio/blob/master/LICENSE) -- github.com/miekg/dns [BSD](https://github.com/miekg/dns/blob/master/LICENSE) -- github.com/naoina/go-stringutil [MIT](https://github.com/naoina/go-stringutil/blob/master/LICENSE) -- github.com/naoina/toml [MIT](https://github.com/naoina/toml/blob/master/LICENSE) -- github.com/nats-io/gnatsd [MIT](https://github.com/nats-io/gnatsd/blob/master/LICENSE) -- github.com/nats-io/go-nats [MIT](https://github.com/nats-io/go-nats/blob/master/LICENSE) -- github.com/nats-io/nats [MIT](https://github.com/nats-io/nats/blob/master/LICENSE) -- github.com/nats-io/nuid [MIT](https://github.com/nats-io/nuid/blob/master/LICENSE) -- github.com/nsqio/go-nsq [MIT](https://github.com/nsqio/go-nsq/blob/master/LICENSE) -- github.com/opentracing-contrib/go-observer [APACHE](https://github.com/opentracing-contrib/go-observer/blob/master/LICENSE) -- github.com/opentracing/opentracing-go [MIT](https://github.com/opentracing/opentracing-go/blob/master/LICENSE) -- github.com/openzipkin/zipkin-go-opentracing [MIT](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) -- github.com/pierrec/lz4 [BSD](https://github.com/pierrec/lz4/blob/master/LICENSE) -- github.com/pierrec/xxHash [BSD](https://github.com/pierrec/xxHash/blob/master/LICENSE) -- github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE) -- github.com/pmezard/go-difflib [BSD](https://github.com/pmezard/go-difflib/blob/master/LICENSE) -- github.com/prometheus/client_golang [APACHE](https://github.com/prometheus/client_golang/blob/master/LICENSE) -- github.com/prometheus/client_model [APACHE](https://github.com/prometheus/client_model/blob/master/LICENSE) -- github.com/prometheus/common [APACHE](https://github.com/prometheus/common/blob/master/LICENSE) -- github.com/prometheus/procfs [APACHE](https://github.com/prometheus/procfs/blob/master/LICENSE) -- github.com/rcrowley/go-metrics [BSD](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) -- github.com/samuel/go-zookeeper [BSD](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) -- github.com/satori/go.uuid [MIT](https://github.com/satori/go.uuid/blob/master/LICENSE) -- github.com/shirou/gopsutil [BSD](https://github.com/shirou/gopsutil/blob/master/LICENSE) -- github.com/shirou/w32 [BSD](https://github.com/shirou/w32/blob/master/LICENSE) -- github.com/Shopify/sarama [MIT](https://github.com/Shopify/sarama/blob/master/MIT-LICENSE) -- github.com/Sirupsen/logrus [MIT](https://github.com/Sirupsen/logrus/blob/master/LICENSE) -- github.com/StackExchange/wmi [MIT](https://github.com/StackExchange/wmi/blob/master/LICENSE) -- github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md) -- github.com/soniah/gosnmp [BSD](https://github.com/soniah/gosnmp/blob/master/LICENSE) -- github.com/streadway/amqp [BSD](https://github.com/streadway/amqp/blob/master/LICENSE) -- 
github.com/stretchr/objx [MIT](https://github.com/stretchr/objx/blob/master/LICENSE.md) -- github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENCE.txt) -- github.com/tidwall/gjson [MIT](https://github.com/tidwall/gjson/blob/master/LICENSE) -- github.com/tidwall/match [MIT](https://github.com/tidwall/match/blob/master/LICENSE) -- github.com/mitchellh/mapstructure [MIT](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) -- github.com/multiplay/go-ts3 [BSD](https://github.com/multiplay/go-ts3/blob/master/LICENSE) -- github.com/vjeantet/grok [APACHE](https://github.com/vjeantet/grok/blob/master/LICENSE) -- github.com/wvanbergen/kafka [MIT](https://github.com/wvanbergen/kafka/blob/master/LICENSE) -- github.com/wvanbergen/kazoo-go [MIT](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) -- github.com/yuin/gopher-lua [MIT](https://github.com/yuin/gopher-lua/blob/master/LICENSE) -- github.com/zensqlmonitor/go-mssqldb [BSD](https://github.com/zensqlmonitor/go-mssqldb/blob/master/LICENSE.txt) -- golang.org/x/crypto [BSD](https://github.com/golang/crypto/blob/master/LICENSE) -- golang.org/x/net [BSD](https://go.googlesource.com/net/+/master/LICENSE) -- golang.org/x/text [BSD](https://go.googlesource.com/text/+/master/LICENSE) -- golang.org/x/sys [BSD](https://go.googlesource.com/sys/+/master/LICENSE) -- google.golang.org/grpc [APACHE](https://github.com/google/grpc-go/blob/master/LICENSE) -- google.golang.org/genproto [APACHE](https://github.com/google/go-genproto/blob/master/LICENSE) -- gopkg.in/asn1-ber.v1 [MIT](https://github.com/go-asn1-ber/asn1-ber/blob/v1.2/LICENSE) -- gopkg.in/dancannon/gorethink.v1 [APACHE](https://github.com/dancannon/gorethink/blob/v1.1.2/LICENSE) -- gopkg.in/fatih/pool.v2 [MIT](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) -- gopkg.in/ldap.v2 [MIT](https://github.com/go-ldap/ldap/blob/v2.5.0/LICENSE) -- gopkg.in/mgo.v2 [BSD](https://github.com/go-mgo/mgo/blob/v2/LICENSE) -- gopkg.in/olivere/elastic.v5 [MIT](https://github.com/olivere/elastic/blob/v5.0.38/LICENSE) -- gopkg.in/tomb.v1 [BSD](https://github.com/go-tomb/tomb/blob/v1/LICENSE) -- gopkg.in/yaml.v2 [APACHE](https://github.com/go-yaml/yaml/blob/v2/LICENSE) +- cloud.google.com/go [Apache License 2.0](https://github.com/googleapis/google-cloud-go/blob/master/LICENSE) +- code.cloudfoundry.org/clock [Apache License 2.0](https://github.com/cloudfoundry/clock/blob/master/LICENSE) +- collectd.org [MIT License](https://git.octo.it/?p=collectd.git;a=blob;f=COPYING;hb=HEAD) +- github.com/Azure/azure-amqp-common-go [MIT License](https://github.com/Azure/azure-amqp-common-go/blob/master/LICENSE) +- github.com/Azure/azure-event-hubs-go [MIT License](https://github.com/Azure/azure-event-hubs-go/blob/master/LICENSE) +- github.com/Azure/azure-pipeline-go [MIT License](https://github.com/Azure/azure-pipeline-go/blob/master/LICENSE) +- github.com/Azure/azure-sdk-for-go [Apache License 2.0](https://github.com/Azure/azure-sdk-for-go/blob/master/LICENSE) +- github.com/Azure/azure-storage-queue-go [MIT License](https://github.com/Azure/azure-storage-queue-go/blob/master/LICENSE) +- github.com/Azure/go-amqp [MIT License](https://github.com/Azure/go-amqp/blob/master/LICENSE) +- github.com/Azure/go-autorest [Apache License 2.0](https://github.com/Azure/go-autorest/blob/master/LICENSE) +- github.com/Mellanox/rdmamap [Apache License 2.0](https://github.com/Mellanox/rdmamap/blob/master/LICENSE) +- github.com/Microsoft/ApplicationInsights-Go [MIT 
License](https://github.com/Microsoft/ApplicationInsights-Go/blob/master/LICENSE) +- github.com/Microsoft/go-winio [MIT License](https://github.com/Microsoft/go-winio/blob/master/LICENSE) +- github.com/Shopify/sarama [MIT License](https://github.com/Shopify/sarama/blob/master/LICENSE) +- github.com/StackExchange/wmi [MIT License](https://github.com/StackExchange/wmi/blob/master/LICENSE) +- github.com/aerospike/aerospike-client-go [Apache License 2.0](https://github.com/aerospike/aerospike-client-go/blob/master/LICENSE) +- github.com/alecthomas/units [MIT License](https://github.com/alecthomas/units/blob/master/COPYING) +- github.com/amir/raidman [The Unlicense](https://github.com/amir/raidman/blob/master/UNLICENSE) +- github.com/apache/thrift [Apache License 2.0](https://github.com/apache/thrift/blob/master/LICENSE) +- github.com/aristanetworks/glog [Apache License 2.0](https://github.com/aristanetworks/glog/blob/master/LICENSE) +- github.com/aristanetworks/goarista [Apache License 2.0](https://github.com/aristanetworks/goarista/blob/master/COPYING) +- github.com/aws/aws-sdk-go [Apache License 2.0](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) +- github.com/benbjohnson/clock [MIT License](https://github.com/benbjohnson/clock/blob/master/LICENSE) +- github.com/beorn7/perks [MIT License](https://github.com/beorn7/perks/blob/master/LICENSE) +- github.com/caio/go-tdigest [MIT License](https://github.com/caio/go-tdigest/blob/master/LICENSE) +- github.com/cenkalti/backoff [MIT License](https://github.com/cenkalti/backoff/blob/master/LICENSE) +- github.com/cespare/xxhash [MIT License](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) +- github.com/cisco-ie/nx-telemetry-proto [Apache License 2.0](https://github.com/cisco-ie/nx-telemetry-proto/blob/master/LICENSE) +- github.com/couchbase/go-couchbase [MIT License](https://github.com/couchbase/go-couchbase/blob/master/LICENSE) +- github.com/couchbase/gomemcached [MIT License](https://github.com/couchbase/gomemcached/blob/master/LICENSE) +- github.com/couchbase/goutils [COUCHBASE INC. 
COMMUNITY EDITION LICENSE](https://github.com/couchbase/goutils/blob/master/LICENSE.md) +- github.com/davecgh/go-spew [ISC License](https://github.com/davecgh/go-spew/blob/master/LICENSE) +- github.com/denisenkom/go-mssqldb [BSD 3-Clause "New" or "Revised" License](https://github.com/denisenkom/go-mssqldb/blob/master/LICENSE.txt) +- github.com/devigned/tab [MIT License](https://github.com/devigned/tab/blob/master/LICENSE) +- github.com/dgrijalva/jwt-go [MIT License](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) +- github.com/dimchansky/utfbom [Apache License 2.0](https://github.com/dimchansky/utfbom/blob/master/LICENSE) +- github.com/docker/distribution [Apache License 2.0](https://github.com/docker/distribution/blob/master/LICENSE) +- github.com/docker/docker [Apache License 2.0](https://github.com/docker/docker/blob/master/LICENSE) +- github.com/docker/go-connections [Apache License 2.0](https://github.com/docker/go-connections/blob/master/LICENSE) +- github.com/docker/go-units [Apache License 2.0](https://github.com/docker/go-units/blob/master/LICENSE) +- github.com/docker/libnetwork [Apache License 2.0](https://github.com/docker/libnetwork/blob/master/LICENSE) +- github.com/eapache/go-resiliency [MIT License](https://github.com/eapache/go-resiliency/blob/master/LICENSE) +- github.com/eapache/go-xerial-snappy [MIT License](https://github.com/eapache/go-xerial-snappy/blob/master/LICENSE) +- github.com/eapache/queue [MIT License](https://github.com/eapache/queue/blob/master/LICENSE) +- github.com/eclipse/paho.mqtt.golang [Eclipse Public License - v 1.0](https://github.com/eclipse/paho.mqtt.golang/blob/master/LICENSE) +- github.com/ericchiang/k8s [Apache License 2.0](https://github.com/ericchiang/k8s/blob/master/LICENSE) +- github.com/ghodss/yaml [MIT License](https://github.com/ghodss/yaml/blob/master/LICENSE) +- github.com/glinton/ping [MIT License](https://github.com/glinton/ping/blob/master/LICENSE) +- github.com/go-logfmt/logfmt [MIT License](https://github.com/go-logfmt/logfmt/blob/master/LICENSE) +- github.com/go-ole/go-ole [MIT License](https://github.com/go-ole/go-ole/blob/master/LICENSE) +- github.com/go-redis/redis [BSD 2-Clause "Simplified" License](https://github.com/go-redis/redis/blob/master/LICENSE) +- github.com/go-sql-driver/mysql [Mozilla Public License 2.0](https://github.com/go-sql-driver/mysql/blob/master/LICENSE) +- github.com/goburrow/modbus [BSD 3-Clause "New" or "Revised" License](https://github.com/goburrow/modbus/blob/master/LICENSE) +- github.com/goburrow/serial [MIT License](https://github.com/goburrow/serial/LICENSE) +- github.com/gobwas/glob [MIT License](https://github.com/gobwas/glob/blob/master/LICENSE) +- github.com/gofrs/uuid [MIT License](https://github.com/gofrs/uuid/blob/master/LICENSE) +- github.com/gogo/protobuf [BSD 3-Clause Clear License](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/geo [Apache License 2.0](https://github.com/golang/geo/blob/master/LICENSE) +- github.com/golang/groupcache [Apache License 2.0](https://github.com/golang/groupcache/blob/master/LICENSE) +- github.com/golang/mock [Apache License 2.0](https://github.com/golang/mock/blob/master/LICENSE) +- github.com/golang/protobuf [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/protobuf/blob/master/LICENSE) +- github.com/golang/snappy [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/google/go-cmp [BSD 3-Clause "New" or "Revised" 
License](https://github.com/google/go-cmp/blob/master/LICENSE) +- github.com/google/go-github [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-github/blob/master/LICENSE) +- github.com/google/go-querystring [BSD 3-Clause "New" or "Revised" License](https://github.com/google/go-querystring/blob/master/LICENSE) +- github.com/googleapis/gax-go [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/gax-go/blob/master/LICENSE) +- github.com/gorilla/mux [BSD 3-Clause "New" or "Revised" License](https://github.com/gorilla/mux/blob/master/LICENSE) +- github.com/hailocab/go-hostpool [MIT License](https://github.com/hailocab/go-hostpool/blob/master/LICENSE) +- github.com/harlow/kinesis-consumer [MIT License](https://github.com/harlow/kinesis-consumer/blob/master/MIT-LICENSE) +- github.com/hashicorp/consul [Mozilla Public License 2.0](https://github.com/hashicorp/consul/blob/master/LICENSE) +- github.com/hashicorp/go-cleanhttp [Mozilla Public License 2.0](https://github.com/hashicorp/go-cleanhttp/blob/master/LICENSE) +- github.com/hashicorp/go-rootcerts [Mozilla Public License 2.0](https://github.com/hashicorp/go-rootcerts/blob/master/LICENSE) +- github.com/hashicorp/go-uuid [Mozilla Public License 2.0](https://github.com/hashicorp/go-uuid/LICENSE) +- github.com/hashicorp/serf [Mozilla Public License 2.0](https://github.com/hashicorp/serf/blob/master/LICENSE) +- github.com/influxdata/go-syslog [MIT License](https://github.com/influxdata/go-syslog/blob/develop/LICENSE) +- github.com/influxdata/tail [MIT License](https://github.com/influxdata/tail/blob/master/LICENSE.txt) +- github.com/influxdata/toml [MIT License](https://github.com/influxdata/toml/blob/master/LICENSE) +- github.com/influxdata/wlog [MIT License](https://github.com/influxdata/wlog/blob/master/LICENSE) +- github.com/jackc/pgx [MIT License](https://github.com/jackc/pgx/blob/master/LICENSE) +- github.com/jcmturner/gofork [BSD 3-Clause "New" or "Revised" License](https://github.com/jcmturner/gofork/blob/master/LICENSE) +- github.com/jmespath/go-jmespath [Apache License 2.0](https://github.com/jmespath/go-jmespath/blob/master/LICENSE) +- github.com/jpillora/backoff [MIT License](https://github.com/jpillora/backoff/blob/master/LICENSE) +- github.com/kardianos/service [zlib License](https://github.com/kardianos/service/blob/master/LICENSE) +- github.com/karrick/godirwalk [BSD 2-Clause "Simplified" License](https://github.com/karrick/godirwalk/blob/master/LICENSE) +- github.com/kballard/go-shellquote [MIT License](https://github.com/kballard/go-shellquote/blob/master/LICENSE) +- github.com/klauspost/compress [BSD 3-Clause Clear License](https://github.com/klauspost/compress/blob/master/LICENSE) +- github.com/konsorten/go-windows-terminal-sequences [MIT License](https://github.com/konsorten/go-windows-terminal-sequences/blob/master/LICENSE) +- github.com/kubernetes/apimachinery [Apache License 2.0](https://github.com/kubernetes/apimachinery/blob/master/LICENSE) +- github.com/leodido/ragel-machinery [MIT License](https://github.com/leodido/ragel-machinery/blob/develop/LICENSE) +- github.com/mailru/easyjson [MIT License](https://github.com/mailru/easyjson/blob/master/LICENSE) +- github.com/matttproud/golang_protobuf_extensions [Apache License 2.0](https://github.com/matttproud/golang_protobuf_extensions/blob/master/LICENSE) +- github.com/mdlayher/apcupsd [MIT License](https://github.com/mdlayher/apcupsd/blob/master/LICENSE.md) +- github.com/mdlayher/genetlink [MIT 
License](https://github.com/mdlayher/genetlink/blob/master/LICENSE.md) +- github.com/mdlayher/netlink [MIT License](https://github.com/mdlayher/netlink/blob/master/LICENSE.md) +- github.com/miekg/dns [BSD 3-Clause Clear License](https://github.com/miekg/dns/blob/master/LICENSE) +- github.com/mitchellh/go-homedir [MIT License](https://github.com/mitchellh/go-homedir/blob/master/LICENSE) +- github.com/mitchellh/mapstructure [MIT License](https://github.com/mitchellh/mapstructure/blob/master/LICENSE) +- github.com/multiplay/go-ts3 [BSD 2-Clause "Simplified" License](https://github.com/multiplay/go-ts3/blob/master/LICENSE) +- github.com/naoina/go-stringutil [MIT License](https://github.com/naoina/go-stringutil/blob/master/LICENSE) +- github.com/nats-io/jwt [Apache License 2.0](https://github.com/nats-io/jwt/blob/master/LICENSE) +- github.com/nats-io/nats-server [Apache License 2.0](https://github.com/nats-io/nats-server/blob/master/LICENSE) +- github.com/nats-io/nats.go [Apache License 2.0](https://github.com/nats-io/nats.go/blob/master/LICENSE) +- github.com/nats-io/nkeys [Apache License 2.0](https://github.com/nats-io/nkeys/blob/master/LICENSE) +- github.com/nats-io/nuid [Apache License 2.0](https://github.com/nats-io/nuid/blob/master/LICENSE) +- github.com/newrelic/newrelic-telemetry-sdk-go [Apache License 2.0](https://github.com/newrelic/newrelic-telemetry-sdk-go/blob/master/LICENSE.md) +- github.com/nsqio/go-nsq [MIT License](https://github.com/nsqio/go-nsq/blob/master/LICENSE) +- github.com/openconfig/gnmi [Apache License 2.0](https://github.com/openconfig/gnmi/blob/master/LICENSE) +- github.com/opencontainers/go-digest [Apache License 2.0](https://github.com/opencontainers/go-digest/blob/master/LICENSE) +- github.com/opencontainers/image-spec [Apache License 2.0](https://github.com/opencontainers/image-spec/blob/master/LICENSE) +- github.com/openzipkin/zipkin-go-opentracing [MIT License](https://github.com/openzipkin/zipkin-go-opentracing/blob/master/LICENSE) +- github.com/pierrec/lz4 [BSD 3-Clause "New" or "Revised" License](https://github.com/pierrec/lz4/blob/master/LICENSE) +- github.com/pkg/errors [BSD 2-Clause "Simplified" License](https://github.com/pkg/errors/blob/master/LICENSE) +- github.com/pmezard/go-difflib [BSD 3-Clause Clear License](https://github.com/pmezard/go-difflib/blob/master/LICENSE) +- github.com/prometheus/client_golang [Apache License 2.0](https://github.com/prometheus/client_golang/blob/master/LICENSE) +- github.com/prometheus/client_model [Apache License 2.0](https://github.com/prometheus/client_model/blob/master/LICENSE) +- github.com/prometheus/common [Apache License 2.0](https://github.com/prometheus/common/blob/master/LICENSE) +- github.com/prometheus/procfs [Apache License 2.0](https://github.com/prometheus/procfs/blob/master/LICENSE) +- github.com/rcrowley/go-metrics [MIT License](https://github.com/rcrowley/go-metrics/blob/master/LICENSE) +- github.com/safchain/ethtool [Apache License 2.0](https://github.com/safchain/ethtool/blob/master/LICENSE) +- github.com/samuel/go-zookeeper [BSD 3-Clause Clear License](https://github.com/samuel/go-zookeeper/blob/master/LICENSE) +- github.com/shirou/gopsutil [BSD 3-Clause Clear License](https://github.com/shirou/gopsutil/blob/master/LICENSE) +- github.com/sirupsen/logrus [MIT License](https://github.com/sirupsen/logrus/blob/master/LICENSE) +- github.com/soniah/gosnmp [BSD 2-Clause "Simplified" License](https://github.com/soniah/gosnmp/blob/master/LICENSE) +- github.com/streadway/amqp [BSD 2-Clause "Simplified" 
License](https://github.com/streadway/amqp/blob/master/LICENSE) +- github.com/stretchr/objx [MIT License](https://github.com/stretchr/objx/blob/master/LICENSE) +- github.com/stretchr/testify [custom -- permissive](https://github.com/stretchr/testify/blob/master/LICENSE) +- github.com/tidwall/gjson [MIT License](https://github.com/tidwall/gjson/blob/master/LICENSE) +- github.com/tidwall/match [MIT License](https://github.com/tidwall/match/blob/master/LICENSE) +- github.com/tidwall/pretty [MIT License](https://github.com/tidwall/pretty/blob/master/LICENSE) +- github.com/vishvananda/netlink [Apache License 2.0](https://github.com/vishvananda/netlink/blob/master/LICENSE) +- github.com/vishvananda/netns [Apache License 2.0](https://github.com/vishvananda/netns/blob/master/LICENSE) +- github.com/vjeantet/grok [Apache License 2.0](https://github.com/vjeantet/grok/blob/master/LICENSE) +- github.com/vmware/govmomi [Apache License 2.0](https://github.com/vmware/govmomi/blob/master/LICENSE.txt) +- github.com/wavefronthq/wavefront-sdk-go [Apache License 2.0](https://github.com/wavefrontHQ/wavefront-sdk-go/blob/master/LICENSE) +- github.com/wvanbergen/kafka [MIT License](https://github.com/wvanbergen/kafka/blob/master/LICENSE) +- github.com/wvanbergen/kazoo-go [MIT License](https://github.com/wvanbergen/kazoo-go/blob/master/MIT-LICENSE) +- github.com/yuin/gopher-lua [MIT License](https://github.com/yuin/gopher-lua/blob/master/LICENSE) +- go.opencensus.io [Apache License 2.0](https://github.com/census-instrumentation/opencensus-go/blob/master/LICENSE) +- golang.org/x/crypto [BSD 3-Clause Clear License](https://github.com/golang/crypto/blob/master/LICENSE) +- golang.org/x/net [BSD 3-Clause Clear License](https://github.com/golang/net/blob/master/LICENSE) +- golang.org/x/oauth2 [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/oauth2/blob/master/LICENSE) +- golang.org/x/sync [BSD 3-Clause "New" or "Revised" License](https://github.com/golang/sync/blob/master/LICENSE) +- golang.org/x/sys [BSD 3-Clause Clear License](https://github.com/golang/sys/blob/master/LICENSE) +- golang.org/x/text [BSD 3-Clause Clear License](https://github.com/golang/text/blob/master/LICENSE) +- golang.org/x/time [BSD 3-Clause Clear License](https://github.com/golang/time/blob/master/LICENSE) +- golang.zx2c4.com/wireguard [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) +- golang.zx2c4.com/wireguard/wgctrl [MIT License](https://github.com/WireGuard/wgctrl-go/blob/master/LICENSE.md) +- google.golang.org/api [BSD 3-Clause "New" or "Revised" License](https://github.com/googleapis/google-api-go-client/blob/master/LICENSE) +- google.golang.org/genproto [Apache License 2.0](https://github.com/google/go-genproto/blob/master/LICENSE) +- google.golang.org/grpc [Apache License 2.0](https://github.com/grpc/grpc-go/blob/master/LICENSE) +- gopkg.in/asn1-ber.v1 [MIT License](https://github.com/go-asn1-ber/asn1-ber/blob/v1.3/LICENSE) +- gopkg.in/fatih/pool.v2 [MIT License](https://github.com/fatih/pool/blob/v2.0.0/LICENSE) +- gopkg.in/fsnotify.v1 [BSD 3-Clause "New" or "Revised" License](https://github.com/fsnotify/fsnotify/blob/v1.4.7/LICENSE) +- gopkg.in/gorethink/gorethink.v3 [Apache License 2.0](https://github.com/rethinkdb/rethinkdb-go/blob/v3.0.5/LICENSE) +- gopkg.in/inf.v0 [BSD 3-Clause "New" or "Revised" License](https://github.com/go-inf/inf/blob/v0.9.1/LICENSE) +- gopkg.in/jcmturner/aescts.v1 [Apache License 2.0](https://github.com/jcmturner/aescts/blob/v1.0.1/LICENSE) +- 
gopkg.in/jcmturner/dnsutils.v1 [Apache License 2.0](https://github.com/jcmturner/dnsutils/blob/v1.0.1/LICENSE) +- gopkg.in/jcmturner/gokrb5.v7 [Apache License 2.0](https://github.com/jcmturner/gokrb5/tree/v7.5.0/LICENSE) +- gopkg.in/jcmturner/rpc.v1 [Apache License 2.0](https://github.com/jcmturner/rpc/blob/v1.1.0/LICENSE) +- gopkg.in/ldap.v3 [MIT License](https://github.com/go-ldap/ldap/blob/v3.1.7/LICENSE) +- gopkg.in/mgo.v2 [BSD 2-Clause "Simplified" License](https://github.com/go-mgo/mgo/blob/v2/LICENSE) +- gopkg.in/olivere/elastic.v5 [MIT License](https://github.com/olivere/elastic/blob/v5.0.76/LICENSE) +- gopkg.in/tomb.v1 [BSD 3-Clause Clear License](https://github.com/go-tomb/tomb/blob/v1/LICENSE) +- gopkg.in/yaml.v2 [Apache License 2.0](https://github.com/go-yaml/yaml/blob/v2.2.2/LICENSE) + +## telegraf used and modified code from these projects +- github.com/DataDog/datadog-agent [Apache License 2.0](https://github.com/DataDog/datadog-agent/LICENSE) diff --git a/docs/METRICS.md b/docs/METRICS.md new file mode 100644 index 000000000..f903dcad4 --- /dev/null +++ b/docs/METRICS.md @@ -0,0 +1,22 @@ +# Metrics + +Telegraf metrics are the internal representation used to model data during +processing. Metrics are closely based on InfluxDB's data model and contain +four main components: + +- **Measurement Name**: Description and namespace for the metric. +- **Tags**: Key/Value string pairs and usually used to identify the + metric. +- **Fields**: Key/Value pairs that are typed and usually contain the + metric data. +- **Timestamp**: Date and time associated with the fields. + +This metric type exists only in memory and must be converted to a concrete +representation in order to be transmitted or viewed. To achieve this we +provide several [output data formats][] sometimes referred to as +*serializers*. Our default serializer converts to [InfluxDB Line +Protocol][line protocol] which provides a high performance and one-to-one +direct mapping from Telegraf metrics. + +[output data formats]: /docs/DATA_FORMATS_OUTPUT.md +[line protocol]: /plugins/serializers/influx diff --git a/docs/OUTPUTS.md b/docs/OUTPUTS.md new file mode 100644 index 000000000..c60cd96ba --- /dev/null +++ b/docs/OUTPUTS.md @@ -0,0 +1,114 @@ +### Output Plugins + +This section is for developers who want to create a new output sink. Outputs +are created in a similar manner as collection plugins, and their interface has +similar constructs. + +### Output Plugin Guidelines + +- An output must conform to the [telegraf.Output][] interface. +- Outputs should call `outputs.Add` in their `init` function to register + themselves. See below for a quick example. +- To be available within Telegraf itself, plugins must add themselves to the + `github.com/influxdata/telegraf/plugins/outputs/all/all.go` file. +- The `SampleConfig` function should return valid toml that describes how the + plugin can be configured. This is included in `telegraf config`. Please + consult the [SampleConfig][] page for the latest style guidelines. +- The `Description` function should say in one line what this output does. +- Follow the recommended [CodeStyle][]. 
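+
+For reference, the [telegraf.Output][] interface that the guidelines above
+refer to looks roughly like the sketch below; the godoc link at the bottom of
+this page remains the authoritative definition.
+
+```go
+// Approximate shape of the Output interface as declared in the telegraf
+// package; consult https://godoc.org/github.com/influxdata/telegraf#Output
+// for the exact definition.
+type Output interface {
+    // SampleConfig returns the plugin's commented example configuration.
+    SampleConfig() string
+    // Description returns a one-line description of the plugin.
+    Description() string
+    // Connect establishes any required connections; called once at startup.
+    Connect() error
+    // Close tears down those connections; called once at shutdown.
+    Close() error
+    // Write receives a batch of metrics on each flush.
+    Write(metrics []Metric) error
+}
+```
+
+The example below implements each of these methods, plus an optional `Init`
+method, for a minimal output plugin.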
+ +### Output Plugin Example + +```go +package simpleoutput + +// simpleoutput.go + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs" +) + +type Simple struct { + Ok bool `toml:"ok"` +} + +func (s *Simple) Description() string { + return "a demo output" +} + +func (s *Simple) SampleConfig() string { + return ` + ok = true +` +} + +func (s *Simple) Init() error { + return nil +} + +func (s *Simple) Connect() error { + // Make a connection to the URL here + return nil +} + +func (s *Simple) Close() error { + // Close connection to the URL here + return nil +} + +func (s *Simple) Write(metrics []telegraf.Metric) error { + for _, metric := range metrics { + // write `metric` to the output sink here + } + return nil +} + +func init() { + outputs.Add("simpleoutput", func() telegraf.Output { return &Simple{} }) +} + +``` + +## Data Formats + +Some output plugins, such as the [file][] plugin, can write in any supported +[output data formats][]. + +In order to enable this, you must specify a +`SetSerializer(serializer serializers.Serializer)` +function on the plugin object (see the file plugin for an example), as well as +defining `serializer` as a field of the object. + +You can then utilize the serializer internally in your plugin, serializing data +before it's written. Telegraf's configuration layer will take care of +instantiating and creating the `Serializer` object. + +You should also add the following to your `SampleConfig()`: + +```toml + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" +``` + +## Flushing Metrics to Outputs + +Metrics are flushed to outputs when any of the following events happen: +- `flush_interval + rand(flush_jitter)` has elapsed since start or the last flush interval +- At least `metric_batch_size` count of metrics are waiting in the buffer +- The telegraf process has received a SIGUSR1 signal + +Note that if the flush takes longer than the `agent.interval` to write the metrics +to the output, you'll see a message saying the output `did not complete within its +flush interval`. This may mean your output is not keeping up with the flow of metrics, +and you may want to look into enabling compression, reducing the size of your metrics, +or investigate other reasons why the writes might be taking longer than expected. + +[file]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/file +[output data formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig +[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[telegraf.Output]: https://godoc.org/github.com/influxdata/telegraf#Output diff --git a/docs/PROCESSORS.md b/docs/PROCESSORS.md new file mode 100644 index 000000000..6ea82fdae --- /dev/null +++ b/docs/PROCESSORS.md @@ -0,0 +1,69 @@ +### Processor Plugins + +This section is for developers who want to create a new processor plugin. + +### Processor Plugin Guidelines + +* A processor must conform to the [telegraf.Processor][] interface. +* Processors should call `processors.Add` in their `init` function to register + themselves. See below for a quick example. +* To be available within Telegraf itself, plugins must add themselves to the + `github.com/influxdata/telegraf/plugins/processors/all/all.go` file. 
+* The `SampleConfig` function should return valid toml that describes how the + processor can be configured. This is included in the output of `telegraf + config`. Please consult the [SampleConfig][] page for the latest style + guidelines. +* The `Description` function should say in one line what this processor does. +* Follow the recommended [CodeStyle][]. + +### Processor Plugin Example + +```go +package printer + +// printer.go + +import ( + "fmt" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +type Printer struct { +} + +var sampleConfig = ` +` + +func (p *Printer) SampleConfig() string { + return sampleConfig +} + +func (p *Printer) Description() string { + return "Print all metrics that pass through this filter." +} + +func (p *Printer) Init() error { + return nil +} + +func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, metric := range in { + fmt.Println(metric.String()) + } + return in +} + +func init() { + processors.Add("printer", func() telegraf.Processor { + return &Printer{} + }) +} +``` + +[SampleConfig]: https://github.com/influxdata/telegraf/wiki/SampleConfig +[CodeStyle]: https://github.com/influxdata/telegraf/wiki/CodeStyle +[telegraf.Processor]: https://godoc.org/github.com/influxdata/telegraf#Processor diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..b7b55336c --- /dev/null +++ b/docs/README.md @@ -0,0 +1,21 @@ +# Telegraf + +- Concepts + - [Metrics][metrics] + - [Input Data Formats][parsers] + - [Output Data Formats][serializers] + - [Aggregators & Processors][aggproc] +- Administration + - [Configuration][conf] + - [Profiling][profiling] + - [Windows Service][winsvc] + - [FAQ][faq] + +[conf]: /docs/CONFIGURATION.md +[metrics]: /docs/METRICS.md +[parsers]: /docs/DATA_FORMATS_INPUT.md +[serializers]: /docs/DATA_FORMATS_OUTPUT.md +[aggproc]: /docs/AGGREGATORS_AND_PROCESSORS.md +[profiling]: /docs/PROFILING.md +[winsvc]: /docs/WINDOWS_SERVICE.md +[faq]: /docs/FAQ.md diff --git a/docs/TEMPLATE_PATTERN.md b/docs/TEMPLATE_PATTERN.md new file mode 100644 index 000000000..4244369d7 --- /dev/null +++ b/docs/TEMPLATE_PATTERN.md @@ -0,0 +1,135 @@ +# Template Patterns + +Template patterns are a mini language that describes how a dot delimited +string should be mapped to and from [metrics][]. + +A template has the form: +``` +"host.mytag.mytag.measurement.measurement.field*" +``` + +Where the following keywords can be set: + +1. `measurement`: specifies that this section of the graphite bucket corresponds +to the measurement name. This can be specified multiple times. +2. `field`: specifies that this section of the graphite bucket corresponds +to the field name. This can be specified multiple times. +3. `measurement*`: specifies that all remaining elements of the graphite bucket +correspond to the measurement name. +4. `field*`: specifies that all remaining elements of the graphite bucket +correspond to the field name. + +Any part of the template that is not a keyword is treated as a tag key. This +can also be specified multiple times. + +**NOTE:** `field*` cannot be used in conjunction with `measurement*`. + +### Examples + +#### Measurement & Tag Templates + +The most basic template is to specify a single transformation to apply to all +incoming metrics.
So the following template: + +```toml +templates = [ + "region.region.measurement*" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +us.west.cpu.load 100 +=> cpu.load,region=us.west value=100 +``` + +Multiple templates can also be specified, but these should be differentiated +using _filters_ (see below for more details) + +```toml +templates = [ + "*.*.* region.region.measurement", # <- all 3-part measurements will match this one. + "*.*.*.* region.region.host.measurement", # <- all 4-part measurements will match this one. +] +``` + +#### Field Templates + +The field keyword tells Telegraf to give the metric that field name. +So the following template: + +```toml +separator = "_" +templates = [ + "measurement.measurement.field.field.region" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.idle.percent.eu-east 100 +=> cpu_usage,region=eu-east idle_percent=100 +``` + +The field key can also be derived from all remaining elements of the graphite +bucket by specifying `field*`: + +```toml +separator = "_" +templates = [ + "measurement.measurement.region.field*" +] +``` + +which would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.eu-east.idle.percentage 100 +=> cpu_usage,region=eu-east idle_percentage=100 +``` + +#### Filter Templates + +Users can also filter the template(s) to use based on the name of the bucket, +using glob matching, like so: + +```toml +templates = [ + "cpu.* measurement.measurement.region", + "mem.* measurement.measurement.host" +] +``` + +which would result in the following transformation: + +``` +cpu.load.eu-east 100 +=> cpu_load,region=eu-east value=100 + +mem.cached.localhost 256 +=> mem_cached,host=localhost value=256 +``` + +#### Adding Tags + +Additional tags can be added to a metric that don't exist on the received metric. +You can add additional tags by specifying them after the pattern. +Tags have the same format as the line protocol. +Multiple tags are separated by commas. + +```toml +templates = [ + "measurement.measurement.field.region datacenter=1a" +] +``` + +would result in the following Graphite -> Telegraf transformation. + +``` +cpu.usage.idle.eu-east 100 +=> cpu_usage,region=eu-east,datacenter=1a idle=100 +``` + +[metrics]: /docs/METRICS.md diff --git a/docs/TLS.md b/docs/TLS.md new file mode 100644 index 000000000..3cd6a1025 --- /dev/null +++ b/docs/TLS.md @@ -0,0 +1,105 @@ +# Transport Layer Security + +There is an ongoing effort to standardize TLS options across plugins. When +possible, plugins will provide the standard settings described below. With the +exception of the advanced configuration available TLS settings will be +documented in the sample configuration. + +### Client Configuration + +For client TLS support we have the following options: +```toml +## Root certificates for verifying server certificates encoded in PEM format. +# tls_ca = "/etc/telegraf/ca.pem" + +## The public and private keypairs for the client encoded in PEM format. May +## contain intermediate certificates. +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +## Skip TLS verification. +# insecure_skip_verify = false +``` + +### Server Configuration + +The server TLS configuration provides support for TLS mutual authentication: + +```toml +## Set one or more allowed client CA certificate file names to +## enable mutually authenticated TLS connections. 
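+## When this option is set, clients are required to present a certificate
+## signed by one of the listed CAs; connections that do not provide a valid
+## client certificate are rejected during the TLS handshake.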
+# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + +## Add service certificate and key. +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +``` + +#### Advanced Configuration + +For plugins using the standard server configuration you can also set several +advanced settings. These options are not included in the sample configuration +for the interest of brevity. + +```toml +## Define list of allowed ciphers suites. If not defined the default ciphers +## supported by Go will be used. +## ex: tls_cipher_suites = [ +## "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +## "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", +## "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", +## "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", +## "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", +## "TLS_RSA_WITH_AES_128_GCM_SHA256", +## "TLS_RSA_WITH_AES_256_GCM_SHA384", +## "TLS_RSA_WITH_AES_128_CBC_SHA256", +## "TLS_RSA_WITH_AES_128_CBC_SHA", +## "TLS_RSA_WITH_AES_256_CBC_SHA" +## ] +# tls_cipher_suites = [] + +## Minimum TLS version that is acceptable. +# tls_min_version = "TLS10" + +## Maximum SSL/TLS version that is acceptable. +# tls_max_version = "TLS13" +``` + +Cipher suites for use with `tls_cipher_suites`: +- `TLS_RSA_WITH_RC4_128_SHA` +- `TLS_RSA_WITH_3DES_EDE_CBC_SHA` +- `TLS_RSA_WITH_AES_128_CBC_SHA` +- `TLS_RSA_WITH_AES_256_CBC_SHA` +- `TLS_RSA_WITH_AES_128_CBC_SHA256` +- `TLS_RSA_WITH_AES_128_GCM_SHA256` +- `TLS_RSA_WITH_AES_256_GCM_SHA384` +- `TLS_ECDHE_ECDSA_WITH_RC4_128_SHA` +- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA` +- `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA` +- `TLS_ECDHE_RSA_WITH_RC4_128_SHA` +- `TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA` +- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA` +- `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA` +- `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256` +- `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256` +- `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256` +- `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256` +- `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384` +- `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384` +- `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305` +- `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305` +- `TLS_AES_128_GCM_SHA256` +- `TLS_AES_256_GCM_SHA384` +- `TLS_CHACHA20_POLY1305_SHA256` + +TLS versions for use with `tls_min_version` or `tls_max_version`: +- `TLS10` +- `TLS11` +- `TLS12` +- `TLS13` diff --git a/docs/WINDOWS_SERVICE.md b/docs/WINDOWS_SERVICE.md index 886887d52..b0b6ee5ad 100644 --- a/docs/WINDOWS_SERVICE.md +++ b/docs/WINDOWS_SERVICE.md @@ -46,8 +46,26 @@ Telegraf can manage its own service through the --service flag: | `telegraf.exe --service start` | Start the telegraf service | | `telegraf.exe --service stop` | Stop the telegraf service | -Troubleshooting common error #1067 +## Install multiple services + +Running multiple instances of Telegraf is seldom needed, as you can run +multiple instances of each plugin and route metric flow using the metric +filtering options. 
However, if you do need to run multiple telegraf instances +on a single system, you can install the service with the `--service-name` and +`--service-display-name` flags to give the services unique names: + +``` +> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-1 --service-display-name "Telegraf 1" +> C:\"Program Files"\Telegraf\telegraf.exe --service install --service-name telegraf-2 --service-display-name "Telegraf 2" +``` + +## Troubleshooting + +When Telegraf runs as a Windows service, it logs messages to the Windows event log until the configuration file, including its logging settings, has been loaded. +If the Telegraf service reports a failure on start, check the event log for errors reported by the `telegraf` service: Event Viewer->Windows Logs->Application + +**Troubleshooting common error #1067** When installing as service in Windows, always double check to specify full path of the config file, otherwise windows service will fail to start - --config C:\"Program Files"\Telegraf\telegraf.conf + --config "C:\Program Files\Telegraf\telegraf.conf" diff --git a/etc/telegraf.conf b/etc/telegraf.conf index 38942adee..239f77c60 100644 --- a/etc/telegraf.conf +++ b/etc/telegraf.conf @@ -9,9 +9,9 @@ # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. # -# Environment variables can be used anywhere in this config file, simply prepend -# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), -# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) # Global tags can be specified here in key="value" format. @@ -35,10 +35,9 @@ ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 - ## For failed writes, telegraf will cache metric_buffer_limit metrics for each - ## output, and will flush this buffer on a successful write. Oldest metrics - ## are dropped first when this buffer fills. - ## This buffer only fills when writes fail to output plugin(s). + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. @@ -47,8 +46,8 @@ ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter flush_interval = "10s" ## Jitter the flush interval by a random amount. This is primarily to avoid ## large write spikes for users running a large number of telegraf instances. @@ -64,13 +63,32 @@ ## Valid time units are "ns", "us" (or "µs"), "ms", "s". precision = "" - ## Logging configuration: - ## Run telegraf with debug log messages. - debug = false - ## Run telegraf in quiet mode (error log messages only). - quiet = false - ## Specify the log file name. The empty string means to log to stderr. - logfile = "" + ## Log at debug level. + # debug = false + ## Log only error level messages.
+ # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 ## Override default hostname, if empty use os.Hostname() hostname = "" @@ -82,6 +100,7 @@ # OUTPUT PLUGINS # ############################################################################### + # Configuration for sending metrics to InfluxDB [[outputs.influxdb]] ## The full HTTP or UDP URL for your InfluxDB instance. @@ -93,8 +112,16 @@ # urls = ["http://127.0.0.1:8086"] ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. # database = "telegraf" + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the 'database_tag' will not be included in the written metric. + # exclude_database_tag = false + ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the ## database already exists. @@ -104,6 +131,13 @@ ## the default retention policy. Only takes effect when using HTTP. # retention_policy = "" + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be included in the written metric. + # exclude_retention_policy_tag = false + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". ## Only takes effect when using HTTP. # write_consistency = "any" @@ -119,7 +153,7 @@ # user_agent = "telegraf" ## UDP payload size is the maximum packet size to send. - # udp_payload = 512 + # udp_payload = "512B" ## Optional TLS Config for use on HTTP connections. # tls_ca = "/etc/telegraf/ca.pem" @@ -181,10 +215,10 @@ # # exchange_type = "topic" # # ## If true, exchange will be passively declared. -# # exchange_declare_passive = false +# # exchange_passive = false # -# ## If true, exchange will be created as a durable exchange. -# # exchange_durable = true +# ## Exchange durability can be either "transient" or "durable". +# # exchange_durability = "durable" # # ## Additional exchange arguments. # # exchange_arguments = { } @@ -240,6 +274,14 @@ # ## Recommended to set to true. # # use_batch_format = false # +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. 
+# ## +# ## Please note that when use_batch_format = false each amqp message contains only +# ## a single metric, it is recommended to use compression with batch format +# ## for best results. +# # content_encoding = "identity" +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -256,7 +298,7 @@ # # timeout = "5s" # # ## Enable additional diagnostic logging. -# # enable_diagnosic_logging = false +# # enable_diagnostic_logging = false # # ## Context Tag Sources add Application Insights context tags to a tag value. # ## @@ -267,6 +309,88 @@ # # "ai.cloud.roleInstance" = "kubernetes_pod_name" +# # Send aggregate metrics to Azure Monitor +# [[outputs.azure_monitor]] +# ## Timeout for HTTP writes. +# # timeout = "20s" +# +# ## Set the namespace prefix, defaults to "Telegraf/". +# # namespace_prefix = "Telegraf/" +# +# ## Azure Monitor doesn't have a string value type, so convert string +# ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows +# ## a maximum of 10 dimensions so Telegraf will only send the first 10 +# ## alphanumeric dimensions. +# # strings_as_dimensions = false +# +# ## Both region and resource_id must be set or be available via the +# ## Instance Metadata service on Azure Virtual Machines. +# # +# ## Azure Region to publish metrics against. +# ## ex: region = "southcentralus" +# # region = "" +# # +# ## The Azure Resource ID against which metric will be logged, e.g. +# ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" +# # resource_id = "" +# +# ## Optionally, if in Azure US Government, China or other sovereign +# ## cloud environment, set appropriate REST endpoint for receiving +# ## metrics. (Note: region may be unused in this context) +# # endpoint_url = "https://monitoring.core.usgovcloudapi.net" + + +# # Publish Telegraf metrics to a Google Cloud PubSub topic +# [[outputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub topic. +# project = "my-project" +# +# ## Required. Name of PubSub topic to publish metrics to. +# topic = "my-topic" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. +# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. If true, will send all metrics per write in one PubSub message. +# # send_batched = true +# +# ## The following publish_* parameters specifically configures batching +# ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read +# ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1. +# # publish_count_threshold = 1000 +# +# ## Optional. Send a request to PubSub (i.e. actually publish a batch) +# ## when it has this many PubSub messages. If send_batched is true, +# ## this is ignored and treated as if it were 1 +# # publish_byte_threshold = 1000000 +# +# ## Optional. 
Specifically configures requests made to the PubSub API. +# # publish_num_go_routines = 2 +# +# ## Optional. Specifies a timeout for requests to the PubSub API. +# # publish_timeout = "30s" +# +# ## Optional. If true, published PubSub message data will be base64-encoded. +# # base64_data = false +# +# ## Optional. PubSub attributes to add to metrics. +# # [[inputs.pubsub.attributes]] +# # my_attr = "tag_value" + + # # Configuration for AWS CloudWatch output. # [[outputs.cloudwatch]] # ## Amazon REGION @@ -287,8 +411,25 @@ # #profile = "" # #shared_credential_file = "" # +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# # ## Namespace for the CloudWatch MetricDatums # namespace = "InfluxData/Telegraf" +# +# ## If you have a large amount of metrics, you should consider to send statistic +# ## values instead of raw metrics which could not only improve performance but +# ## also save AWS API cost. If enable this flag, this plugin would parse the required +# ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. +# ## You could use basicstats aggregator to calculate those fields. If not all statistic +# ## fields are available, all fields would still be sent as raw metrics. +# # write_statistics = false +# +# ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) +# # high_resolution_metrics = false # # Configuration for CrateDB to send metrics to. @@ -309,6 +450,9 @@ # ## Datadog API key # apikey = "my-secret-key" # required. # +# # The base endpoint URL can optionally be specified but it defaults to: +# #url = "https://app.datadoghq.com/api/v1/series" +# # ## Connection timeout. # # timeout = "5s" @@ -332,7 +476,7 @@ # ## Set the interval to check if the Elasticsearch nodes are available # ## Setting to "0s" will disable the health check (not recommended in production) # health_check_interval = "10s" -# ## HTTP basic authentication details (eg. when using Shield) +# ## HTTP basic authentication details # # username = "telegraf" # # password = "mypassword" # @@ -370,11 +514,43 @@ # overwrite_template = false +# # Send metrics to command as input over stdin +# [[outputs.exec]] +# ## Command to ingest metrics via stdin. +# command = ["tee", "-a", "/dev/null"] +# +# ## Timeout for command to complete. +# # timeout = "5s" +# +# ## Data format to output. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# # data_format = "influx" + + # # Send telegraf metrics to file(s) # [[outputs.file]] # ## Files to write to, "stdout" is a specially handled file. # files = ["stdout", "/tmp/metrics.out"] # +# ## Use batch serialization format instead of line based delimiting. The +# ## batch format allows for the production of non line based output formats and +# ## may more effiently encode metric groups. +# # use_batch_format = false +# +# ## The file will be rotated after the time interval specified. When set +# ## to 0 no time based rotation is performed. +# # rotation_interval = "0d" +# +# ## The logfile will be rotated when it becomes larger than the specified +# ## size. When set to 0 no size based rotation is performed. 
+# # rotation_max_size = "0MB" +# +# ## Maximum number of rotated archives to keep, any older logs are deleted. +# ## If set to -1, no archives are removed. +# # rotation_max_archives = 5 +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -396,6 +572,8 @@ # # ## Enable Graphite tags support # # graphite_tag_support = false +# ## Character for separating metric name and field for Graphite tags +# # graphite_separator = "." # # ## timeout in seconds for the write connection to graphite # timeout = 2 @@ -408,16 +586,61 @@ # # insecure_skip_verify = false -# # Send telegraf metrics to graylog(s) +# # Send telegraf metrics to graylog # [[outputs.graylog]] # ## UDP endpoint for your graylog instance. -# servers = ["127.0.0.1:12201", "192.168.1.1:12201"] +# servers = ["127.0.0.1:12201"] +# +# ## The field to use as the GELF short_message, if unset the static string +# ## "telegraf" will be used. +# ## example: short_message_field = "message" +# # short_message_field = "" + + +# # Configurable HTTP health check resource based on metrics +# [[outputs.health]] +# ## Address and port to listen on. +# ## ex: service_address = "http://localhost:8080" +# ## service_address = "unix:///var/run/telegraf-health.sock" +# # service_address = "http://:8080" +# +# ## The maximum duration for reading the entire request. +# # read_timeout = "5s" +# ## The maximum duration for writing the entire response. +# # write_timeout = "5s" +# +# ## Username and password to accept for HTTP basic authentication. +# # basic_username = "user1" +# # basic_password = "secret" +# +# ## Allowed CA certificates for client certificates. +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## TLS server certificate and private key. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## One or more check sub-tables should be defined, it is also recommended to +# ## use metric filtering to limit the metrics that flow into this output. +# ## +# ## When using the default buffer sizes, this example will fail when the +# ## metric buffer is half full. +# ## +# ## namepass = ["internal_write"] +# ## tagpass = { output = ["influxdb"] } +# ## +# ## [[outputs.health.compares]] +# ## field = "buffer_size" +# ## lt = 5000.0 +# ## +# ## [[outputs.health.contains]] +# ## field = "buffer_size" # # A plugin that can transmit metrics over HTTP # [[outputs.http]] # ## URL is the address to send metrics to -# url = "http://127.0.0.1:8080/metric" +# url = "http://127.0.0.1:8080/telegraf" # # ## Timeout for HTTP message # # timeout = "5s" @@ -429,6 +652,12 @@ # # username = "username" # # password = "pa$$word" # +# ## OAuth2 Client Credentials Grant +# # client_id = "clientid" +# # client_secret = "secret" +# # token_url = "https://indentityprovider/oauth2/v1/token" +# # scopes = ["urn:opc:idm:__myscopes__"] +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -442,12 +671,69 @@ # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # # data_format = "influx" # +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. 
+# # content_encoding = "identity" +# # ## Additional HTTP headers # # [outputs.http.headers] # # # Should be set manually to "application/json" for json data_format # # Content-Type = "text/plain; charset=utf-8" +# # Configuration for sending metrics to InfluxDB +# [[outputs.influxdb_v2]] +# ## The URLs of the InfluxDB cluster nodes. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] +# urls = ["http://127.0.0.1:9999"] +# +# ## Token for authentication. +# token = "" +# +# ## Organization is the name of the organization you wish to write to; must exist. +# organization = "" +# +# ## Destination bucket to write into. +# bucket = "" +# +# ## The value of this tag will be used to determine the bucket. If this +# ## tag is not set the 'bucket' option is used as the default. +# # bucket_tag = "" +# +# ## If true, the bucket tag will not be added to the metric. +# # exclude_bucket_tag = false +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Proxy override, if unset values the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. +# # http_proxy = "http://corporate.proxy:3128" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## Enable or disable uint support for writing uints influxdb 2.0. +# # influx_uint_support = false +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Configuration for sending metrics to an Instrumental project # [[outputs.instrumental]] # ## Project API Token (required) @@ -470,6 +756,22 @@ # ## Kafka topic for producer messages # topic = "telegraf" # +# ## The value of this tag will be used as the topic. If not set the 'topic' +# ## option is used. +# # topic_tag = "" +# +# ## If true, the 'topic_tag' will be removed from to the metric. +# # exclude_topic_tag = false +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Of particular interest, lz4 compression +# ## requires at least version 0.10.0.0. +# ## ex: version = "1.1.0" +# # version = "" +# # ## Optional topic suffix configuration. # ## If the section is omitted, no suffix is used. # ## Following topic suffix methods are supported: @@ -497,15 +799,31 @@ # # keys = ["foo", "bar"] # # separator = "_" # -# ## Telegraf tag to use as a routing key -# ## ie, if this tag exists, its value will be used as the routing key +# ## The routing tag specifies a tagkey on the metric whose value is used as +# ## the message key. The message key is used to determine which partition to +# ## send the message to. This tag is prefered over the routing_key option. # routing_tag = "host" # +# ## The routing key is set as the message key and used to determine which +# ## partition to send the message to. This value is only used when no +# ## routing_tag is set or as a fallback when the tag specified in routing tag +# ## is not found. 
+# ## +# ## If set to "random", a random value will be generated for each message. +# ## +# ## When unset, no message key is added and each message is routed to a random +# ## partition. +# ## +# ## ex: routing_key = "random" +# ## routing_key = "telegraf" +# # routing_key = "" +# # ## CompressionCodec represents the various compression codecs recognized by # ## Kafka in messages. # ## 0 : No compression # ## 1 : Gzip compression # ## 2 : Snappy compression +# ## 3 : LZ4 compression # # compression_codec = 0 # # ## RequiredAcks is used in Produce Requests to tell the broker how many @@ -528,7 +846,12 @@ # ## until the next flush. # # max_retry = 3 # +# ## The maximum permitted size of a message. Should be set equal to or +# ## smaller than the broker's 'message.max.bytes'. +# # max_message_bytes = 1000000 +# # ## Optional TLS Config +# # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" @@ -539,6 +862,9 @@ # # sasl_username = "kafka" # # sasl_password = "secret" # +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -566,6 +892,12 @@ # #profile = "" # #shared_credential_file = "" # +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# # ## Kinesis StreamName must exist prior to starting telegraf. # streamname = "StreamName" # ## DEPRECATED: PartitionKey as used for sharding data. @@ -590,10 +922,11 @@ # # method = "measurement" # # # ## Use the value of a tag for all writes, if the tag is not set the empty -# ## string will be used: +# ## default option will be used. When no default, defaults to "telegraf" # # [outputs.kinesis.partition] # # method = "tag" # # key = "host" +# # default = "mykey" # # # ## Data format to output. @@ -661,6 +994,10 @@ # ## metrics are written one metric per MQTT message. # # batch = false # +# ## When true, metric will have RETAIN flag set, making broker cache entries until someone +# ## actually reads it +# # retain = false +# # ## Data format to output. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -672,12 +1009,20 @@ # [[outputs.nats]] # ## URLs of NATS servers # servers = ["nats://localhost:4222"] +# # ## Optional credentials # # username = "" # # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# # ## NATS subject for producer messages # subject = "telegraf" # +# ## Use Transport Layer Security +# # secure = false +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -721,11 +1066,11 @@ # # ## Number of data points to send to OpenTSDB in Http requests. # ## Not used with telnet API. -# httpBatchSize = 50 +# http_batch_size = 50 # # ## URI Path for Http requests to OpenTSDB. # ## Used in cases where OpenTSDB is located behind a reverse proxy. 
-# httpPath = "/api/put" +# http_path = "/api/put" # # ## Debug true - Prints OpenTSDB communication # debug = false @@ -737,26 +1082,48 @@ # # Configuration for the Prometheus client to spawn # [[outputs.prometheus_client]] # ## Address to listen on -# # listen = ":9273" +# listen = ":9273" # -# ## Use TLS -# #tls_cert = "/etc/ssl/telegraf.crt" -# #tls_key = "/etc/ssl/telegraf.key" +# ## Metric version controls the mapping from Telegraf metrics into +# ## Prometheus format. When using the prometheus input, use the same value in +# ## both plugins to ensure metrics are round-tripped without modification. +# ## +# ## example: metric_version = 1; deprecated in 1.13 +# ## metric_version = 2; recommended version +# # metric_version = 1 # -# ## Use http basic authentication -# #basic_username = "Foo" -# #basic_password = "Bar" +# ## Use HTTP Basic Authentication. +# # basic_username = "Foo" +# # basic_password = "Bar" # -# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration +# ## If set, the IP Ranges which are allowed to access metrics. +# ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] +# # ip_range = [] +# +# ## Path to publish the metrics on. +# # path = "/metrics" +# +# ## Expiration interval for each metric. 0 == no expiration # # expiration_interval = "60s" # # ## Collectors to enable, valid entries are "gocollector" and "process". # ## If unset, both are enabled. -# collectors_exclude = ["gocollector", "process"] +# # collectors_exclude = ["gocollector", "process"] # -# # Send string metrics as Prometheus labels. -# # Unless set to false all string metrics will be sent as labels. -# string_as_label = true +# ## Send string metrics as Prometheus labels. +# ## Unless set to false all string metrics will be sent as labels. +# # string_as_label = true +# +# ## If set, enable TLS with the given certificate. +# # tls_cert = "/etc/ssl/telegraf.crt" +# # tls_key = "/etc/ssl/telegraf.key" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Export metric collection time. +# # export_timestamp = false # # Configuration for the Riemann server to send metrics to @@ -837,51 +1204,197 @@ # # data_format = "influx" +# # Configuration for Google Cloud Stackdriver to send metrics to +# [[outputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## The namespace for the metric descriptor +# namespace = "telegraf" +# +# ## Custom resource type +# # resource_type = "generic_node" +# +# ## Additonal resource labels +# # [outputs.stackdriver.resource_labels] +# # node_id = "$HOSTNAME" +# # namespace = "myapp" +# # location = "eu-north0" + + +# # Configuration for Syslog server to send metrics to +# [[outputs.syslog]] +# ## URL to connect to +# ## ex: address = "tcp://127.0.0.1:8094" +# ## ex: address = "tcp4://127.0.0.1:8094" +# ## ex: address = "tcp6://127.0.0.1:8094" +# ## ex: address = "tcp6://[2001:db8::1]:8094" +# ## ex: address = "udp://127.0.0.1:8094" +# ## ex: address = "udp4://127.0.0.1:8094" +# ## ex: address = "udp6://127.0.0.1:8094" +# address = "tcp://127.0.0.1:8094" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. 
+# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" +# +# ## The framing technique with which it is expected that messages are +# ## transported (default = "octet-counting"). Whether the messages come +# ## using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). Must +# ## be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-trasparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" +# +# ## SD-PARAMs settings +# ## Syslog messages can contain key/value pairs within zero or more +# ## structured data sections. For each unrecognised metric tag/field a +# ## SD-PARAMS is created. +# ## +# ## Example: +# ## [[outputs.syslog]] +# ## sdparam_separator = "_" +# ## default_sdid = "default@32473" +# ## sdids = ["foo@123", "bar@456"] +# ## +# ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 +# ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] +# +# ## SD-PARAMs separator between the sdid and tag/field key (default = "_") +# # sdparam_separator = "_" +# +# ## Default sdid used for tags/fields that don't contain a prefix defined in +# ## the explict sdids setting below If no default is specified, no SD-PARAMs +# ## will be used for unrecognised field. +# # default_sdid = "default@32473" +# +# ## List of explicit prefixes to extract from tag/field keys and use as the +# ## SDID, if they match (see above example for more details): +# # sdids = ["foo@123", "bar@456"] +# +# ## Default severity value. Severity and Facility are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field +# ## with key "severity_code" is defined. If unset, 5 (notice) is the default +# # default_severity_code = 5 +# +# ## Default facility value. Facility and Severity are used to calculate the +# ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with +# ## key "facility_code" is defined. If unset, 1 (user-level) is the default +# # default_facility_code = 1 +# +# ## Default APP-NAME value (RFC5424#section-6.2.5) +# ## Used when no metric tag with key "appname" is defined. +# ## If unset, "Telegraf" is the default +# # default_appname = "Telegraf" + + +# # Write metrics to Warp 10 +# [[outputs.warp10]] +# # Prefix to add to the measurement. +# prefix = "telegraf." +# +# # URL of the Warp 10 server +# warp_url = "http://localhost:8080" +# +# # Write token to access your app on warp 10 +# token = "Token" +# +# # Warp 10 query timeout +# # timeout = "15s" +# +# ## Print Warp 10 error body +# # print_error_body = false +# +# ## Max string error size +# # max_string_error_size = 511 +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Configuration for Wavefront server to send metrics to # [[outputs.wavefront]] -# ## DNS name of the wavefront proxy server -# host = "wavefront.example.com" +# ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy +# ## If using Wavefront Proxy, also specify port. 
example: http://proxyserver:2878 +# url = "https://metrics.wavefront.com" # -# ## Port that the Wavefront proxy server listens on -# port = 2878 +# ## Authentication Token for Wavefront. Only required if using Direct Ingestion +# #token = "DUMMY_TOKEN" +# +# ## DNS name of the wavefront proxy server. Do not use if url is specified +# #host = "wavefront.example.com" +# +# ## Port that the Wavefront proxy server listens on. Do not use if url is specified +# #port = 2878 # # ## prefix for metrics keys # #prefix = "my.specific.prefix." # -# ## whether to use "value" for name of simple fields +# ## whether to use "value" for name of simple fields. default is false # #simple_fields = false # -# ## character to use between metric and field name. defaults to . (dot) +# ## character to use between metric and field name. default is . (dot) # #metric_separator = "." # -# ## Convert metric name paths to use metricSeperator character -# ## When true (default) will convert all _ (underscore) chartacters in final metric name +# ## Convert metric name paths to use metricSeparator character +# ## When true will convert all _ (underscore) characters in final metric name. default is true # #convert_paths = true # +# ## Use Strict rules to sanitize metric and tag names from invalid characters +# ## When enabled forward slash (/) and comma (,) will be accpeted +# #use_strict = false +# # ## Use Regex to sanitize metric and tag names from invalid characters -# ## Regex is more thorough, but significantly slower +# ## Regex is more thorough, but significantly slower. default is false # #use_regex = false # # ## point tags to use as the source name for Wavefront (if none found, host will be used) -# #source_override = ["hostname", "agent_host", "node_host"] +# #source_override = ["hostname", "address", "agent_host", "node_host"] # -# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true +# ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true # #convert_bool = true # # ## Define a mapping, namespaced by metric prefix, from string values to numeric values -# ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for -# ## any metrics beginning with "elasticsearch" +# ## deprecated in 1.9; use the enum processor plugin # #[[outputs.wavefront.string_to_number.elasticsearch]] # # green = 1.0 # # yellow = 0.5 # # red = 0.0 - ############################################################################### # PROCESSOR PLUGINS # ############################################################################### + +# # Clone metrics and apply modifications. +# [[processors.clone]] +# ## All modifications on inputs and aggregators can be overridden: +# # name_override = "new_name" +# # name_prefix = "new_name_prefix" +# # name_suffix = "new_name_suffix" +# +# ## Tags to be added (all values must be strings) +# # [processors.clone.tags] +# # additional_tag = "tag_value" + + # # Convert values to another metric value type # [[processors.converter]] # ## Tags to convert @@ -890,6 +1403,7 @@ # ## select the keys to convert. The array may contain globs. # ## = [...] # [processors.converter.tags] +# measurement = [] # string = [] # integer = [] # unsigned = [] @@ -902,6 +1416,7 @@ # ## select the keys to convert. The array may contain globs. # ## = [...] 
# [processors.converter.fields] +# measurement = [] # tag = [] # string = [] # integer = [] @@ -910,6 +1425,55 @@ # float = [] +# # Dates measurements, tags, and fields that pass through this filter. +# [[processors.date]] +# ## New tag to create +# tag_key = "month" +# +# ## Date format string, must be a representation of the Go "reference time" +# ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". +# date_format = "Jan" +# +# ## Offset duration added to the date string when writing the new tag. +# # date_offset = "0s" +# +# ## Timezone to use when creating the tag. This can be set to one of +# ## "UTC", "Local", or to a location name in the IANA Time Zone database. +# ## example: timezone = "America/Los_Angeles" +# # timezone = "UTC" + + +# # Filter metrics with repeating field values +# [[processors.dedup]] +# ## Maximum time to suppress output +# dedup_interval = "600s" + + +# # Map enum values according to given table. +# [[processors.enum]] +# [[processors.enum.mapping]] +# ## Name of the field to map +# field = "status" +# +# ## Name of the tag to map +# # tag = "status" +# +# ## Destination tag or field to be used for the mapped value. By default the +# ## source tag or field is used, overwriting the original value. +# dest = "status_code" +# +# ## Default value to be used for all values not contained in the mapping +# ## table. When unset, the unmodified value for the field will be used if no +# ## match is found. +# # default = 0 +# +# ## Table of mappings +# [processors.enum.mapping.value_mappings] +# green = 1 +# amber = 2 +# red = 3 + + # # Apply metric modifications using override semantics. # [[processors.override]] # ## All modifications on inputs and aggregators can be overridden: @@ -922,6 +1486,33 @@ # # additional_tag = "tag_value" +# # Parse a value in a specified field/tag(s) and add the result in a new metric +# [[processors.parser]] +# ## The name of the fields whose value will be parsed. +# parse_fields = [] +# +# ## If true, incoming metrics are not emitted. +# drop_original = false +# +# ## If set to override, emitted metrics will be merged by overriding the +# ## original metric using the newly parsed metrics. +# merge = "override" +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Rotate a single valued metric into a multi field metric +# [[processors.pivot]] +# ## Tag to use for naming the new field. +# tag_key = "name" +# ## Field to use as the value of the new field. +# value_key = "value" + + # # Print all metrics that pass through this filter. # [[processors.printer]] @@ -934,10 +1525,12 @@ # # key = "resp_code" # # ## Regular expression to match on a tag value # # pattern = "^(\\d)\\d\\d$" -# # ## Pattern for constructing a new value (${1} represents first subgroup) +# # ## Matches of the pattern will be replaced with this string. Use ${1} +# # ## notation to use the text of the first submatch. # # replacement = "${1}xx" # # # [[processors.regex.fields]] +# # ## Field to change # # key = "request" # # ## All the power of the Go regular expressions available here # # ## For example, named subgroups @@ -956,6 +1549,99 @@ # # result_key = "search_category" +# # Rename measurements, tags, and fields that pass through this filter. 
+# [[processors.rename]] + + +# # Add the S2 Cell ID as a tag based on latitude and longitude fields +# [[processors.s2geo]] +# ## The name of the lat and lon fields containing WGS-84 latitude and +# ## longitude in decimal degrees. +# # lat_field = "lat" +# # lon_field = "lon" +# +# ## New tag to create +# # tag_key = "s2_cell_id" +# +# ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) +# # cell_level = 9 + + +# # Perform string processing on tags, fields, and measurements +# [[processors.strings]] +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] +# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Convert a field value to titlecase +# # [[processors.strings.titlecase]] +# # field = "status" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # [[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" +# +# ## Replace all non-overlapping instances of old with new +# # [[processors.strings.replace]] +# # measurement = "*" +# # old = ":" +# # new = "_" +# +# ## Trims strings based on width +# # [[processors.strings.left]] +# # field = "message" +# # width = 10 +# +# ## Decode a base64 encoded utf-8 string +# # [[processors.strings.base64decode]] +# # field = "message" + + +# # Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit. +# [[processors.tag_limit]] +# ## Maximum number of tags to preserve +# limit = 10 +# +# ## List of tags to preferentially preserve +# keep = ["foo", "bar", "baz"] + + +# # Uses a Go template to create a new tag +# [[processors.template]] +# ## Tag to set with the output of the template. +# tag = "topic" +# +# ## Go template used to create the tag value. In order to ease TOML +# ## escaping requirements, you may wish to use single quotes around the +# ## template string. +# template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' + + # # Print all metrics that pass through this filter. # [[processors.topk]] # ## How many seconds between aggregations @@ -1006,19 +1692,42 @@ # # add_aggregate_fields = [] +# # Rotate multi field metric into several single field metrics +# [[processors.unpivot]] +# ## Tag to use for the name. +# tag_key = "name" +# ## Field to use for the name of the value. +# value_key = "value" + ############################################################################### # AGGREGATOR PLUGINS # ############################################################################### + # # Keep the aggregate basicstats of each metric passing through. # [[aggregators.basicstats]] -# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. 
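As a usage sketch for the template processor listed above, the following is taken almost verbatim from the sample; it assumes incoming metrics already carry "hostname" and "level" tags and combines them into a new "topic" tag.

```toml
# Build a "topic" tag from two existing tags.
# Assumes the incoming metrics already have "hostname" and "level" tags.
[[processors.template]]
  tag = "topic"
  template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}'
```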
+# drop_original = false +# +# ## Configures which basic stats to push as fields +# # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] + + +# # Report the final metric of a series +# [[aggregators.final]] # ## The period on which to flush & clear the aggregator. # period = "30s" # ## If true, the original metric will be dropped by the # ## aggregator and will not get sent to the output plugins. # drop_original = false +# +# ## The time that a series is not updated until considering it final. +# series_timeout = "5m" # # Create aggregate histograms. @@ -1030,16 +1739,24 @@ # ## aggregator and will not get sent to the output plugins. # drop_original = false # +# ## If true, the histogram will be reset on flush instead +# ## of accumulating the results. +# reset = false +# +# ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. +# ## Defaults to true. +# cumulative = true +# # ## Example config that aggregates all fields of the metric. # # [[aggregators.histogram.config]] -# # ## The set of buckets. +# # ## Right borders of buckets (with +Inf implicitly added). # # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] # # ## The name of metric. # # measurement_name = "cpu" # # ## Example config that aggregates only specific fields of the metric. # # [[aggregators.histogram.config]] -# # ## The set of buckets. +# # ## Right borders of buckets (with +Inf implicitly added). # # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] # # ## The name of metric. # # measurement_name = "diskio" @@ -1047,6 +1764,13 @@ # # fields = ["io_time", "read_time", "write_time"] +# # Merge metrics into multifield metrics by series key +# [[aggregators.merge]] +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = true + + # # Keep the aggregate min/max of each metric passing through. # [[aggregators.minmax]] # ## General Aggregator Arguments: @@ -1057,11 +1781,23 @@ # drop_original = false +# # Count the occurrence of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = [] + ############################################################################### # INPUT PLUGINS # ############################################################################### + # Read metrics about cpu usage [[inputs.cpu]] ## Whether to report per-cpu stats or not @@ -1081,7 +1817,7 @@ # mount_points = ["/"] ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] # Read metrics about disk IO by device @@ -1098,6 +1834,8 @@ ## Currently only Linux is supported via udev properties. You can view ## available properties for a device by running: ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. 
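A minimal histogram aggregator sketch based on the options above; the bucket borders and measurement name are arbitrary examples, and the buckets are right borders with +Inf added implicitly.

```toml
[[aggregators.histogram]]
  period = "30s"        # flush & clear every 30s
  drop_original = false
  cumulative = true     # accumulate bucket counts; set false to add a "gt" tag

  [[aggregators.histogram.config]]
    ## Right borders of buckets (+Inf is implicit); values are illustrative.
    buckets = [0.0, 25.0, 50.0, 75.0, 100.0]
    measurement_name = "cpu"
```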
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] # ## Using the same metadata source as device_tags, you can also customize the @@ -1133,7 +1871,36 @@ # Read metrics about system load & uptime [[inputs.system]] - # no configuration + ## Uncomment to remove deprecated metrics. + # fielddrop = ["uptime_format"] + + +# # Gather ActiveMQ metrics +# [[inputs.activemq]] +# ## ActiveMQ WebConsole URL +# url = "http://127.0.0.1:8161" +# +# ## Required ActiveMQ Endpoint +# ## deprecated in 1.11; use the url option +# # server = "127.0.0.1" +# # port = 8161 +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Required ActiveMQ webadmin root path +# # webadmin = "admin" +# +# ## Maximum time to receive response. +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Read stats from aerospike server(s) @@ -1177,6 +1944,16 @@ # # insecure_skip_verify = false +# # Monitor APC UPSes connected to apcupsd +# [[inputs.apcupsd]] +# # A list of running apcupsd server to connect to. +# # If not provided will default to tcp://127.0.0.1:3551 +# servers = ["tcp://127.0.0.1:3551"] +# +# ## Timeout for dialing server. +# timeout = "5s" + + # # Gather metrics from Apache Aurora schedulers # [[inputs.aurora]] # ## Schedulers are the base addresses of your Aurora Schedulers @@ -1203,6 +1980,18 @@ # # insecure_skip_verify = false +# # Gather Azure Storage Queue metrics +# [[inputs.azure_storage_queue]] +# ## Required Azure Storage Account name +# account_name = "mystorageaccount" +# +# ## Required Azure Storage Account access key +# account_key = "storageaccountaccesskey" +# +# ## Set to false to disable peeking age of oldest message (executes faster) +# # peek_oldest_message_age = true + + # # Read metrics of bcache from stats_total and dirty_data # [[inputs.bcache]] # ## Bcache sets path @@ -1215,6 +2004,25 @@ # bcacheDevs = ["bcache0"] +# # Collects Beanstalkd server and tubes stats +# [[inputs.beanstalkd]] +# ## Server to collect data from +# server = "localhost:11300" +# +# ## List of tubes to gather stats about. +# ## If no tubes specified then data gathered for each tube on server reported by list-tubes command +# tubes = ["notifications"] + + +# # Read BIND nameserver XML statistics +# [[inputs.bind]] +# ## An array of BIND XML statistics URI to gather stats. +# ## Default is "http://localhost:8053/xml/v3". +# # urls = ["http://localhost:8053/xml/v3"] +# # gather_memory_contexts = false +# # gather_views = false + + # # Collect bond interface status, slaves statuses and failures count # [[inputs.bond]] # ## Sets 'proc' directory path @@ -1339,12 +2147,18 @@ # ## 4) environment variables # ## 5) shared credentials file # ## 6) EC2 Instance Profile -# #access_key = "" -# #secret_key = "" -# #token = "" -# #role_arn = "" -# #profile = "" -# #shared_credential_file = "" +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" # # # The minimum period for Cloudwatch metrics is 1 minute (60s). 
However not all # # metrics are made available to the 1 minute period. Some are collected at @@ -1364,25 +2178,38 @@ # interval = "5m" # # ## Configure the TTL for the internal cache of metrics. -# ## Defaults to 1 hr if not specified -# #cache_ttl = "10m" +# # cache_ttl = "1h" # # ## Metric Statistic Namespace (required) # namespace = "AWS/ELB" # # ## Maximum requests per second. Note that the global default AWS rate limit is -# ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a -# ## maximum of 400. Optional - default value is 200. +# ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 50. # ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html -# ratelimit = 200 +# # ratelimit = 25 # -# ## Metrics to Pull (optional) +# ## Timeout for http requests made by the cloudwatch client. +# # timeout = "5s" +# +# ## Namespace-wide statistic filters. These allow fewer queries to be made to +# ## cloudwatch. +# # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] +# # statistic_exclude = [] +# +# ## Metrics to Pull # ## Defaults to all Metrics in Namespace if nothing is provided # ## Refreshes Namespace available metrics every 1h # #[[inputs.cloudwatch.metrics]] # # names = ["Latency", "RequestCount"] # # -# # ## Dimension filters for Metric (optional) +# # ## Statistic filters for Metric. These allow for retrieving specific +# # ## statistics for an individual metric. +# # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] +# # # statistic_exclude = [] +# # +# # ## Dimension filters for Metric. All dimensions defined for the metric names +# # ## must be specified in order to retrieve the metric statistics. # # [[inputs.cloudwatch.metrics.dimensions]] # # name = "LoadBalancerName" # # value = "p-example" @@ -1407,7 +2234,7 @@ # # Gather health check statuses from services registered in Consul # [[inputs.consul]] # ## Consul server address -# # address = "localhost" +# # address = "localhost:8500" # # ## URI scheme for the Consul server, one of "http", "https" # # scheme = "http" @@ -1419,8 +2246,8 @@ # # username = "" # # password = "" # -# ## Data centre to query the health checks from -# # datacentre = "" +# ## Data center to query the health checks from +# # datacenter = "" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" @@ -1452,8 +2279,12 @@ # # Read CouchDB Stats from one or more servers # [[inputs.couchdb]] # ## Works with CouchDB stats endpoints out of the box -# ## Multiple HOSTs from which to read CouchDB stats: +# ## Multiple Hosts from which to read CouchDB stats: # hosts = ["http://localhost:8086/_stats"] +# +# ## Use HTTP Basic Authentication. +# # basic_username = "telegraf" +# # basic_password = "p@ssw0rd" # # Input plugin for DC/OS metrics @@ -1550,6 +2381,9 @@ # ## Only collect metrics for these containers, collect all if empty # container_names = [] # +# ## Set the source tag for the metrics to the container ID hostname, e.g. first 12 chars +# source_tag = false +# # ## Containers to include and exclude. Globs accepted. # ## Note that an empty array for both will include all containers # container_name_include = [] @@ -1557,6 +2391,8 @@ # # ## Container states to include and exclude. Globs accepted. # ## When empty only containers in the "running" state will be captured.
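Pulling the cloudwatch options above together, a minimal working sketch might look like the following. The region, statistics, and load balancer name are placeholders, and `region` and `period` are regular options of this plugin that fall outside the excerpt shown above.

```toml
[[inputs.cloudwatch]]
  ## region and period are standard options of this plugin not shown in the
  ## excerpt above; all values here are placeholders.
  region = "us-east-1"
  period = "5m"
  interval = "5m"
  namespace = "AWS/ELB"
  ratelimit = 25
  statistic_include = ["average", "maximum"]

  [[inputs.cloudwatch.metrics]]
    names = ["Latency", "RequestCount"]

    ## All dimensions of the metric must be listed to get statistics back.
    [[inputs.cloudwatch.metrics.dimensions]]
      name = "LoadBalancerName"
      value = "p-example"
```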
+# ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] +# ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] # # container_state_include = [] # # container_state_exclude = [] # @@ -1566,8 +2402,10 @@ # ## Whether to report for each container per-device blkio (8:0, 8:1...) and # ## network (eth0, eth1, ...) stats or not # perdevice = true +# # ## Whether to report for each container total blkio and network stats or not # total = false +# # ## Which environment variables should we use as a tag # ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] # @@ -1592,13 +2430,41 @@ # ## # ## If no servers are specified, then localhost is used as the host. # servers = ["localhost:24242"] +# # ## Type is one of "user", "domain", "ip", or "global" # type = "global" +# # ## Wildcard matches like "*.com". An empty string "" is same as "*" # ## If type = "ip" filters should be # filters = [""] +# # Read metrics about docker containers from Fargate/ECS v2 meta endpoints. +# [[inputs.ecs]] +# ## ECS metadata url +# # endpoint_url = "http://169.254.170.2" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "RUNNING" state will be captured. +# ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", +# ## "RESOURCES_PROVISIONED", "STOPPED". +# # container_status_include = [] +# # container_status_exclude = [] +# +# ## ecs labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# ecs_label_include = [ "com.amazonaws.ecs.*" ] +# ecs_label_exclude = [] +# +# ## Timeout for queries. +# # timeout = "5s" + + # # Read stats from one or more Elasticsearch servers or clusters # [[inputs.elasticsearch]] # ## specify a list of one or more Elasticsearch servers @@ -1623,15 +2489,27 @@ # ## - cluster # # cluster_health_level = "indices" # -# ## Set cluster_stats to true when you want to also obtain cluster stats from the -# ## Master node. +# ## Set cluster_stats to true when you want to also obtain cluster stats. # cluster_stats = false # +# ## Only gather cluster_stats from the master node. To work this require local = true +# cluster_stats_only_from_master = true +# +# ## Indices to collect; can be one or more indices names or _all +# indices_include = ["_all"] +# +# ## One of "shards", "cluster", "indices" +# indices_level = "shards" +# # ## node_stats is a list of sub-stats that you want to have gathered. Valid options # ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", # ## "breaker". Per default, all stats are gathered. # # node_stats = ["jvm", "http"] # +# ## HTTP Basic Authentication username and password. +# # username = "" +# # password = "" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -1640,6 +2518,15 @@ # # insecure_skip_verify = false +# # Returns ethtool statistics for given interfaces +# [[inputs.ethtool]] +# ## List of interfaces to pull metrics for +# # interface_include = ["eth0"] +# +# ## List of interfaces to ignore when pulling metrics. 
+# # interface_exclude = ["eth1"] + + # # Read metrics from one or more commands that can output to stdout # [[inputs.exec]] # ## Commands array @@ -1682,6 +2569,65 @@ # # timeout = "5s" +# # Reload and gather from file[s] on telegraf's interval. +# [[inputs.file]] +# ## Files to parse each interval. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only read the apache log file +# files = ["/var/log/apache/access.log"] +# +# ## The dataformat to be read from files +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Name a tag containing the name of the file the data was parsed from. Leave empty +# ## to disable. +# # file_tag = "" + + +# # Count files in a directory +# [[inputs.filecount]] +# ## Directory to gather stats about. +# ## deprecated in 1.9; use the directories option +# # directory = "/var/cache/apt/archives" +# +# ## Directories to gather stats about. +# ## This accept standard unit glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/** -> recursively find all directories in /var/log and count files in each directories +# ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directories +# ## /var/log -> count all files in /var/log and all of its subdirectories +# directories = ["/var/cache/apt/archives"] +# +# ## Only count files that match the name pattern. Defaults to "*". +# name = "*.deb" +# +# ## Count files in subdirectories. Defaults to true. +# recursive = false +# +# ## Only count regular files. Defaults to true. +# regular_only = true +# +# ## Follow all symlinks while walking the directory tree. Defaults to false. +# follow_symlinks = false +# +# ## Only count files that are at least this size. If size is +# ## a negative number, only count files that are smaller than the +# ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... +# ## Without quotes and units, interpreted as size in bytes. +# size = "0B" +# +# ## Only count files that have not been touched for at least this +# ## duration. If mtime is negative, only count files that have been +# ## touched in this duration. Defaults to "0s". +# mtime = "0s" + + # # Read stats about given file(s) # [[inputs.filestat]] # ## Files to gather stats about. @@ -1694,10 +2640,23 @@ # ## See https://github.com/gobwas/glob for more examples # ## # files = ["/var/log/**.log"] +# # ## If true, read the entire file and calculate an md5 checksum. # md5 = false +# # Read real time temps from fireboard.io servers +# [[inputs.fireboard]] +# ## Specify auth token for your account +# auth_token = "invalidAuthToken" +# ## You can override the fireboard server URL if necessary +# # url = https://fireboard.io/api/v1/devices.json +# ## You can set a different http_timeout if you need to +# ## You should set a string using an number and time indicator +# ## for example "12s" for 12 seconds. +# # http_timeout = "4s" + + # # Read metrics exposed by fluentd in_monitor plugin # [[inputs.fluentd]] # ## This plugin reads information exposed by fluentd (using /api/plugins.json endpoint). 
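As a quick usage sketch of the filecount input described above; the directory, pattern, and thresholds are invented for the example.

```toml
[[inputs.filecount]]
  directories = ["/var/log/**"]   # globs supported, ** recurses
  name = "*.log"                  # only count files matching this pattern
  recursive = true
  size = "1MiB"                   # only count files of at least 1 MiB
  mtime = "24h"                   # ...that have not been touched for a day
```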
@@ -1714,6 +2673,24 @@ # ] +# # Gather repository information from GitHub hosted repositories. +# [[inputs.github]] +# ## List of repositories to monitor. +# repositories = [ +# "influxdata/telegraf", +# "influxdata/influxdb" +# ] +# +# ## Github API access token. Unauthenticated requests are limited to 60 per hour. +# # access_token = "" +# +# ## Github API enterprise url. Github Enterprise accounts must specify their base url. +# # enterprise_base_url = "" +# +# ## Timeout for HTTP requests. +# # http_timeout = "5s" + + # # Read flattened metrics from one or more GrayLog HTTP endpoints # [[inputs.graylog]] # ## API endpoint, currently supported API: @@ -1761,6 +2738,10 @@ # ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats # servers = ["http://myhaproxy.com:1936/haproxy?stats"] # +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# # ## You can also use local socket with standard wildcard globbing. # ## Server address not starting with 'http' will be treated as a possible # ## socket, so both examples below are valid. @@ -1809,8 +2790,12 @@ # # username = "username" # # password = "pa$$word" # -# ## Tag all metrics with the url -# # tag_url = true +# ## HTTP entity-body to send with POST/PUT requests. +# # body = "" +# +# ## HTTP Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "identity" # # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" @@ -1822,6 +2807,9 @@ # ## Amount of time allowed to complete the HTTP request # # timeout = "5s" # +# ## List of success status codes +# # success_status_codes = [200] +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -1831,9 +2819,13 @@ # # HTTP/HTTPS request given an address a method and a timeout # [[inputs.http_response]] +# ## Deprecated in 1.12, use 'urls' # ## Server address (default http://localhost) # # address = "http://localhost" # +# ## List of urls to query. +# # urls = ["http://localhost"] +# # ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) # # http_proxy = "http://localhost:8888" # @@ -1866,6 +2858,9 @@ # ## HTTP Request Headers (all values must be strings) # # [inputs.http_response.headers] # # Host = "github.com" +# +# ## Interface to use when dialing an address +# # interface = "eth0" # # Read flattened metrics from one or more JSON HTTP endpoints @@ -1916,6 +2911,34 @@ # # apiVersion = "v1" +# # Gather Icinga2 status +# [[inputs.icinga2]] +# ## Required Icinga2 server address +# # server = "https://localhost:5665" +# +# ## Required Icinga2 object type ("services" or "hosts") +# # object_type = "services" +# +# ## Credentials for basic HTTP authentication +# # username = "admin" +# # password = "admin" +# +# ## Maximum time to receive response. 
+# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true + + +# # Gets counters from all InfiniBand cards and ports installed +# [[inputs.infiniband]] +# # no configuration + + # # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints # [[inputs.influxdb]] # ## Works with InfluxDB debug endpoints out of the box, @@ -1928,6 +2951,10 @@ # "http://localhost:8086/debug/vars" # ] # +# ## Username and password to send using HTTP Basic Authentication. +# # username = "" +# # password = "" +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -1947,9 +2974,17 @@ # # This plugin gathers interrupts data from /proc/interrupts and /proc/softirqs. # [[inputs.interrupts]] +# ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is +# ## stored as a field. +# ## +# ## The default is false for backwards compatibility, and will be changed to +# ## true in a future version. It is recommended to set to true on new +# ## deployments. +# # cpu_as_tag = false +# # ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. # # [inputs.interrupts.tagdrop] -# # irq = [ "NET_RX", "TASKLET" ] +# # irq = [ "NET_RX", "TASKLET" ] # # Read metrics from the bare metal servers via IPMI @@ -1957,6 +2992,11 @@ # ## optionally specify the path to the ipmitool executable # # path = "/usr/bin/ipmitool" # ## +# ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. +# ## Sudo must be configured to allow the telegraf user to run ipmitool +# ## without a password. +# # use_sudo = false +# ## # ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR # # privilege = "ADMINISTRATOR" # ## @@ -1975,6 +3015,9 @@ # # ## Timeout for the ipmitool command to complete # timeout = "20s" +# +# ## Schema Version: (Optional, defaults to version 1) +# metric_version = 2 # # Gather packets and bytes counters from Linux ipsets @@ -1996,8 +3039,10 @@ # ## iptables can be restricted to only list command "iptables -nvL". # use_sudo = false # ## Setting 'use_lock' to true runs iptables with the "-w" option. -# ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") +# ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl") # use_lock = false +# ## Define an alternate executable, such as "ip6tables". Default is "iptables". +# # binary = "ip6tables" # ## defines the table to monitor: # table = "filter" # ## defines the chains to monitor. 
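The interrupts input above recommends tagging by CPU on new deployments; a short sketch of that, reusing the IRQ tagdrop example from the sample:

```toml
[[inputs.interrupts]]
  ## Tag each metric with the cpu instead of storing it as a field
  ## (recommended for new deployments).
  cpu_as_tag = true

  ## Drop IRQs that are not of interest.
  [inputs.interrupts.tagdrop]
    irq = ["NET_RX", "TASKLET"]
```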
@@ -2006,6 +3051,55 @@ # chains = [ "INPUT" ] +# # Collect virtual and real server stats from Linux IPVS +# [[inputs.ipvs]] +# # no configuration + + +# # Read jobs and cluster metrics from Jenkins instances +# [[inputs.jenkins]] +# ## The Jenkins URL in the format "schema://host:port" +# url = "http://my-jenkins-instance:8080" +# # username = "admin" +# # password = "admin" +# +# ## Set response_timeout +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional Max Job Build Age filter +# ## Default 1 hour, ignore builds older than max_build_age +# # max_build_age = "1h" +# +# ## Optional Sub Job Depth filter +# ## Jenkins can have unlimited layers of sub jobs +# ## This config limits the layers of pulling; the default value 0 means +# ## unlimited pulling until there are no more sub jobs +# # max_subjob_depth = 0 +# +# ## Optional Sub Job Per Layer +# ## In workflow-multibranch-plugin, each branch will be created as a sub job. +# ## This config limits pulling to only the latest branches in each layer; +# ## empty will use the default value 10 +# # max_subjob_per_layer = 10 +# +# ## Jobs to exclude from gathering +# # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] +# +# ## Nodes to exclude from gathering +# # node_exclude = [ "node1", "node2" ] +# +# ## Worker pool for the jenkins plugin only +# ## Leaving this field empty will use the default value 5 +# # max_connections = 5 + + # # Read JMX metrics through Jolokia # [[inputs.jolokia]] # # DEPRECATED: the jolokia plugin has been deprecated in favor of the @@ -2156,13 +3250,78 @@ # # no configuration +# # Read status information from one or more Kibana servers +# [[inputs.kibana]] +# ## Specify a list of one or more Kibana servers +# servers = ["http://localhost:5601"] +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from the Kubernetes api +# [[inputs.kube_inventory]] +# ## URL for the Kubernetes API +# url = "https://127.0.0.1" +# +# ## Namespace to use. Set to "" to use all namespaces. +# # namespace = "default" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## Set response_timeout (default 5 seconds) +# # response_timeout = "5s" +# +# ## Optional Resources to exclude from gathering +# ## Leave this blank to gather everything available. +# ## Values can be - "daemonsets", "deployments", "endpoints", "ingress", "nodes", +# ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" +# # resource_exclude = [ "deployments", "nodes", "statefulsets" ] +# +# ## Optional Resources to include when gathering +# ## Overrides resource_exclude if both set.
+# # resource_include = [ "deployments", "nodes", "statefulsets" ] +# +# ## Optional TLS Config +# # tls_ca = "/path/to/cafile" +# # tls_cert = "/path/to/certfile" +# # tls_key = "/path/to/keyfile" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read metrics from the kubernetes kubelet api # [[inputs.kubernetes]] # ## URL for the kubelet -# url = "http://1.1.1.1:10255" +# url = "http://127.0.0.1:10255" # -# ## Use bearer token for authorization -# # bearer_token = /path/to/bearer/token +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# ## If both of these are empty, we'll use the default serviceaccount: +# ## at: /run/secrets/kubernetes.io/serviceaccount/token +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## Pod labels to be added as tags. An empty array for both include and +# ## exclude will include all labels. +# # label_include = [] +# # label_exclude = ["*"] # # ## Set response_timeout (default 5 seconds) # # response_timeout = "5s" @@ -2187,6 +3346,39 @@ # # no configuration +# # Read metrics exposed by Logstash +# [[inputs.logstash]] +# ## The URL of the exposed Logstash API endpoint. +# url = "http://127.0.0.1:9600" +# +# ## Use Logstash 5 single pipeline API, set to true when monitoring +# ## Logstash 5. +# # single_pipeline = false +# +# ## Enable optional collection components. Can contain +# ## "pipelines", "process", and "jvm". +# # collect = ["pipelines", "process", "jvm"] +# +# ## Timeout for HTTP requests. +# # timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials. +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Use TLS but skip chain & host verification. +# # insecure_skip_verify = false +# +# ## Optional HTTP headers. +# # [inputs.logstash.headers] +# # "X-Special-Header" = "Special-Value" + + # # Read metrics from local Lustre service on OST, MDS # [[inputs.lustre2]] # ## An array of /proc globs to search for Lustre stats @@ -2215,6 +3407,26 @@ # # campaign_id = "" +# # Retrives information on a specific host in a MarkLogic Cluster +# [[inputs.marklogic]] +# ## Base URL of the MarkLogic HTTP Server. +# url = "http://localhost:8002" +# +# ## List of specific hostnames to retrieve information. At least (1) required. +# # hosts = ["hostname1", "hostname2"] +# +# ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges +# # username = "myuser" +# # password = "mypassword" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Read metrics from one or many mcrouter servers # [[inputs.mcrouter]] # ## An array of address to gather stats about. Specify an ip or hostname @@ -2237,8 +3449,10 @@ # [[inputs.mesos]] # ## Timeout, in ms. # timeout = 100 +# # ## A list of Mesos masters. # masters = ["http://localhost:5050"] +# # ## Master metrics groups to be collected, by default, all enabled. 
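A minimal sketch of the kubelet input described above, assuming the read-only port on the local node and the default in-cluster service-account token; the address is illustrative.

```toml
[[inputs.kubernetes]]
  ## Kubelet read-only port on the local node (address is illustrative).
  url = "http://127.0.0.1:10255"

  ## With both bearer token options left unset, the default service-account
  ## token at /run/secrets/kubernetes.io/serviceaccount/token is used.

  ## Empty include and exclude lists keep all pod labels as tags.
  label_include = []
  label_exclude = []

  response_timeout = "5s"
```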
# master_collections = [ # "resources", @@ -2246,13 +3460,17 @@ # "system", # "agents", # "frameworks", +# "framework_offers", # "tasks", # "messages", # "evqueue", # "registrar", +# "allocator", # ] +# # ## A list of Mesos slaves, default is [] # # slaves = [] +# # ## Slave metrics groups to be collected, by default, all enabled. # # slave_collections = [ # # "resources", @@ -2271,14 +3489,93 @@ # # insecure_skip_verify = false -# # Collects scores from a minecraft server's scoreboard using the RCON protocol +# # Collects scores from a Minecraft server's scoreboard using the RCON protocol # [[inputs.minecraft]] -# ## server address for minecraft +# ## Address of the Minecraft server. # # server = "localhost" -# ## port for RCON +# +# ## Server RCON Port. # # port = "25575" -# ## password RCON for mincraft server -# # password = "" +# +# ## Server RCON Password. +# password = "" +# +# ## Uncomment to remove deprecated metric components. +# # tagdrop = ["server"] + + +# # Retrieve data from MODBUS slave devices +# [[inputs.modbus]] +# ## Connection Configuration +# ## +# ## The plugin supports connections to PLCs via MODBUS/TCP or +# ## via serial line communication in binary (RTU) or readable (ASCII) encoding +# ## +# ## Device name +# name = "Device" +# +# ## Slave ID - addresses a MODBUS device on the bus +# ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] +# slave_id = 1 +# +# ## Timeout for each request +# timeout = "1s" +# +# # TCP - connect via Modbus/TCP +# controller = "tcp://localhost:502" +# +# # Serial (RS485; RS232) +# #controller = "file:///dev/ttyUSB0" +# #baud_rate = 9600 +# #data_bits = 8 +# #parity = "N" +# #stop_bits = 1 +# #transmission_mode = "RTU" +# +# +# ## Measurements +# ## +# +# ## Digital Variables, Discrete Inputs and Coils +# ## name - the variable name +# ## address - variable address +# +# discrete_inputs = [ +# { name = "start", address = [0]}, +# { name = "stop", address = [1]}, +# { name = "reset", address = [2]}, +# { name = "emergency_stop", address = [3]}, +# ] +# coils = [ +# { name = "motor1_run", address = [0]}, +# { name = "motor1_jog", address = [1]}, +# { name = "motor1_stop", address = [2]}, +# ] +# +# ## Analog Variables, Input Registers and Holding Registers +# ## name - the variable name +# ## byte_order - the ordering of bytes +# ## |---AB, ABCD - Big Endian +# ## |---BA, DCBA - Little Endian +# ## |---BADC - Mid-Big Endian +# ## |---CDAB - Mid-Little Endian +# ## data_type - UINT16, INT16, INT32, UINT32, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) +# ## scale - the final numeric variable representation +# ## address - variable address +# +# holding_registers = [ +# { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, +# { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, +# { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, +# { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, +# { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, +# { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, +# ] +# input_registers = [ +# { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, +# { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, +# { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = 
[3,4]}, +# ] # # Read metrics from one or many MongoDB servers @@ -2293,6 +3590,13 @@ # ## When true, collect per database stats # # gather_perdb_stats = false # +# ## When true, collect per collection stats +# # gather_col_stats = false +# +# ## List of db where collections stats are collected +# ## If empty, all db are concerned +# # col_stats_dbs = ["local"] +# # ## Optional TLS Config # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" @@ -2301,6 +3605,51 @@ # # insecure_skip_verify = false +# # Read metrics and status information about processes managed by Monit +# [[inputs.monit]] +# ## Monit HTTPD address +# address = "http://127.0.0.1:2812" +# +# ## Username and Password for Monit +# # username = "" +# # password = "" +# +# ## Amount of time allowed to complete the HTTP request +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Aggregates the contents of multiple files into a single point +# [[inputs.multifile]] +# ## Base directory where telegraf will look for files. +# ## Omit this option to use absolute paths. +# base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" +# +# ## If true, Telegraf discard all data when a single file can't be read. +# ## Else, Telegraf omits the field generated from this file. +# # fail_early = true +# +# ## Files to parse each interval. +# [[inputs.multifile.file]] +# file = "in_pressure_input" +# dest = "pressure" +# conversion = "float" +# [[inputs.multifile.file]] +# file = "in_temp_input" +# dest = "temperature" +# conversion = "float(3)" +# [[inputs.multifile.file]] +# file = "in_humidityrelative_input" +# dest = "humidityrelative" +# conversion = "float(3)" + + # # Read metrics from one or many mysql servers # [[inputs.mysql]] # ## specify servers via a url matching: @@ -2326,55 +3675,59 @@ # ## <1.6: metric_version = 1 (or unset) # metric_version = 2 # -# ## the limits for metrics form perf_events_statements -# perf_events_statements_digest_text_limit = 120 -# perf_events_statements_limit = 250 -# perf_events_statements_time_limit = 86400 -# # # ## if the list is empty, then metrics are gathered from all databasee tables -# table_schema_databases = [] -# # +# # table_schema_databases = [] +# # ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list -# gather_table_schema = false -# # +# # gather_table_schema = false +# # ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST -# gather_process_list = true -# # -# ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS -# gather_user_statistics = true -# # +# # gather_process_list = false +# +# ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS +# # gather_user_statistics = false +# # ## gather auto_increment columns and max values from information schema -# gather_info_schema_auto_inc = true -# # +# # gather_info_schema_auto_inc = false +# # ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS -# gather_innodb_metrics = true -# # +# # gather_innodb_metrics = false +# # ## gather metrics from SHOW SLAVE STATUS command output -# gather_slave_status = true -# # +# # gather_slave_status = false +# # ## gather metrics from SHOW BINARY LOGS command output -# gather_binary_logs = false -# # +# # gather_binary_logs = false +# +# ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES +# # 
gather_global_variables = true +# # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE -# gather_table_io_waits = false -# # +# # gather_table_io_waits = false +# # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS -# gather_table_lock_waits = false -# # +# # gather_table_lock_waits = false +# # ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE -# gather_index_io_waits = false -# # +# # gather_index_io_waits = false +# # ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS -# gather_event_waits = false -# # +# # gather_event_waits = false +# # ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME -# gather_file_events_stats = false -# # +# # gather_file_events_stats = false +# # ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST -# gather_perf_events_statements = false -# # +# # gather_perf_events_statements = false +# +# ## the limits for metrics form perf_events_statements +# # perf_events_statements_digest_text_limit = 120 +# # perf_events_statements_limit = 250 +# # perf_events_statements_time_limit = 86400 +# # ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) -# interval_slow = "30m" +# ## example: interval_slow = "30m" +# # interval_slow = "" # # ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) # # tls_ca = "/etc/telegraf/ca.pem" @@ -2393,6 +3746,21 @@ # # response_timeout = "5s" +# # Neptune Apex data collector +# [[inputs.neptune_apex]] +# ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. +# ## Measurements will be logged under "apex". +# +# ## The base URL of the local Apex(es). If you specify more than one server, they will +# ## be differentiated by the "source" tag. +# servers = [ +# "http://apex.local", +# ] +# +# ## The response_timeout specifies how long to wait for a reply from the Apex. +# #response_timeout = "5s" + + # # Read metrics about network interface usage # [[inputs.net]] # ## By default, telegraf gathers stats from any up interface (excluding loopback) @@ -2432,7 +3800,7 @@ # # expect = "ssh" # # ## Uncomment to remove deprecated fields -# # fieldexclude = ["result_type", "string_found"] +# # fielddrop = ["result_type", "string_found"] # # Read TCP metrics such as established, time wait and sockets counts. @@ -2463,12 +3831,91 @@ # # # HTTP response timeout (default: 5s) # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx Plus Api documentation +# [[inputs.nginx_plus_api]] +# ## An array of API URI to gather stats. 
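To illustrate the mysql options above, here is a minimal sketch using the new metric_version and a couple of the optional gather flags; the DSN is a placeholder in the user:password@tcp(host:port)/ style described in the plugin's documentation.

```toml
[[inputs.mysql]]
  ## Placeholder DSN; see the plugin docs for the full URL format.
  servers = ["monitor:mypassword@tcp(127.0.0.1:3306)/?tls=false"]

  ## Use the current metric naming (recommended for new deployments).
  metric_version = 2

  ## Optional collectors, all disabled by default.
  gather_process_list = true
  gather_innodb_metrics = true
```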
+# urls = ["http://localhost/api"] +# +# # Nginx API version, default: 3 +# # api_version = 3 +# +# # HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module) +# [[inputs.nginx_upstream_check]] +# ## An URL where Nginx Upstream check module is enabled +# ## It should be set to return a JSON formatted response +# url = "http://127.0.0.1/status?format=json" +# +# ## HTTP method +# # method = "GET" +# +# ## Optional HTTP headers +# # headers = {"X-Special-Header" = "Special-Value"} +# +# ## Override HTTP "Host" header +# # host_header = "check.example.com" +# +# ## Timeout for HTTP requests +# timeout = "5s" +# +# ## Optional HTTP Basic Auth credentials +# # username = "username" +# # password = "pa$$word" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read Nginx virtual host traffic status module information (nginx-module-vts) +# [[inputs.nginx_vts]] +# ## An array of ngx_http_status_module or status URI to gather stats. +# urls = ["http://localhost/status"] +# +# ## HTTP response timeout (default: 5s) +# response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Read NSQ topic and channel statistics. # [[inputs.nsq]] # ## An array of NSQD HTTP API endpoints -# endpoints = ["http://localhost:4151"] +# endpoints = ["http://localhost:4151"] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Collect kernel snmp counters and network interface statistics @@ -2491,11 +3938,11 @@ # # Pulls statistics from nvidia GPUs attached to the host # [[inputs.nvidia_smi]] -# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath -# # bin_path = /usr/bin/nvidia-smi +# ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath +# # bin_path = "/usr/bin/nvidia-smi" # -# ## Optional: timeout for GPU polling -# # timeout = 5s +# ## Optional: timeout for GPU polling +# # timeout = "5s" # # OpenLDAP cn=Monitor plugin @@ -2506,7 +3953,7 @@ # # ldaps, starttls, or no encryption. default is an empty string, disabling all encryption. # # note that port will likely need to be changed to 636 for ldaps # # valid options: "" | "starttls" | "ldaps" -# ssl = "" +# tls = "" # # # skip peer certificate verification. Default is false. # insecure_skip_verify = false @@ -2523,7 +3970,19 @@ # reverse_metric_names = true -# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver +# # Get standard NTP query metrics from OpenNTPD. +# [[inputs.openntpd]] +# ## Run ntpctl binary with sudo. +# # use_sudo = false +# +# ## Location of the ntpctl binary. +# # binary = "/usr/sbin/ntpctl" +# +# ## Maximum time the ntpctl binary is allowed to run. 
+# # timeout = "5ms" + + +# # A plugin to collect stats from Opensmtpd - a validating, recursive, and caching DNS resolver # [[inputs.opensmtpd]] # ## If running as a restricted user you can prepend sudo for additional access: # #use_sudo = false @@ -2535,6 +3994,38 @@ # timeout = 1000 +# # Read current weather and forecasts data from openweathermap.org +# [[inputs.openweathermap]] +# ## OpenWeatherMap API key. +# app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" +# +# ## City ID's to collect weather data from. +# city_id = ["5391959"] +# +# ## Language of the description field. Can be one of "ar", "bg", +# ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", +# ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", +# ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" +# # lang = "en" +# +# ## APIs to fetch; can contain "weather" or "forecast". +# fetch = ["weather", "forecast"] +# +# ## OpenWeatherMap base URL +# # base_url = "https://api.openweathermap.org/" +# +# ## Timeout for HTTP response. +# # response_timeout = "5s" +# +# ## Preferred unit system for temperature and wind speed. Can be one of +# ## "metric", "imperial", or "standard". +# # units = "metric" +# +# ## Query interval; OpenWeatherMap updates their weather data every 10 +# ## minutes. +# interval = "10m" + + # # Read metrics of passenger using passenger-status # [[inputs.passenger]] # ## Path of passenger-status. @@ -2576,29 +4067,65 @@ # ## "fcgi://10.0.0.12:9000/status" # ## "cgi://10.0.10.12:9001/status" # ## -# ## Example of multiple gathering from local socket and remove host +# ## Example of multiple gathering from local socket and remote host # ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] # urls = ["http://localhost/status"] +# +# ## Duration allowed to complete HTTP requests. +# # timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # Ping given url(s) and return statistics # [[inputs.ping]] -# ## NOTE: this plugin forks the ping command. You may need to set capabilities -# ## via setcap cap_net_raw+p /bin/ping -# # -# ## List of urls to ping -# urls = ["www.google.com"] # required -# ## number of pings to send per collection (ping -c ) +# ## Hosts to send ping packets to. +# urls = ["example.org"] +# +# ## Method used for sending pings, can be either "exec" or "native". When set +# ## to "exec" the systems ping command will be executed. When set to "native" +# ## the plugin will send pings directly. +# ## +# ## While the default is "exec" for backwards compatibility, new deployments +# ## are encouraged to use the "native" method for improved compatibility and +# ## performance. +# # method = "exec" +# +# ## Number of ping packets to send per interval. Corresponds to the "-c" +# ## option of the ping command. # # count = 1 -# ## interval, in s, at which to ping. 0 == default (ping -i ) +# +# ## Time to wait between sending ping packets in seconds. Operates like the +# ## "-i" option of the ping command. # # ping_interval = 1.0 -# ## per-ping timeout, in s. 0 == no timeout (ping -W ) +# +# ## If set, the time to wait for a ping response in seconds. Operates like +# ## the "-W" option of the ping command. # # timeout = 1.0 -# ## total-ping deadline, in s. 0 == no deadline (ping -w ) +# +# ## If set, the total ping deadline, in seconds. 
Operates like the -w option +# ## of the ping command. # # deadline = 10 -# ## interface or source address to send ping from (ping -I ) -# ## on Darwin and Freebsd only source address possible: (ping -S ) +# +# ## Interface or source address to send ping from. Operates like the -I or -S +# ## option of the ping command. # # interface = "" +# +# ## Specify the ping executable binary. +# # binary = "ping" +# +# ## Arguments for ping command. When arguments is not empty, the command from +# ## the binary option will be used and other options (ping_interval, timeout, +# ## etc) will be ignored. +# # arguments = ["-c", "3"] +# +# ## Use only IPv6 addresses when resolving a hostname. +# # ipv6 = false # # Measure postfix queue statistics @@ -2615,6 +4142,18 @@ # unix_sockets = ["/var/run/pdns.controlsocket"] +# # Read metrics from one or many PowerDNS Recursor servers +# [[inputs.powerdns_recursor]] +# ## Path to the Recursor control socket. +# unix_sockets = ["/var/run/pdns_recursor.controlsocket"] +# +# ## Directory to create receive socket. This default is likely not writable, +# ## please reference the full plugin documentation for a recommended setup. +# # socket_dir = "/var/run/" +# ## Socket permissions for the receive socket. +# # socket_mode = "0666" + + # # Monitor process cpu and memory usage # [[inputs.procstat]] # ## PID file to monitor process @@ -2630,6 +4169,9 @@ # ## CGroup name or path # # cgroup = "systemd/system.slice/nginx.service" # +# ## Windows service name +# # win_service = "" +# # ## override for process_name # ## This is optional; default is sourced from /proc//status # # process_name = "bar" @@ -2637,6 +4179,9 @@ # ## Field name prefix # # prefix = "" # +# ## When true add the full cmdline as a tag. +# # cmdline_tag = false +# # ## Add PID as a tag instead of a field; useful to differentiate between # ## processes whose tags are otherwise the same. Can create a large number # ## of series, use judiciously. @@ -2649,28 +4194,6 @@ # # pid_finder = "pgrep" -# # Read metrics from one or many prometheus clients -# [[inputs.prometheus]] -# ## An array of urls to scrape metrics from. -# urls = ["http://localhost:9100/metrics"] -# -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] -# -# ## Use bearer token for authorization -# # bearer_token = /path/to/bearer/token -# -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# # response_timeout = "3s" -# -# ## Optional TLS Config -# # tls_ca = /path/to/cafile -# # tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - - # # Reads last_run_summary.yaml file and converts to measurments # [[inputs.puppetagent]] # ## Location of puppet last run summary file @@ -2720,6 +4243,15 @@ # ## Note that an empty array for both will include all queues # queue_name_include = [] # queue_name_exclude = [] +# +# ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. +# ## If neither are specified, metrics for all federation upstreams are gathered. +# ## Federation link metrics will only be gathered for queues and exchanges +# ## whose non-federation metrics will be collected (e.g a queue excluded +# ## by the 'queue_name_exclude' option will also be excluded from federation). +# ## Globs accepted. 
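A short sketch of the ping input using the native method encouraged above; the target host and counts are examples.

```toml
[[inputs.ping]]
  urls = ["example.org"]   # hosts to ping (placeholder)
  method = "native"        # send pings from the plugin instead of exec'ing ping
  count = 3                # packets per interval
  ping_interval = 1.0      # seconds between packets
  timeout = 2.0            # seconds to wait for each response
```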
+# # federation_upstream_include = ["dataCentre-*"] +# # federation_upstream_exclude = [] # # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) @@ -2740,6 +4272,16 @@ # ## If no servers are specified, then localhost is used as the host. # ## If no port is specified, 6379 is used # servers = ["tcp://localhost:6379"] +# +# ## specify server password +# # password = "s#cr@t%" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = true # # Read metrics from one or many RethinkDB servers @@ -2800,13 +4342,13 @@ # [[inputs.smart]] # ## Optionally specify the path to the smartctl executable # # path = "/usr/bin/smartctl" -# # +# # ## On most platforms smartctl requires root access. # ## Setting 'use_sudo' to true will make use of sudo to run smartctl. # ## Sudo must be configured to to allow the telegraf user to run smartctl -# ## with out password. +# ## without a password. # # use_sudo = false -# # +# # ## Skip checking disks in this power mode. Defaults to # ## "standby" to not wake up disks that have stoped rotating. # ## See --nocheck in the man pages for smartctl. @@ -2814,79 +4356,66 @@ # ## power mode and might require changing this value to # ## "never" depending on your disks. # # nocheck = "standby" -# # -# ## Gather detailed metrics for each SMART Attribute. -# ## Defaults to "false" -# ## +# +# ## Gather all returned S.M.A.R.T. attribute metrics and the detailed +# ## information from each drive into the 'smart_attribute' measurement. # # attributes = false -# # +# # ## Optionally specify devices to exclude from reporting. # # excludes = [ "/dev/pass6" ] -# # +# # ## Optionally specify devices and device type, if unset # ## a scan (smartctl --scan) for S.M.A.R.T. devices will # ## done and all found will be included except for the # ## excluded in excludes. # # devices = [ "/dev/ada0 -d atacam" ] +# +# ## Timeout for the smartctl command to complete. +# # timeout = "30s" # # Retrieves SNMP values from remote agents # [[inputs.snmp]] -# agents = [ "127.0.0.1:161" ] -# ## Timeout for each SNMP query. -# timeout = "5s" -# ## Number of retries to attempt within timeout. -# retries = 3 -# ## SNMP version, values can be 1, 2, or 3 -# version = 2 +# ## Agent addresses to retrieve values from. +# ## example: agents = ["udp://127.0.0.1:161"] +# ## agents = ["tcp://127.0.0.1:161"] +# agents = ["udp://127.0.0.1:161"] +# +# ## Timeout for each request. +# # timeout = "5s" +# +# ## SNMP version; can be 1, 2, or 3. +# # version = 2 # # ## SNMP community string. -# community = "public" +# # community = "public" # -# ## The GETBULK max-repetitions parameter -# max_repetitions = 10 +# ## Number of retries to attempt. +# # retries = 3 # -# ## SNMPv3 auth parameters -# #sec_name = "myuser" -# #auth_protocol = "md5" # Values: "MD5", "SHA", "" -# #auth_password = "pass" -# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" -# #context_name = "" -# #priv_protocol = "" # Values: "DES", "AES", "" -# #priv_password = "" +# ## The GETBULK max-repetitions parameter. 
+# # max_repetitions = 10 # -# ## measurement name -# name = "system" -# [[inputs.snmp.field]] -# name = "hostname" -# oid = ".1.0.0.1.1" -# [[inputs.snmp.field]] -# name = "uptime" -# oid = ".1.0.0.1.2" -# [[inputs.snmp.field]] -# name = "load" -# oid = ".1.0.0.1.3" -# [[inputs.snmp.field]] -# oid = "HOST-RESOURCES-MIB::hrMemorySize" +# ## SNMPv3 authentication and encryption options. +# ## +# ## Security Name. +# # sec_name = "myuser" +# ## Authentication protocol; one of "MD5", "SHA", or "". +# # auth_protocol = "MD5" +# ## Authentication password. +# # auth_password = "pass" +# ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". +# # sec_level = "authNoPriv" +# ## Context Name. +# # context_name = "" +# ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". +# # priv_protocol = "" +# ## Privacy password used for encrypted messages. +# # priv_password = "" # -# [[inputs.snmp.table]] -# ## measurement name -# name = "remote_servers" -# inherit_tags = [ "hostname" ] -# [[inputs.snmp.table.field]] -# name = "server" -# oid = ".1.0.0.0.1.0" -# is_tag = true -# [[inputs.snmp.table.field]] -# name = "connections" -# oid = ".1.0.0.0.1.1" -# [[inputs.snmp.table.field]] -# name = "latency" -# oid = ".1.0.0.0.1.2" -# -# [[inputs.snmp.table]] -# ## auto populate table's fields using the MIB -# oid = "HOST-RESOURCES-MIB::hrNetworkTable" +# ## Add fields and tables defining the variables you wish to collect. This +# ## example collects the system uptime and interface variables. Reference the +# ## full plugin documentation for configuration details. # # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. @@ -3000,6 +4529,10 @@ # # ## specify a list of one or more Solr cores (default - all) # # cores = ["main"] +# +# ## Optional HTTP Basic Auth Credentials +# # username = "username" +# # password = "pa$$word" # # Read metrics from Microsoft SQL Server @@ -3009,7 +4542,8 @@ # ## By default, the host is localhost, listening on default port, TCP 1433. # ## for Windows, the user is the currently running AD user (SSO). # ## See https://github.com/denisenkom/go-mssqldb for detailed connection -# ## parameters. +# ## parameters, in particular, tls connections can be created like so: +# ## "encrypt=true;certificate=;hostNameInCertificate=" # # servers = [ # # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # # ] @@ -3022,8 +4556,7 @@ # ## If you are using AzureDB, setting this to true will gather resource utilization metrics # # azuredb = false # -# ## If you would like to exclude some of the metrics queries, list them here -# ## Possible choices: +# ## Possible queries: # ## - PerformanceCounters # ## - WaitStatsCategorized # ## - DatabaseIO @@ -3034,7 +4567,101 @@ # ## - MemoryClerk # ## - VolumeSpace # ## - PerformanceMetrics -# # exclude_query = [ 'DatabaseIO' ] +# ## - Schedulers +# ## - AzureDBResourceStats +# ## - AzureDBResourceGovernance +# ## - SqlRequests +# ## - ServerProperties +# ## A list of queries to include. If not specified, all the above listed queries are used. +# # include_query = [] +# +# ## A list of queries to explicitly ignore. +# exclude_query = [ 'Schedulers' , 'SqlRequests'] + + +# # Gather timeseries from Google Cloud Platform v3 monitoring API +# [[inputs.stackdriver]] +# ## GCP Project +# project = "erudite-bloom-151019" +# +# ## Include timeseries that start with the given metric type. 
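A minimal sketch of the reworked snmp input above, using the new URL-style agent address plus one explicit field; the agent address is a placeholder, and sysUpTime is chosen only because it is available on virtually every agent.

```toml
[[inputs.snmp]]
  agents = ["udp://192.0.2.10:161"]   # placeholder agent address
  version = 2
  community = "public"
  timeout = "5s"
  retries = 3

  ## One explicit field to collect from each agent.
  [[inputs.snmp.field]]
    name = "uptime"
    oid = ".1.3.6.1.2.1.1.3.0"   # sysUpTime.0
```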
+# metric_type_prefix_include = [
+# "compute.googleapis.com/",
+# ]
+#
+# ## Exclude timeseries that start with the given metric type.
+# # metric_type_prefix_exclude = []
+#
+# ## Many metrics are updated once per minute; it is recommended to override
+# ## the agent level interval with a value of 1m or greater.
+# interval = "1m"
+#
+# ## Maximum number of API calls to make per second. The quota for accounts
+# ## varies; it can be viewed on the API dashboard:
+# ## https://cloud.google.com/monitoring/quotas#quotas_and_limits
+# # rate_limit = 14
+#
+# ## The delay and window options control the number of points selected on
+# ## each gather. When set, metrics are gathered between:
+# ## start: now() - delay - window
+# ## end: now() - delay
+# #
+# ## Collection delay; if set too low metrics may not yet be available.
+# # delay = "5m"
+# #
+# ## If unset, the window will start at 1m and be updated dynamically to span
+# ## the time between calls (approximately the length of the plugin interval).
+# # window = "1m"
+#
+# ## TTL for cached list of metric types. This is the maximum amount of time
+# ## it may take to discover new metrics.
+# # cache_ttl = "1h"
+#
+# ## If true, raw bucket counts are collected for distribution value types.
+# ## For a more lightweight collection, you may wish to disable and use
+# ## distribution_aggregation_aligners instead.
+# # gather_raw_distribution_buckets = true
+#
+# ## Aggregate functions to be used for metrics whose value type is
+# ## distribution. These aggregate values are recorded in addition to the raw
+# ## bucket counts, if they are enabled.
+# ##
+# ## For a list of aligner strings see:
+# ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner
+# # distribution_aggregation_aligners = [
+# # "ALIGN_PERCENTILE_99",
+# # "ALIGN_PERCENTILE_95",
+# # "ALIGN_PERCENTILE_50",
+# # ]
+#
+# ## Filters can be added to reduce the number of time series matched. All
+# ## functions are supported: starts_with, ends_with, has_substring, and
+# ## one_of. Only the '=' operator is supported.
+# ##
+# ## The logical operators when combining filters are defined statically using
+# ## the following values:
+# ## filter ::= <resource_labels> {AND <metric_labels>}
+# ## resource_labels ::= <resource_label> {OR <resource_label>}
+# ## metric_labels ::= <metric_label> {OR <metric_label>}
+# ##
+# ## For more details, see https://cloud.google.com/monitoring/api/v3/filters
+# #
+# ## Resource labels refine the time series selection with the following expression:
+# ## resource.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.resource_labels]]
+# # key = "instance_name"
+# # value = 'starts_with("localhost")'
+# #
+# ## Metric labels refine the time series selection with the following expression:
+# ## metric.labels.<key> = <value>
+# # [[inputs.stackdriver.filter.metric_labels]]
+# # key = "device_name"
+# # value = 'one_of("sda", "sdb")'
+
+
+# # Get synproxy counter statistics from procfs
+# [[inputs.synproxy]]
+# # no configuration


# # Sysstat metrics collector
@@ -3046,18 +4673,15 @@
# ## Arch: /usr/lib/sa/sadc
# ## RHEL/CentOS: /usr/lib64/sa/sadc
# sadc_path = "/usr/lib/sa/sadc" # required
-# #
-# #
+#
# ## Path to the sadf command, if it is not in PATH
# # sadf_path = "/usr/bin/sadf"
-# #
-# #
+#
# ## Activities is a list of activities, that are passed as argument to the
# ## sadc collector utility (e.g: DISK, SNMP etc...)
# ## The more activities that are added, the more data is collected.
# # activities = ["DISK"]
-# #
-# #
+#
# ## Group metrics to measurements.
# ## # ## If group is false each metric will be prefixed with a description @@ -3065,8 +4689,7 @@ # ## # ## If Group is true, corresponding metrics are grouped to a single measurement. # # group = true -# # -# # +# # ## Options for the sadf command. The values on the left represent the sadf # ## options and the values on the right their description (which are used for # ## grouping and prefixing metrics). @@ -3090,8 +4713,7 @@ # -w = "task" # # -H = "hugepages" # only available for newer linux distributions # # "-I ALL" = "interrupts" # requires INT activity -# # -# # +# # ## Device tags can be used to add additional tags for devices. # ## For example the configuration below adds a tag vg with value rootvg for # ## all metrics with sda devices. @@ -3099,6 +4721,17 @@ # # vg = "rootvg" +# # Gather systemd units state +# [[inputs.systemd_units]] +# ## Set timeout for systemctl execution +# # timeout = "1s" +# # +# ## Filter for a specific unit type, default is "service", other possible +# ## values are "socket", "target", "device", "mount", "automount", "swap", +# ## "timer", "path", "slice" and "scope ": +# # unittype = "service" + + # # Reads metrics from a Teamspeak 3 Server via ServerQuery # [[inputs.teamspeak]] # ## Server address for Teamspeak 3 ServerQuery @@ -3111,6 +4744,27 @@ # # virtual_servers = [1] +# # Read metrics about temperature +# [[inputs.temp]] +# # no configuration + + +# # Read Tengine's basic status information (ngx_http_reqstat_module) +# [[inputs.tengine]] +# # An array of Tengine reqstat module URI to gather stats. +# urls = ["http://127.0.0.1/us"] +# +# # HTTP response timeout (default: 5s) +# # response_timeout = "5s" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.cer" +# # tls_key = "/etc/telegraf/key.key" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + # # Gather metrics from the Tomcat server status page. # [[inputs.tomcat]] # ## URL of the Tomcat server status @@ -3157,17 +4811,33 @@ # ## The default location of the unbound-control binary can be overridden with: # # binary = "/usr/sbin/unbound-control" # +# ## The default location of the unbound config file can be overridden with: +# # config_file = "/etc/unbound/unbound.conf" +# # ## The default timeout of 1s can be overriden with: # # timeout = "1s" # # ## When set to true, thread metrics are tagged with the thread id. # ## -# ## The default is false for backwards compatibility, and will be change to +# ## The default is false for backwards compatibility, and will be changed to # ## true in a future version. It is recommended to set to true on new # ## deployments. # thread_as_tag = false +# # Read uWSGI metrics. +# [[inputs.uwsgi]] +# ## List with urls of uWSGI Stats servers. 
URL must match pattern: +# ## scheme://address[:port] +# ## +# ## For example: +# ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] +# servers = ["tcp://127.0.0.1:1717"] +# +# ## General connection timout +# # timeout = "5s" + + # # A plugin to collect stats from Varnish HTTP Cache # [[inputs.varnish]] # ## If running as a restricted user you can prepend sudo for additional access: @@ -3185,6 +4855,41 @@ # ## Optional name for the varnish instance (or working directory) to query # ## Usually appened after -n in varnish cli # # instance_name = instanceName +# +# ## Timeout for varnishstat command +# # timeout = "1s" + + +# # Collect Wireguard server interface and peer statistics +# [[inputs.wireguard]] +# ## Optional list of Wireguard device/interface names to query. +# ## If omitted, all Wireguard interfaces are queried. +# # devices = ["wg0"] + + +# # Monitor wifi signal strength and quality +# [[inputs.wireless]] +# ## Sets 'proc' directory path +# ## If not specified, then default is /proc +# # host_proc = "/proc" + + +# # Reads metrics from a SSL certificate +# [[inputs.x509_cert]] +# ## List certificate sources +# sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] +# +# ## Timeout for SSL connection +# # timeout = "5s" +# +# ## Pass a different name into the TLS request (Server Name Indication) +# ## example: server_name = "myhost.example.org" +# # server_name = "" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" # # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools @@ -3224,11 +4929,11 @@ # # insecure_skip_verify = true - ############################################################################### # SERVICE INPUT PLUGINS # ############################################################################### + # # AMQP consumer plugin # [[inputs.amqp_consumer]] # ## Broker to consume from. @@ -3244,7 +4949,7 @@ # # username = "" # # password = "" # -# ## Exchange to declare and consume from. +# ## Name of the exchange to declare. If unset, no exchange will be declared. # exchange = "telegraf" # # ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". @@ -3260,15 +4965,32 @@ # # exchange_arguments = { } # # exchange_arguments = {"hash_propery" = "timestamp"} # -# ## AMQP queue name +# ## AMQP queue name. # queue = "telegraf" # -# ## Binding Key +# ## AMQP queue durability can be "transient" or "durable". +# queue_durability = "durable" +# +# ## If true, queue will be passively declared. +# # queue_passive = false +# +# ## A binding between the exchange and queue using this binding key is +# ## created. If unset, no binding is created. # binding_key = "#" # # ## Maximum number of messages server should give to the worker. # # prefetch_count = 50 # +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# # ## Auth method. 
PLAIN and EXTERNAL are supported # ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as # ## described here: https://www.rabbitmq.com/plugins.html @@ -3281,6 +5003,10 @@ # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. +# # content_encoding = "identity" +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -3310,9 +5036,429 @@ # ] -# # Influx HTTP write listener -# [[inputs.http_listener]] +# # Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR +# [[inputs.cisco_telemetry_gnmi]] +# ## Address and port of the GNMI GRPC server +# addresses = ["10.49.234.114:57777"] +# +# ## define credentials +# username = "cisco" +# password = "cisco" +# +# ## GNMI encoding requested (one of: "proto", "json", "json_ietf") +# # encoding = "proto" +# +# ## redial in case of failures after +# redial = "10s" +# +# ## enable client-side TLS and define CA to authenticate the device +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # insecure_skip_verify = true +# +# ## define client-side TLS certificate & key to authenticate to the device +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## GNMI subscription prefix (optional, can usually be left empty) +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# # origin = "" +# # prefix = "" +# # target = "" +# +# ## Define additional aliases to map telemetry encoding paths to simple measurement names +# #[inputs.cisco_telemetry_gnmi.aliases] +# # ifcounters = "openconfig:/interfaces/interface/state/counters" +# +# [[inputs.cisco_telemetry_gnmi.subscription]] +# ## Name of the measurement that will be emitted +# name = "ifcounters" +# +# ## Origin and path of the subscription +# ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths +# ## +# ## origin usually refers to a (YANG) data model implemented by the device +# ## and path to a specific substructe inside it that should be subscribed to (similar to an XPath) +# ## YANG models can be found e.g. here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr +# origin = "openconfig-interfaces" +# path = "/interfaces/interface/state/counters" +# +# # Subscription mode (one of: "target_defined", "sample", "on_change") and interval +# subscription_mode = "sample" +# sample_interval = "10s" +# +# ## Suppress redundant transmissions when measured values are unchanged +# # suppress_redundant = false +# +# ## If suppression is enabled, send updates at least every X seconds anyway +# # heartbeat_interval = "60s" + + +# # Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms +# [[inputs.cisco_telemetry_mdt]] +# ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when +# ## using the grpc transport. +# transport = "grpc" +# +# ## Address and port to host telemetry listener +# service_address = ":57000" +# +# ## Enable TLS; grpc transport only. +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Enable TLS client authentication and define allowed CA certificates; grpc +# ## transport only. 
+# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags +# # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] +# +# ## Define aliases to map telemetry encoding paths to simple measurement names +# [inputs.cisco_telemetry_mdt.aliases] +# ifstats = "ietf-interfaces:interfaces-state/interface/statistics" + + +# # Read metrics from one or many ClickHouse servers +# [[inputs.clickhouse]] +# ## Username for authorization on ClickHouse server +# ## example: user = "default"" +# username = "default" +# +# ## Password for authorization on ClickHouse server +# ## example: password = "super_secret" +# +# ## HTTP(s) timeout while getting metrics values +# ## The timeout includes connection time, any redirects, and reading the response body. +# ## example: timeout = 1s +# # timeout = 5s +# +# ## List of servers for metrics scraping +# ## metrics scrape via HTTP(s) clickhouse interface +# ## https://clickhouse.tech/docs/en/interfaces/http/ +# ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] +# servers = ["http://127.0.0.1:8123"] +# +# ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster +# ## with using same "user:password" described in "user" and "password" parameters +# ## and get this server hostname list from "system.clusters" table +# ## see +# ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters +# ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers +# ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ +# ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables +# ## example: auto_discovery = false +# # auto_discovery = true +# +# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" +# ## when this filter present then "WHERE cluster IN (...)" filter will apply +# ## please use only full cluster names here, regexp and glob filters is not allowed +# ## for "/etc/clickhouse-server/config.d/remote.xml" +# ## +# ## +# ## +# ## +# ## clickhouse-ru-1.local9000 +# ## clickhouse-ru-2.local9000 +# ## +# ## +# ## clickhouse-eu-1.local9000 +# ## clickhouse-eu-2.local9000 +# ## +# ## +# ## +# ## +# ## +# ## +# ## example: cluster_include = ["my-own-cluster"] +# # cluster_include = [] +# +# ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" +# ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply +# ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] +# # cluster_exclude = [] +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from Google PubSub +# [[inputs.cloud_pubsub]] +# ## Required. Name of Google Cloud Platform (GCP) Project that owns +# ## the given PubSub subscription. +# project = "my-project" +# +# ## Required. Name of PubSub subscription to ingest metrics from. +# subscription = "my-subscription" +# +# ## Required. Data format to consume. +# ## Each data format has its own unique set of configuration options. 
+# ## Read more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional. Filepath for GCP credentials JSON file to authorize calls to +# ## PubSub APIs. If not set explicitly, Telegraf will attempt to use +# ## Application Default Credentials, which is preferred. +# # credentials_file = "path/to/my/creds.json" +# +# ## Optional. Number of seconds to wait before attempting to restart the +# ## PubSub subscription receiver after an unexpected error. +# ## If the streaming pull for a PubSub Subscription fails (receiver), +# ## the agent attempts to restart receiving messages after this many seconds. +# # retry_delay_seconds = 5 +# +# ## Optional. Maximum byte length of a message to consume. +# ## Larger messages are dropped with an error. If less than 0 or unspecified, +# ## treated as no limit. +# # max_message_len = 1000000 +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## The following are optional Subscription ReceiveSettings in PubSub. +# ## Read more about these values: +# ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings +# +# ## Optional. Maximum number of seconds for which a PubSub subscription +# ## should auto-extend the PubSub ACK deadline for each message. If less than +# ## 0, auto-extension is disabled. +# # max_extension = 0 +# +# ## Optional. Maximum number of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_messages = 0 +# +# ## Optional. Maximum size in bytes of unprocessed messages in PubSub +# ## (unacknowledged but not yet expired in PubSub). +# ## A value of 0 is treated as the default PubSub value. +# ## Negative values will be treated as unlimited. +# # max_outstanding_bytes = 0 +# +# ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn +# ## to pull messages from PubSub concurrently. This limit applies to each +# ## subscription separately and is treated as the PubSub default if less than +# ## 1. Note this setting does not limit the number of messages that can be +# ## processed concurrently (use "max_outstanding_messages" instead). +# # max_receiver_go_routines = 0 +# +# ## Optional. If true, Telegraf will attempt to base64 decode the +# ## PubSub message data before parsing +# # base64_data = false + + +# # Google Cloud Pub/Sub Push HTTP listener +# [[inputs.cloud_pubsub_push]] # ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Application secret to verify messages originate from Cloud Pub/Sub +# # token = "" +# +# ## Path to listen to. +# # path = "/" +# +# ## Maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## Maximum duration before timing out write of the response. 
This should be set to a value +# ## large enough that you can send at least 'metric_batch_size' number of messages within the +# ## duration. +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag. +# # add_meta = false +# +# ## Optional. Maximum messages to read from PubSub that have not been written +# ## to an output. Defaults to 1000. +# ## For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message contains 10 metrics and the output +# ## metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read logging output from the Docker engine +# [[inputs.docker_log]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# # endpoint = "unix:///var/run/docker.sock" +# +# ## When true, container logs are read from the beginning; otherwise +# ## reading begins at the end of the log. +# # from_beginning = false +# +# ## Timeout for Docker API calls. +# # timeout = "5s" +# +# ## Containers to include and exclude. Globs accepted. +# ## Note that an empty array for both will include all containers +# # container_name_include = [] +# # container_name_exclude = [] +# +# ## Container states to include and exclude. Globs accepted. +# ## When empty only containers in the "running" state will be captured. +# # container_state_include = [] +# # container_state_exclude = [] +# +# ## docker labels to include and exclude as tags. Globs accepted. +# ## Note that an empty array for both will include all labels as tags +# # docker_label_include = [] +# # docker_label_exclude = [] +# +# ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars +# source_tag = false +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # Azure Event Hubs service input plugin +# [[inputs.eventhub_consumer]] +# ## The default behavior is to create a new Event Hub client from environment variables. 
+# ## This requires one of the following sets of environment variables to be set: +# ## +# ## 1) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_CONNECTION_STRING" +# ## +# ## 2) Expected Environment Variables: +# ## - "EVENTHUB_NAMESPACE" +# ## - "EVENTHUB_NAME" +# ## - "EVENTHUB_KEY_NAME" +# ## - "EVENTHUB_KEY_VALUE" +# +# ## Uncommenting the option below will create an Event Hub client based solely on the connection string. +# ## This can either be the associated environment variable or hard coded directly. +# # connection_string = "" +# +# ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister +# # persistence_dir = "" +# +# ## Change the default consumer group +# # consumer_group = "" +# +# ## By default the event hub receives all messages present on the broker, alternative modes can be set below. +# ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). +# ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). +# # from_timestamp = +# # latest = true +# +# ## Set a custom prefetch count for the receiver(s) +# # prefetch_count = 1000 +# +# ## Add an epoch to the receiver(s) +# # epoch = 0 +# +# ## Change to set a custom user agent, "telegraf" is used by default +# # user_agent = "telegraf" +# +# ## To consume from a specific partition, set the partition_ids option. +# ## An empty array will result in receiving from all partitions. +# # partition_ids = ["0","1"] +# +# ## Max undelivered messages +# # max_undelivered_messages = 1000 +# +# ## Set either option below to true to use a system property as timestamp. +# ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. +# ## It is recommended to use this setting when the data itself has no timestamp. +# # enqueued_time_as_ts = true +# # iot_hub_enqueued_time_as_ts = true +# +# ## Tags or fields to create from keys present in the application property bag. +# ## These could for example be set by message enrichments in Azure IoT Hub. +# # application_property_tags = [] +# # application_property_fields = [] +# +# ## Tag or field name to use for metadata +# ## By default all metadata is disabled +# # sequence_number_field = "SequenceNumber" +# # enqueued_time_field = "EnqueuedTime" +# # offset_field = "Offset" +# # partition_id_tag = "PartitionID" +# # partition_key_tag = "PartitionKey" +# # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" +# # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" +# # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" +# # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" +# # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Run executable as long-running input plugin +# [[inputs.execd]] +# ## Program to run as daemon +# command = ["telegraf-smartctl", "-d", "/dev/sda"] +# +# ## Define how the process is signaled on each collection interval. +# ## Valid values are: +# ## "none" : Do not signal anything. +# ## The process must output metrics by itself. +# ## "STDIN" : Send a newline on STDIN. +# ## "SIGHUP" : Send a HUP signal. Not available on Windows. +# ## "SIGUSR1" : Send a USR1 signal. 
Not available on Windows. +# ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. +# signal = "none" +# +# ## Delay before the process is restarted after an unexpected termination +# restart_delay = "10s" +# +# ## Data format to consume. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.http_listener]] +# ## Address and port to host InfluxDB listener on # service_address = ":8186" # # ## maximum duration before timing out read of the request @@ -3320,13 +5466,93 @@ # ## maximum duration before timing out write of the response # write_timeout = "10s" # -# ## Maximum allowed http request body size in bytes. -# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) -# max_body_size = 0 +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" # -# ## Maximum line size allowed to be sent in bytes. -# ## 0 means to use the default of 65536 bytes (64 kibibytes) -# max_line_size = 0 +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# tls_cert = "/etc/telegraf/cert.pem" +# tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" + + +# # Generic HTTP write listener +# [[inputs.http_listener_v2]] +# ## Address and port to host HTTP listener on +# service_address = ":8080" +# +# ## Path to listen to. +# # path = "/telegraf" +# +# ## HTTP methods to accept. +# # methods = ["POST", "PUT"] +# +# ## maximum duration before timing out read of the request +# # read_timeout = "10s" +# ## maximum duration before timing out write of the response +# # write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) +# # max_body_size = "500MB" +# +# ## Part of the request to consume. Available options are "body" and +# ## "query". +# # data_source = "body" +# +# ## Set one or more allowed client CA certificate file names to +# ## enable mutually authenticated TLS connections +# # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] +# +# ## Add service certificate and key +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# +# ## Optional username and password to accept for HTTP basic authentication. +# ## You probably want to make sure you have TLS configured above for this. +# # basic_username = "foobar" +# # basic_password = "barfoo" +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Accept metrics over InfluxDB 1.x HTTP API +# [[inputs.influxdb_listener]] +# ## Address and port to host InfluxDB listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed HTTP request body size in bytes. +# ## 0 means to use the default of 32MiB. +# max_body_size = "32MiB" +# +# ## Optional tag name used to store the database. +# ## If the write has a database in the query string then it will be kept in this tag name. +# ## This tag can be used in downstream outputs. +# ## The default value of nothing means it will be off and the database will not be recorded. +# # database_tag = "" # # ## Set one or more allowed client CA certificate file names to # ## enable mutually authenticated TLS connections @@ -3377,9 +5603,13 @@ # "/interfaces", # ] # -# ## x509 Certificate to use with TLS connection. If it is not provided, an insecure -# ## channel will be opened with server -# ssl_cert = "/etc/telegraf/cert.pem" +# ## Optional TLS Config +# # enable_tls = true +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. # ## Failed streams/calls will not be retried if 0 is provided @@ -3389,50 +5619,85 @@ # str_as_tags = false -# # Read metrics from Kafka topic(s) +# # Read metrics from Kafka topics # [[inputs.kafka_consumer]] -# ## kafka servers +# ## Kafka brokers. # brokers = ["localhost:9092"] -# ## topic(s) to consume +# +# ## Topics to consume. # topics = ["telegraf"] # +# ## When set this tag will be added to all metrics with the topic as the value. +# # topic_tag = "" +# +# ## Optional Client id +# # client_id = "Telegraf" +# +# ## Set the minimal supported Kafka version. Setting this enables the use of new +# ## Kafka features and APIs. Must be 0.10.2.0 or greater. +# ## ex: version = "1.1.0" +# # version = "" +# # ## Optional TLS Config +# # enable_tls = true # # tls_ca = "/etc/telegraf/ca.pem" # # tls_cert = "/etc/telegraf/cert.pem" # # tls_key = "/etc/telegraf/key.pem" # ## Use TLS but skip chain & host verification # # insecure_skip_verify = false # -# ## Optional SASL Config +# ## SASL authentication credentials. These settings should typically be used +# ## with TLS encryption enabled using the "enable_tls" option. # # sasl_username = "kafka" # # sasl_password = "secret" # -# ## the name of the consumer group -# consumer_group = "telegraf_metrics_consumers" -# ## Offset (must be either "oldest" or "newest") -# offset = "oldest" +# ## SASL protocol version. When connecting to Azure EventHub set to 0. +# # sasl_version = 1 +# +# ## Name of the consumer group. +# # consumer_group = "telegraf_metrics_consumers" +# +# ## Initial offset position; one of "oldest" or "newest". +# # offset = "oldest" +# +# ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". 
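+# ## ("range" assigns each consumer a contiguous block of partitions per topic,
+# ## "roundrobin" distributes partitions one at a time across the group, and
+# ## "sticky" keeps existing assignments where possible during rebalances.)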
+# # balance_strategy = "range" +# +# ## Maximum length of a message to consume, in bytes (default 0/unlimited); +# ## larger messages are dropped +# max_message_len = 1000000 +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 # # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" -# -# ## Maximum length of a message to consume, in bytes (default 0/unlimited); -# ## larger messages are dropped -# max_message_len = 65536 # # Read metrics from Kafka topic(s) # [[inputs.kafka_consumer_legacy]] # ## topic(s) to consume # topics = ["telegraf"] +# # ## an array of Zookeeper connection strings # zookeeper_peers = ["localhost:2181"] +# # ## Zookeeper Chroot # zookeeper_chroot = "" +# # ## the name of the consumer group # consumer_group = "telegraf_metrics_consumers" +# # ## Offset (must be either "oldest" or "newest") # offset = "oldest" # @@ -3447,6 +5712,70 @@ # max_message_len = 65536 +# # Configuration for the AWS Kinesis input. +# [[inputs.kinesis_consumer]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# # access_key = "" +# # secret_key = "" +# # token = "" +# # role_arn = "" +# # profile = "" +# # shared_credential_file = "" +# +# ## Endpoint to make request against, the correct endpoint is automatically +# ## determined and this option should only be set if you wish to override the +# ## default. +# ## ex: endpoint_url = "http://localhost:8000" +# # endpoint_url = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# +# ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) +# # shard_iterator_type = "TRIM_HORIZON" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# +# ## Optional +# ## Configuration for a dynamodb checkpoint +# [inputs.kinesis_consumer.checkpoint_dynamodb] +# ## unique name for this consumer +# app_name = "default" +# table_name = "default" + + +# # Read metrics off Arista LANZ, via socket +# [[inputs.lanz]] +# ## URL to Arista LANZ endpoint +# servers = [ +# "tcp://127.0.0.1:50001" +# ] + + # # Stream and parse log file(s). # [[inputs.logparser]] # ## Log files to parse. @@ -3483,6 +5812,7 @@ # # ## Custom patterns can also be defined here. Put one pattern per line. # custom_patterns = ''' +# ''' # # ## Timezone allows you to provide an override for timestamps that # ## don't already include an offset @@ -3493,36 +5823,63 @@ # ## 1. Local -- interpret based on machine localtime # ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones # ## 3. UTC -- or blank/unspecified, will return timestamp in UTC -# timezone = "Canada/Eastern" -# ''' +# # timezone = "Canada/Eastern" +# +# ## When set to "disable", timestamp will not incremented if there is a +# ## duplicate. +# # unique_timestamp = "auto" # # Read metrics from MQTT topic(s) # [[inputs.mqtt_consumer]] # ## MQTT broker URLs to be used. The format should be scheme://host:port, # ## schema can be tcp, ssl, or ws. -# servers = ["tcp://localhost:1883"] +# servers = ["tcp://127.0.0.1:1883"] # -# ## MQTT QoS, must be 0, 1, or 2 -# qos = 0 -# ## Connection timeout for initial connection in seconds -# connection_timeout = "30s" -# -# ## Topics to subscribe to +# ## Topics that will be subscribed to. # topics = [ # "telegraf/host01/cpu", # "telegraf/+/mem", # "sensors/#", # ] # -# # if true, messages that can't be delivered while the subscriber is offline -# # will be delivered when it comes back (such as on service restart). -# # NOTE: if true, client_id MUST be set -# persistent_session = false -# # If empty, a random client ID will be generated. -# client_id = "" +# ## The message topic will be stored in a tag specified by this value. If set +# ## to the empty string no topic tag will be created. +# # topic_tag = "topic" # -# ## username and password to connect MQTT server. +# ## QoS policy for messages +# ## 0 = at most once +# ## 1 = at least once +# ## 2 = exactly once +# ## +# ## When using a QoS of 1 or 2, you should enable persistent_session to allow +# ## resuming unacknowledged messages. +# # qos = 0 +# +# ## Connection timeout for initial connection in seconds +# # connection_timeout = "30s" +# +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# +# ## Persistent session disables clearing of the client session on connection. +# ## In order for this option to work you must also set client_id to identify +# ## the client. 
To receive messages that arrived while the client is offline, +# ## also set the qos option to 1 or 2 and don't forget to also set the QoS when +# ## publishing. +# # persistent_session = false +# +# ## If unset, a random client ID will be generated. +# # client_id = "" +# +# ## Username and password to connect MQTT server. # # username = "telegraf" # # password = "metricsmetricsmetricsmetrics" # @@ -3543,19 +5900,46 @@ # # Read metrics from NATS subject(s) # [[inputs.nats_consumer]] # ## urls of NATS servers -# # servers = ["nats://localhost:4222"] +# servers = ["nats://localhost:4222"] +# +# ## subject(s) to consume +# subjects = ["telegraf"] +# +# ## name a queue group +# queue_group = "telegraf_consumers" +# +# ## Optional credentials +# # username = "" +# # password = "" +# +# ## Optional NATS 2.0 and NATS NGS compatible user credentials +# # credentials = "/etc/telegraf/nats.creds" +# # ## Use Transport Layer Security # # secure = false -# ## subject(s) to consume -# # subjects = ["telegraf"] -# ## name a queue group -# # queue_group = "telegraf_consumers" +# +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false # # ## Sets the limits for pending msgs and bytes for each subscription # ## These shouldn't need to be adjusted except in very high throughput scenarios # # pending_message_limit = 65536 # # pending_bytes_limit = 67108864 # +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# # ## Data format to consume. # ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -3567,14 +5951,26 @@ # [[inputs.nsq_consumer]] # ## Server option still works but is deprecated, we just prepend it to the nsqd array. # # server = "localhost:4150" +# # ## An array representing the NSQD TCP HTTP Endpoints # nsqd = ["localhost:4150"] +# # ## An array representing the NSQLookupd HTTP Endpoints # nsqlookupd = ["localhost:4161"] # topic = "telegraf" # channel = "consumer" # max_in_flight = 100 # +# ## Maximum messages to read from the broker that have not been written by an +# ## output. For best throughput set based on the number of metrics within +# ## each message and the size of the output's metric_batch_size. +# ## +# ## For example, if each message from the queue contains 10 metrics and the +# ## output metric_batch_size is 1000, setting this to 100 will ensure that a +# ## full batch is collected and the write is triggered immediately without +# ## waiting until the next flush_interval. +# # max_undelivered_messages = 1000 +# # ## Data format to consume. 
# ## Each data format has its own unique set of configuration options, read # ## more about them here: @@ -3582,6 +5978,19 @@ # data_format = "influx" +# # Read metrics from one or many pgbouncer servers +# [[inputs.pgbouncer]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# address = "host=localhost user=pgbouncer sslmode=disable" + + # # Read metrics from one or many postgresql servers # [[inputs.postgresql]] # ## specify address via a url matching: @@ -3660,7 +6069,10 @@ # ## field is used to define custom tags (separated by commas) # ## The optional "measurement" value can be used to override the default # ## output measurement name ("postgresql"). -# # +# ## +# ## The script option can be used to specify the .sql file path. +# ## If script and sqlquery options specified at same time, sqlquery will be used +# ## # ## Structure : # ## [[inputs.postgresql_extensible.query]] # ## sqlquery string @@ -3681,6 +6093,93 @@ # tagvalue="postgresql.stats" +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## Metric version controls the mapping from Prometheus metrics into +# ## Telegraf metrics. When using the prometheus_client output, use the same +# ## value in both plugins to ensure metrics are round-tripped without +# ## modification. +# ## +# ## example: metric_version = 1; deprecated in 1.13 +# ## metric_version = 2; recommended version +# # metric_version = 1 +# +# ## Url tag name (tag containing scrapped url. optional, default is "url") +# # url_tag = "scrapeUrl" +# +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] +# +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" +# +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to 'https' & most likely set the tls config. +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true +# ## Restricts Kubernetes monitoring to a single namespace +# ## ex: monitor_kubernetes_pods_namespace = "default" +# # monitor_kubernetes_pods_namespace = "" +# # label selector to target pods which have the label +# # kubernetes_label_selector = "env=dev,app=nginx" +# # field selector to target pods +# # eg. To scrape pods on a specific node +# # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" +# +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# # bearer_token = "/path/to/bearer/token" +# ## OR +# # bearer_token_string = "abc_123" +# +# ## HTTP Basic Authentication username and password. 
('bearer_token' and +# ## 'bearer_token_string' take priority) +# # username = "" +# # password = "" +# +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# # response_timeout = "3s" +# +# ## Optional TLS Config +# # tls_ca = /path/to/cafile +# # tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + + +# # SFlow V5 Protocol Listener +# [[inputs.sflow]] +# ## Address to listen for sFlow packets. +# ## example: service_address = "udp://:6343" +# ## service_address = "udp4://:6343" +# ## service_address = "udp6://:6343" +# service_address = "udp://:6343" +# +# ## Set the size of the operating system's receive buffer. +# ## example: read_buffer_size = "64KiB" +# # read_buffer_size = "" + + +# # Receive SNMP traps +# [[inputs.snmp_trap]] +# ## Transport, local address, and port to listen on. Transport must +# ## be "udp://". Omit local address to listen on all interfaces. +# ## example: "udp://127.0.0.1:1234" +# ## +# ## Special permissions may be required to listen on a port less than +# ## 1024. See README.md for details +# ## +# # service_address = "udp://:162" +# ## Timeout running snmptranslate command +# # timeout = "5s" + + # # Generic socket listener capable of handling multiple socket types. # [[inputs.socket_listener]] # ## URL to listen on @@ -3695,6 +6194,13 @@ # # service_address = "unix:///tmp/telegraf.sock" # # service_address = "unixgram:///tmp/telegraf.sock" # +# ## Change the file mode bits on unix sockets. These permissions may not be +# ## respected by some platforms, to safely restrict write permissions it is best +# ## to place the socket into a directory that has previously been created +# ## with the desired permissions. +# ## ex: socket_mode = "777" +# # socket_mode = "" +# # ## Maximum number of concurrent connections. # ## Only applies to stream sockets (e.g. TCP). # ## 0 (default) is unlimited. @@ -3712,11 +6218,11 @@ # ## Enables client authentication if set. # # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] # -# ## Maximum socket buffer size in bytes. +# ## Maximum socket buffer size (in bytes when no unit specified). # ## For stream sockets, once the buffer fills up, the sender will start backing up. # ## For datagram sockets, once the buffer fills up, metrics will start dropping. # ## Defaults to the OS default. -# # read_buffer_size = 65535 +# # read_buffer_size = "64KiB" # # ## Period between keep alive probes. # ## Only applies to TCP sockets. @@ -3729,6 +6235,10 @@ # ## more about them here: # ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # # data_format = "influx" +# +# ## Content encoding for message payloads, can be set to "gzip" to or +# ## "identity" to apply no encoding. 
+# # content_encoding = "identity" # # Statsd UDP/TCP Server @@ -3763,7 +6273,7 @@ # delete_timings = true # # ## Percentiles to calculate for timing & histogram stats -# percentiles = [90] +# percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] # # ## separator to use between elements of a statsd metric # metric_separator = "_" @@ -3772,8 +6282,11 @@ # ## http://docs.datadoghq.com/guides/dogstatsd/ # parse_data_dog_tags = false # +# ## Parses datadog extensions to the statsd format +# datadog_extensions = false +# # ## Statsd data translation templates, more info can be read here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite +# ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # # templates = [ # # "cpu.* measurement*" # # ] @@ -3788,7 +6301,19 @@ # percentile_limit = 1000 -# # Accepts syslog messages per RFC5425 +# # Suricata stats plugin +# [[inputs.suricata]] +# ## Data sink for Suricata stats log +# # This is expected to be a filename of a +# # unix socket to be created for listening. +# source = "/var/run/suricata-stats.sock" +# +# # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" +# # becomes "detect_alert" when delimiter is "_". +# delimiter = "_" + + +# # Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587 # [[inputs.syslog]] # ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 # ## Protocol, address and port to host the syslog receiver. @@ -3812,9 +6337,19 @@ # ## Only applies to stream sockets (e.g. TCP). # # max_connections = 1024 # -# ## Read timeout (default = 500ms). +# ## Read timeout is the maximum time allowed for reading a single message (default = 5s). # ## 0 means unlimited. -# # read_timeout = 500ms +# # read_timeout = "5s" +# +# ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). +# ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), +# ## or the non-transparent framing technique (RFC6587#section-3.4.2). +# ## Must be one of "octet-counting", "non-transparent". +# # framing = "octet-counting" +# +# ## The trailer to be expected in case of non-trasparent framing (default = "LF"). +# ## Must be one of "LF", or "NUL". +# # trailer = "LF" # # ## Whether to parse in best effort mode or not (default = false). # ## By default best effort parsing is off. @@ -3869,6 +6404,186 @@ # # see https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener +# # Read metrics from VMware vCenter +# [[inputs.vsphere]] +# ## List of vCenter URLs to be monitored. These three lines must be uncommented +# ## and edited for the plugin to work. 
+# vcenters = [ "https://vcenter.local/sdk" ] +# username = "user@corp.local" +# password = "secret" +# +# ## VMs +# ## Typical VM metrics (if omitted or empty, all metrics are collected) +# # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) +# # vm_exclude = [] # Inventory paths to exclude +# vm_metric_include = [ +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.run.summation", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.wait.summation", +# "mem.active.average", +# "mem.granted.average", +# "mem.latency.average", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.usage.average", +# "power.power.average", +# "virtualDisk.numberReadAveraged.average", +# "virtualDisk.numberWriteAveraged.average", +# "virtualDisk.read.average", +# "virtualDisk.readOIO.latest", +# "virtualDisk.throughput.usage.average", +# "virtualDisk.totalReadLatency.average", +# "virtualDisk.totalWriteLatency.average", +# "virtualDisk.write.average", +# "virtualDisk.writeOIO.latest", +# "sys.uptime.latest", +# ] +# # vm_metric_exclude = [] ## Nothing is excluded by default +# # vm_instances = true ## true by default +# +# ## Hosts +# ## Typical host metrics (if omitted or empty, all metrics are collected) +# # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) +# # host_exclude [] # Inventory paths to exclude +# host_metric_include = [ +# "cpu.coreUtilization.average", +# "cpu.costop.summation", +# "cpu.demand.average", +# "cpu.idle.summation", +# "cpu.latency.average", +# "cpu.readiness.average", +# "cpu.ready.summation", +# "cpu.swapwait.summation", +# "cpu.usage.average", +# "cpu.usagemhz.average", +# "cpu.used.summation", +# "cpu.utilization.average", +# "cpu.wait.summation", +# "disk.deviceReadLatency.average", +# "disk.deviceWriteLatency.average", +# "disk.kernelReadLatency.average", +# "disk.kernelWriteLatency.average", +# "disk.numberReadAveraged.average", +# "disk.numberWriteAveraged.average", +# "disk.read.average", +# "disk.totalReadLatency.average", +# "disk.totalWriteLatency.average", +# "disk.write.average", +# "mem.active.average", +# "mem.latency.average", +# "mem.state.latest", +# "mem.swapin.average", +# "mem.swapinRate.average", +# "mem.swapout.average", +# "mem.swapoutRate.average", +# "mem.totalCapacity.average", +# "mem.usage.average", +# "mem.vmmemctl.average", +# "net.bytesRx.average", +# "net.bytesTx.average", +# "net.droppedRx.summation", +# "net.droppedTx.summation", +# "net.errorsRx.summation", +# "net.errorsTx.summation", +# "net.usage.average", +# "power.power.average", +# "storageAdapter.numberReadAveraged.average", +# "storageAdapter.numberWriteAveraged.average", +# "storageAdapter.read.average", +# "storageAdapter.write.average", +# "sys.uptime.latest", +# ] +# ## Collect IP addresses? 
Valid values are "ipv4" and "ipv6" +# # ip_addresses = ["ipv6", "ipv4" ] +# +# # host_metric_exclude = [] ## Nothing excluded by default +# # host_instances = true ## true by default +# +# +# ## Clusters +# # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # cluster_exclude = [] # Inventory paths to exclude +# # cluster_metric_include = [] ## if omitted or empty, all metrics are collected +# # cluster_metric_exclude = [] ## Nothing excluded by default +# # cluster_instances = false ## false by default +# +# ## Datastores +# # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) +# # datastore_exclude = [] # Inventory paths to exclude +# # datastore_metric_include = [] ## if omitted or empty, all metrics are collected +# # datastore_metric_exclude = [] ## Nothing excluded by default +# # datastore_instances = false ## false by default +# +# ## Datacenters +# # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) +# # datacenter_exclude = [] # Inventory paths to exclude +# datacenter_metric_include = [] ## if omitted or empty, all metrics are collected +# datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. +# # datacenter_instances = false ## false by default +# +# ## Plugin Settings +# ## separator character to use for measurement and field names (default: "_") +# # separator = "_" +# +# ## number of objects to retreive per query for realtime resources (vms and hosts) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_objects = 256 +# +# ## number of metrics to retreive per query for non-realtime resources (clusters and datastores) +# ## set to 64 for vCenter 5.5 and 6.0 (default: 256) +# # max_query_metrics = 256 +# +# ## number of go routines to use for collection and discovery of objects and metrics +# # collect_concurrency = 1 +# # discover_concurrency = 1 +# +# ## the interval before (re)discovering objects subject to metrics collection (default: 300s) +# # object_discovery_interval = "300s" +# +# ## timeout applies to any of the api request made to vcenter +# # timeout = "60s" +# +# ## When set to true, all samples are sent as integers. This makes the output +# ## data types backwards compatible with Telegraf 1.9 or lower. Normally all +# ## samples from vCenter, with the exception of percentages, are integer +# ## values, but under some conditions, some averaging takes place internally in +# ## the plugin. Setting this flag to "false" will send values as floats to +# ## preserve the full precision when averaging takes place. +# # use_int_samples = true +# +# ## Custom attributes from vCenter can be very useful for queries in order to slice the +# ## metrics along different dimension and for forming ad-hoc relationships. They are disabled +# ## by default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. +# ## By default, since they can add a considerable amount of tags to the resulting metrics. To +# ## enable, simply set custom_attribute_exlude to [] (empty set) and use custom_attribute_include +# ## to select the attributes you want to include. 
+# # custom_attribute_include = [] +# # custom_attribute_exclude = ["*"] +# +# ## Optional SSL Config +# # ssl_ca = "/path/to/cafile" +# # ssl_cert = "/path/to/certfile" +# # ssl_key = "/path/to/keyfile" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + # # A Webhooks Event collector # [[inputs.webhooks]] # ## Address and port to host Webhook listener on diff --git a/etc/telegraf_windows.conf b/etc/telegraf_windows.conf index 54b7ee0e1..5b7092899 100644 --- a/etc/telegraf_windows.conf +++ b/etc/telegraf_windows.conf @@ -1,18 +1,26 @@ -# Telegraf configuration - +# Telegraf Configuration +# # Telegraf is entirely plugin driven. All metrics are gathered from the # declared inputs, and sent to the declared outputs. - +# # Plugins must be declared in here to be active. # To deactivate a plugin, comment out the name and any variables. - +# # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. +# +# Environment variables can be used anywhere in this config file, simply surround +# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), +# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) + # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" + # Configuration for telegraf agent [agent] @@ -22,11 +30,15 @@ ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true - ## Telegraf will cache metric_buffer_limit metrics for each output, and will - ## flush this buffer on a successful write. - metric_buffer_limit = 1000 - ## Flush the buffer whenever full, regardless of flush_interval. - flush_buffer_when_full = true + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## Maximum number of unwritten metrics per output. Increasing this value + ## allows for longer periods of output downtime without dropping metrics at the + ## cost of higher maximum memory usage. + metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. ## Each plugin will sleep for a random time within jitter before collecting. @@ -34,58 +46,197 @@ ## same time, which can have a measurable effect on the system. collection_jitter = "0s" - ## Default flushing interval for all outputs. You shouldn't set this below - ## interval. Maximum flush_interval will be flush_interval + flush_jitter + ## Default flushing interval for all outputs. Maximum flush_interval will be + ## flush_interval + flush_jitter flush_interval = "10s" ## Jitter the flush interval by a random amount. This is primarily to avoid ## large write spikes for users running a large number of telegraf instances. ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" - ## Logging configuration: - ## Run telegraf in debug mode - debug = false - ## Run telegraf in quiet mode - quiet = false - ## Specify the log file name. The empty string means to log to stdout. - logfile = "/Program Files/Telegraf/telegraf.log" + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. 
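Stepping back to the note at the top of this Windows sample config about environment variables: a short sketch of the quoting rules it describes, using hypothetical variable names (INFLUX_URL, BATCH_SIZE) that would have to be present in the service's environment:

```toml
# Hypothetical variables; Telegraf substitutes them when the config is loaded.
[agent]
  metric_batch_size = ${BATCH_SIZE}    # numbers and booleans are left unquoted

[[outputs.influxdb]]
  urls = ["${INFLUX_URL}"]             # string values must stay inside quotes
```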
+ ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Log at debug level. + # debug = false + ## Log only error level messages. + # quiet = false + + ## Log target controls the destination for logs and can be one of "file", + ## "stderr" or, on Windows, "eventlog". When set to "file", the output file + ## is determined by the "logfile" setting. + # logtarget = "file" + + ## Name of the file to be logged to when using the "file" logtarget. If set to + ## the empty string then logs are written to stderr. + # logfile = "" + + ## The logfile will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. Logs are rotated only when + ## written to, if there is no log activity rotation may be delayed. + # logfile_rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # logfile_rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # logfile_rotation_max_archives = 5 ## Override default hostname, if empty use os.Hostname() hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false ############################################################################### -# OUTPUTS # +# OUTPUT PLUGINS # ############################################################################### -# Configuration for influxdb server to send metrics to + +# Configuration for sending metrics to InfluxDB [[outputs.influxdb]] - # The full HTTP or UDP endpoint URL for your InfluxDB instance. - # Multiple urls can be specified but it is assumed that they are part of the same - # cluster, this means that only ONE of the urls will be written to each interval. - # urls = ["udp://127.0.0.1:8089"] # UDP endpoint example - urls = ["http://127.0.0.1:8086"] # required - # The target database for metrics (telegraf will create it if not exists) - database = "telegraf" # required - # Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h". - # note: using second precision greatly helps InfluxDB compression - precision = "s" + ## The full HTTP or UDP URL for your InfluxDB instance. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + # urls = ["unix:///var/run/influxdb.sock"] + # urls = ["udp://127.0.0.1:8089"] + # urls = ["http://127.0.0.1:8086"] - ## Write timeout (for the InfluxDB client), formatted as a string. - ## If not provided, will default to 5s. 0s means no timeout (not recommended). - timeout = "5s" + ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. + # database = "telegraf" + + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the 'database_tag' will not be included in the written metric. + # exclude_database_tag = false + + ## If true, no CREATE DATABASE queries will be sent. 
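To make the database/database_tag interplay described above concrete, a brief hedged sketch (the tag name and database below are illustrative, not defaults from this patch):

```toml
[[outputs.influxdb]]
  urls = ["http://127.0.0.1:8086"]
  database = "telegraf"         # used when a metric has no routing tag
  database_tag = "db"           # a metric tagged db=metrics_eu is written to "metrics_eu"
  exclude_database_tag = true   # strip the routing tag before writing
```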
Set to true when using + ## Telegraf with a user without permissions to create databases or when the + ## database already exists. + # skip_database_creation = false + + ## Name of existing retention policy to write to. Empty string writes to + ## the default retention policy. Only takes effect when using HTTP. + # retention_policy = "" + + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be included in the written metric. + # exclude_retention_policy_tag = false + + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". + ## Only takes effect when using HTTP. + # write_consistency = "any" + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## HTTP Basic Auth # username = "telegraf" # password = "metricsmetricsmetricsmetrics" - # Set the user agent for HTTP POSTs (can be useful for log differentiation) + + ## HTTP User-Agent # user_agent = "telegraf" - # Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) - # udp_payload = 512 + + ## UDP payload size is the maximum packet size to send. + # udp_payload = "512B" + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + # influx_uint_support = false + +# # Configuration for sending metrics to InfluxDB +# [[outputs.influxdb_v2]] +# ## The URLs of the InfluxDB cluster nodes. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] +# urls = ["http://127.0.0.1:9999"] +# +# ## Token for authentication. +# token = "" +# +# ## Organization is the name of the organization you wish to write to; must exist. +# organization = "" +# +# ## Destination bucket to write into. +# bucket = "" +# +# ## The value of this tag will be used to determine the bucket. If this +# ## tag is not set the 'bucket' option is used as the default. +# # bucket_tag = "" +# +# ## If true, the bucket tag will not be added to the metric. +# # exclude_bucket_tag = false +# +# ## Timeout for HTTP messages. +# # timeout = "5s" +# +# ## Additional HTTP headers +# # http_headers = {"X-Special-Header" = "Special-Value"} +# +# ## HTTP Proxy override, if unset values the standard proxy environment +# ## variables are consulted to determine which proxy, if any, should be used. 
+# # http_proxy = "http://corporate.proxy:3128" +# +# ## HTTP User-Agent +# # user_agent = "telegraf" +# +# ## Content-Encoding for write request body, can be set to "gzip" to +# ## compress body or "identity" to apply no encoding. +# # content_encoding = "gzip" +# +# ## Enable or disable uint support for writing uints influxdb 2.0. +# # influx_uint_support = false +# +# ## Optional TLS Config for use on HTTP connections. +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false ############################################################################### -# INPUTS # +# INPUT PLUGINS # ############################################################################### + # Windows Performance Counters plugin. # These are the recommended method of monitoring system metrics on windows, # as the regular system plugins (inputs.cpu, inputs.mem, etc.) rely on WMI, @@ -120,8 +271,8 @@ "% Disk Time", "% Disk Read Time", "% Disk Write Time", - "Current Disk Queue Length", "% Free Space", + "Current Disk Queue Length", "Free Megabytes", ] Measurement = "win_disk" @@ -187,7 +338,6 @@ "Standby Cache Reserve Bytes", "Standby Cache Normal Priority Bytes", "Standby Cache Core Bytes", - ] # Use 6 x - to remove the Instance bit from the query. Instances = ["------"] @@ -205,44 +355,31 @@ Instances = ["_Total"] Measurement = "win_swap" - [[inputs.win_perf_counters.object]] - ObjectName = "Network Interface" - Instances = ["*"] - Counters = [ - "Bytes Sent/sec", - "Bytes Received/sec", - "Packets Sent/sec", - "Packets Received/sec", - "Packets Received Discarded", - "Packets Received Errors", - "Packets Outbound Discarded", - "Packets Outbound Errors", - ] - - # Windows system plugins using WMI (disabled by default, using # win_perf_counters over WMI is recommended) + # # Read metrics about cpu usage # [[inputs.cpu]] # ## Whether to report per-cpu stats or not # percpu = true # ## Whether to report total system cpu stats or not # totalcpu = true -# ## Comment this line if you want the raw CPU time metrics -# fielddrop = ["time_*"] +# ## If true, collect raw CPU time metrics. +# collect_cpu_time = false +# ## If true, compute and report the sum of all non-idle CPU states. +# report_active = false # # Read metrics about disk usage by mount point # [[inputs.disk]] -# ## By default, telegraf gather stats for all mountpoints. -# ## Setting mountpoints will restrict the stats to the specified mountpoints. -# ## mount_points=["/"] +# ## By default stats will be gathered for all mount points. +# ## Set mount_points will restrict the stats to only the specified mount points. +# # mount_points = ["/"] # -# ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually -# ## present on /run, /var/run, /dev/shm or /dev). -# # ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] +# ## Ignore mount points by filesystem type. +# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] # # Read metrics about disk IO by device @@ -250,9 +387,26 @@ # ## By default, telegraf will gather stats for all devices including # ## disk partitions. # ## Setting devices will restrict the stats to the specified devices. -# ## devices = ["sda", "sdb"] -# ## Uncomment the following line if you do not need disk serial numbers. 
-# ## skip_serial_number = true +# # devices = ["sda", "sdb", "vd*"] +# ## Uncomment the following line if you need disk serial numbers. +# # skip_serial_number = false +# # +# ## On systems which support it, device metadata can be added in the form of +# ## tags. +# ## Currently only Linux is supported via udev properties. You can view +# ## available properties for a device by running: +# ## 'udevadm info -q property -n /dev/sda' +# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] +# # +# ## Using the same metadata source as device_tags, you can also customize the +# ## name of the device via templates. +# ## The 'name_templates' parameter is a list of templates to try and apply to +# ## the device. The template may contain variables in the form of '$PROPERTY' or +# ## '${PROPERTY}'. The first template which does not contain any variables not +# ## present for the device is used as the device name tag. +# ## The typical use case is for LVM volumes, to get the VG/LV name instead of +# ## the near-meaningless DM-0 name. +# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] # # Read metrics about memory usage @@ -263,4 +417,3 @@ # # Read metrics about swap memory usage # [[inputs.swap]] # # no configuration - diff --git a/filter/filter_test.go b/filter/filter_test.go index 2f52e036a..18ebcd795 100644 --- a/filter/filter_test.go +++ b/filter/filter_test.go @@ -37,6 +37,24 @@ func TestCompile(t *testing.T) { assert.True(t, f.Match("network")) } +func TestIncludeExclude(t *testing.T) { + tags := []string{} + labels := []string{"best", "com_influxdata", "timeseries", "com_influxdata_telegraf", "ever"} + + filter, err := NewIncludeExcludeFilter([]string{}, []string{"com_influx*"}) + if err != nil { + t.Fatalf("Failed to create include/exclude filter - %v", err) + } + + for i := range labels { + if filter.Match(labels[i]) { + tags = append(tags, labels[i]) + } + } + + assert.Equal(t, []string{"best", "timeseries", "ever"}, tags) +} + var benchbool bool func BenchmarkFilterSingleNoGlobFalse(b *testing.B) { diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..77c448d27 --- /dev/null +++ b/go.mod @@ -0,0 +1,154 @@ +module github.com/influxdata/telegraf + +go 1.12 + +require ( + cloud.google.com/go v0.53.0 + cloud.google.com/go/datastore v1.1.0 // indirect + cloud.google.com/go/pubsub v1.2.0 + code.cloudfoundry.org/clock v1.0.0 // indirect + collectd.org v0.3.0 + github.com/Azure/azure-event-hubs-go/v3 v3.2.0 + github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687 + github.com/Azure/go-autorest/autorest v0.9.3 + github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 + github.com/BurntSushi/toml v0.3.1 + github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee + github.com/Microsoft/ApplicationInsights-Go v0.4.2 + github.com/Microsoft/go-winio v0.4.9 // indirect + github.com/Shopify/sarama v1.24.1 + github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 + github.com/aerospike/aerospike-client-go v1.27.0 + github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 + github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 + github.com/apache/thrift v0.12.0 + github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 // indirect + github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 + github.com/armon/go-metrics v0.3.0 // indirect + github.com/aws/aws-sdk-go v1.30.9 + github.com/benbjohnson/clock v1.0.2 + github.com/bitly/go-hostpool v0.1.0 // indirect + github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 
// indirect + github.com/caio/go-tdigest v2.3.0+incompatible // indirect + github.com/cenkalti/backoff v2.0.0+incompatible // indirect + github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 + github.com/cockroachdb/apd v1.1.0 // indirect + github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 + github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 // indirect + github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a // indirect + github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible // indirect + github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133 + github.com/docker/go-connections v0.3.0 // indirect + github.com/docker/go-units v0.3.3 // indirect + github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 + github.com/eclipse/paho.mqtt.golang v1.2.0 + github.com/ericchiang/k8s v1.2.0 + github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 + github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 + github.com/go-logfmt/logfmt v0.4.0 + github.com/go-ole/go-ole v1.2.1 // indirect + github.com/go-redis/redis v6.12.0+incompatible + github.com/go-sql-driver/mysql v1.5.0 + github.com/goburrow/modbus v0.1.0 + github.com/goburrow/serial v0.1.0 // indirect + github.com/gobwas/glob v0.2.3 + github.com/gofrs/uuid v2.1.0+incompatible + github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d + github.com/golang/geo v0.0.0-20190916061304-5b978397cfec + github.com/golang/protobuf v1.3.5 + github.com/google/go-cmp v0.4.0 + github.com/google/go-github v17.0.0+incompatible + github.com/google/go-querystring v1.0.0 // indirect + github.com/gorilla/mux v1.6.2 + github.com/gotestyourself/gotestyourself v2.2.0+incompatible // indirect + github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect + github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 + github.com/hashicorp/consul v1.2.1 + github.com/hashicorp/go-msgpack v0.5.5 // indirect + github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 // indirect + github.com/hashicorp/memberlist v0.1.5 // indirect + github.com/hashicorp/serf v0.8.1 // indirect + github.com/influxdata/go-syslog/v2 v2.0.1 + github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41 + github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 + github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 + github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect + github.com/jackc/pgx v3.6.0+incompatible + github.com/jcmturner/gofork v1.0.0 // indirect + github.com/kardianos/service v1.0.0 + github.com/karrick/godirwalk v1.12.0 + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 + github.com/klauspost/compress v1.9.2 // indirect + github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 // indirect + github.com/lib/pq v1.3.0 // indirect + github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe + github.com/miekg/dns v1.0.14 + github.com/mitchellh/go-testing-interface v1.0.0 // indirect + github.com/multiplay/go-ts3 v1.0.0 + github.com/naoina/go-stringutil v0.1.0 // indirect + 
github.com/nats-io/nats-server/v2 v2.1.4 + github.com/nats-io/nats.go v1.9.1 + github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 + github.com/nsqio/go-nsq v1.0.7 + github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 + github.com/opencontainers/go-digest v1.0.0-rc1 // indirect + github.com/opencontainers/image-spec v1.0.1 // indirect + github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect + github.com/opentracing/opentracing-go v1.0.2 // indirect + github.com/openzipkin/zipkin-go-opentracing v0.3.4 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/client_model v0.2.0 + github.com/prometheus/common v0.9.1 + github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 + github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect + github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect + github.com/shirou/gopsutil v2.20.2+incompatible + github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 // indirect + github.com/sirupsen/logrus v1.4.2 + github.com/soniah/gosnmp v1.25.0 + github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 + github.com/stretchr/testify v1.5.1 + github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 + github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 // indirect + github.com/tidwall/gjson v1.3.0 + github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e // indirect + github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc // indirect + github.com/vjeantet/grok v1.0.0 + github.com/vmware/govmomi v0.19.0 + github.com/wavefronthq/wavefront-sdk-go v0.9.2 + github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf + github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a // indirect + github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 // indirect + golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect + golang.org/x/net v0.0.0-20200301022130-244492dfa37a + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect + golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 + golang.org/x/tools v0.0.0-20200317043434-63da46f3035e // indirect + golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 + gonum.org/v1/gonum v0.6.2 // indirect + google.golang.org/api v0.20.0 + google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 + google.golang.org/grpc v1.28.0 + gopkg.in/fatih/pool.v2 v2.0.0 // indirect + gopkg.in/gorethink/gorethink.v3 v3.0.5 + gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect + gopkg.in/ldap.v3 v3.1.0 + gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce + gopkg.in/olivere/elastic.v5 v5.0.70 + gopkg.in/yaml.v2 v2.2.5 + gotest.tools v2.2.0+incompatible // indirect + honnef.co/go/tools v0.0.1-2020.1.3 // indirect + k8s.io/apimachinery v0.17.1 // indirect +) + +// replaced due to https://github.com/satori/go.uuid/issues/73 +replace github.com/satori/go.uuid => github.com/gofrs/uuid v3.2.0+incompatible diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..a3d70f21d --- /dev/null +++ b/go.sum @@ -0,0 +1,890 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU= +cloud.google.com/go v0.37.4/go.mod 
h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= +code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= +github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= +github.com/Azure/azure-event-hubs-go/v3 v3.2.0 h1:CQlxKH5a4NX1ZmbdqXUPRwuNGh2XvtgmhkZvkEuWzhs= +github.com/Azure/azure-event-hubs-go/v3 v3.2.0/go.mod h1:BPIIJNH/l/fVHYq3Rm6eg4clbrULrQ3q7+icmqHyyLc= +github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.1.9 h1:u7JFb9fFTE6Y/j8ae2VK33ePrRqJqoCM/IWkQdAZ+rg= +github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= +github.com/Azure/azure-storage-queue-go 
v0.0.0-20181215014128-6ed74e755687 h1:7MiZ6Th+YTmwUdrKmFg5OMsGYz7IdQwjqL0RPxkhhOQ= +github.com/Azure/azure-storage-queue-go v0.0.0-20181215014128-6ed74e755687/go.mod h1:K6am8mT+5iFXgingS9LUc7TmbsW6XBw3nxaRyaMyWc8= +github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= +github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3 h1:OZEIaBbMdUE/Js+BQKlpO81XlISgipr6yDJ+PSwsgi4= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1 h1:pZdL8o72rK+avFWl+p9nE8RWi1JInZrWJYlnpfXJwHk= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee 
h1:atI/FFjXh6hIVlPE1Jup9m8N4B9q/OSbMUe2EBahs+w= +github.com/Mellanox/rdmamap v0.0.0-20191106181932-7c3c4763a6ee/go.mod h1:jDA6v0TUYrFEIAE5uGJ29LQOeONIgMdP4Rkqb8HUnPM= +github.com/Microsoft/ApplicationInsights-Go v0.4.2 h1:HIZoGXMiKNwAtMAgCSSX35j9mP+DjGF9ezfBvxMDLLg= +github.com/Microsoft/ApplicationInsights-Go v0.4.2/go.mod h1:CukZ/G66zxXtI+h/VcVn3eVVDGDHfXM2zVILF7bMmsg= +github.com/Microsoft/go-winio v0.4.9 h1:3RbgqgGVqmcpbOiwrjbVtDHLlJBGF6aE+yHmNtBNsFQ= +github.com/Microsoft/go-winio v0.4.9/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/sarama v1.24.1 h1:svn9vfN3R1Hz21WR2Gj0VW9ehaDGkiOS+VqlIcZOkMI= +github.com/Shopify/sarama v1.24.1/go.mod h1:fGP8eQ6PugKEI0iUETYYtnP6d1pH/bdDMTel1X5ajsU= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/aerospike/aerospike-client-go v1.27.0 h1:VC6/Wqqm3Qlp4/utM7Zts3cv4A2HPn8rVFp/XZKTWgE= +github.com/aerospike/aerospike-client-go v1.27.0/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9 h1:FXrPTd8Rdlc94dKccl7KPmdmIbVh/OjelJ8/vgMRzcQ= +github.com/amir/raidman v0.0.0-20170415203553-1ccc43bfb9c9/go.mod h1:eliMa/PW+RDr2QLWRmLH1R1ZA4RInpmvOzDDXtaIZkc= +github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3 h1:Bmjk+DjIi3tTAU0wxGaFbfjGUqlxxSXARq9A96Kgoos= +github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm+qXFKs/xjSoWn30NrWBBvdTTQq+UjkhjEJHfSFA= +github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740 h1:FD4/ikKOFxwP8muWDypbmBWc634+YcAs3eBrYAmRdZY= +github.com/aristanetworks/goarista v0.0.0-20190325233358-a123909ec740/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= +github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= +github.com/aws/aws-sdk-go v1.30.9 h1:DntpBUKkchINPDbhEzDRin1eEn1TG9TZFlzWPf0i8to= +github.com/aws/aws-sdk-go v1.30.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/benbjohnson/clock v1.0.2 h1:Z0CN0Yb4ig9sGPXkvAQcGJfnrrMQ5QYLCMPRi9iD7YE= +github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bitly/go-hostpool v0.1.0 h1:XKmsF6k5el6xHG3WPJ8U0Ku/ye7njX7W81Ng7O2ioR0= +github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/caio/go-tdigest v2.3.0+incompatible h1:zP6nR0nTSUzlSqqr7F/LhslPlSZX/fZeGmgmwj2cxxY= +github.com/caio/go-tdigest v2.3.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= +github.com/cenkalti/backoff v2.0.0+incompatible h1:5IIPUHhlnUZbcHQsQou5k1Tn58nJkeJL9U+ig5CHJbY= +github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6 h1:57RI0wFkG/smvVTcz7F43+R0k+Hvci3jAVQF9lyMoOo= +github.com/cisco-ie/nx-telemetry-proto v0.0.0-20190531143454-82441e232cf6/go.mod h1:ugEfq4B8T8ciw/h5mCkgdiDRFS4CkqqhH2dymDB4knc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/couchbase/go-couchbase v0.0.0-20180501122049-16db1f1fe037 h1:Dbz60fpCq04vRxVVVJLbQuL0G7pRt0Gyo2BkozFc4SQ= +github.com/couchbase/go-couchbase 
v0.0.0-20180501122049-16db1f1fe037/go.mod h1:TWI8EKQMs5u5jLKW/tsb9VwauIrMIxQG1r5fMsswK5U= +github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530 h1:F8nmbiuX+gCz9xvWMi6Ak8HQntB4ATFXP46gaxifbp4= +github.com/couchbase/gomemcached v0.0.0-20180502221210-0da75df14530/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c= +github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a h1:Y5XsLCEhtEI8qbD9RP3Qlv5FXdTDHxZM9UPUnMRgBp8= +github.com/couchbase/goutils v0.0.0-20180530154633-e865a1461c8a/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4 h1:YcpmyvADGYw5LqMnHqSkyIELsHCGF6PkrmM31V8rF7o= +github.com/denisenkom/go-mssqldb v0.0.0-20190707035753-2be1aa521ff4/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible h1:357nGVUC8gSpeSc2Axup8HfrfTLLUfWfCsCUhiQSKIg= +github.com/docker/distribution v2.6.0-rc.1.0.20170726174610-edc3ab29cdff+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133 h1:Kus8nU6ctI/u/l86ljUJl6GpUtmO7gtD/krn4u5dr0M= +github.com/docker/docker v1.4.2-0.20180327123150-ed7b6428c133/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166 h1:KgEcrKF0NWi9GT/OvDp9ioXZIrHRbP8S5o+sot9gznQ= +github.com/docker/libnetwork v0.8.0-dev.2.0.20181012153825-d7b61745d166/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod 
h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI= +github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg= +github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= +github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= +github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96 h1:YpooqMW354GG47PXNBiaCv6yCQizyP3MXD9NUPrCEQ8= +github.com/glinton/ping v0.1.4-0.20200311211934-5ac87da8cd96/go.mod h1:uY+1eqFUyotrQxF1wYFNtMeHp/swbYRsoGzfcPZ8x3o= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod 
h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-redis/redis v6.12.0+incompatible h1:s+64XI+z/RXqGHz2fQSgRJOEwqqSXeX3dliF7iVkMbE= +github.com/go-redis/redis v6.12.0+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goburrow/modbus v0.1.0 h1:DejRZY73nEM6+bt5JSP6IsFolJ9dVcqxsYbpLbeW/ro= +github.com/goburrow/modbus v0.1.0/go.mod h1:Kx552D5rLIS8E7TyUwQ/UdHEqvX5T8tyiGBTlzMcZBg= +github.com/goburrow/serial v0.1.0 h1:v2T1SQa/dlUqQiYIT8+Cu7YolfqAi3K96UmhwYyuSrA= +github.com/goburrow/serial v0.1.0/go.mod h1:sAiqG0nRVswsm1C97xsttiYCzSLBmUZ/VSlVLZJ8haA= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/uuid v2.1.0+incompatible h1:8oEj3gioPmmDAOLQUZdnW+h4FZu9aSE/SQIas1E9pzA= +github.com/gofrs/uuid v2.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXgptLmNLwynMSHUmU6besqtiw= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod 
h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0 h1:U0KvGD9CJIl1nbgu9yLsfWxMT6WqL8fG0IBB7RvOZZQ= +github.com/harlow/kinesis-consumer v0.3.1-0.20181230152818-2f58b136fee0/go.mod h1:dk23l2BruuUzRP8wbybQbPn3J7sZga2QHICCeaEy5rQ= +github.com/hashicorp/consul v1.2.1 h1:66MuuTfV4aOXTQM7cjAIKUWFOITSk4XZlMhE09ymVbg= +github.com/hashicorp/consul v1.2.1/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= +github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90 h1:VBj0QYQ0u2MCJzBfeYXGexnAl17GsH1yidnoxCqqD9E= +github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= 
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM= +github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.1 h1:mYs6SMzu72+90OcPa5wr3nfznA4Dw9UyR791ZFNOIf4= +github.com/hashicorp/serf v0.8.1/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/influxdata/go-syslog/v2 v2.0.1 h1:l44S4l4Q8MhGQcoOxJpbo+QQYxJqp0vdgIVHh4+DO0s= +github.com/influxdata/go-syslog/v2 v2.0.1/go.mod h1:hjvie1UTaD5E1fTnDmxaCw8RRDrT4Ve+XHr5O2dKSCo= +github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41 h1:HxQo1NpNXQDpvEBzthbQLmePvTLFTa5GzSFUjL03aEs= +github.com/influxdata/tail v1.0.1-0.20180327235535-c43482518d41/go.mod h1:xTFF2SILpIYc5N+Srb0d5qpx7d+f733nBrbasb13DtQ= +github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65 h1:vvyMtD5LTJc1W9sQKjDkAWdcg0478CszSdzlHtiAXCY= +github.com/influxdata/toml v0.0.0-20190415235208-270119a8ce65/go.mod h1:zApaNFpP/bTpQItGZNNUMISDMDAnTXu9UqJ4yT3ocz8= +github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8 h1:W2IgzRCb0L9VzMujq/QuTaZUKcH8096jWwP519mHN6Q= +github.com/influxdata/wlog v0.0.0-20160411224016-7c63b0a71ef8/go.mod h1:/2NMgWB1DHM1ti/gqhOlg+LJeBVk6FqR5aVGYY0hlwI= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v3.6.0+incompatible h1:bJeo4JdVbDAW8KB2m8XkFeo8CPipREoG37BwEoKGz+Q= +github.com/jackc/pgx v3.6.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod 
h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4 h1:nwOc1YaOrYJ37sEBrtWZrdqzK22hiJs3GpDmP3sR2Yw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kardianos/service v1.0.0 h1:HgQS3mFfOlyntWX8Oke98JcJLqt1DBcHR4kxShpYef0= +github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= +github.com/karrick/godirwalk v1.12.0 h1:nkS4xxsjiZMvVlazd0mFyiwD4BR9f3m6LXGhM2TUx3Y= +github.com/karrick/godirwalk v1.12.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY= +github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee h1:MB75LRhfeLER2RF7neSVpYuX/lL8aPi3yPtv5vdOJmk= +github.com/kubernetes/apimachinery v0.0.0-20190119020841-d41becfba9ee/go.mod h1:Pe/YBTPc3vqoMkbuIWPH8CF9ehINdvNyS0dP3J6HC0s= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 
+github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4= +github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U= +github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 h1:bCiVCRCs1Heq84lurVinUPy19keqGEe4jh5vtK37jcg= +github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= +github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA= +github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe h1:yMrL+YorbzaBpj/h3BbLMP+qeslPZYMbzcpHFBNy1Yk= +github.com/mdlayher/apcupsd v0.0.0-20190314144147-eb3dd99a75fe/go.mod h1:y3mw3VG+t0m20OMqpG8RQqw8cDXvShVb+L8Z8FEnebw= +github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0 h1:mpdLgm+brq10nI9zM1BpX1kpDbh3NLl3RSnVq6ZSkfg= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 
+github.com/multiplay/go-ts3 v1.0.0 h1:loxtEFqvYtpoGh1jOqEt6aDzctYuQsi3vb3dMpvWiWw= +github.com/multiplay/go-ts3 v1.0.0/go.mod h1:14S6cS3fLNT3xOytrA/DkRyAFNuQLMLEqOYAsf87IbQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= +github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.4 h1:BILRnsJ2Yb/fefiFbBWADpViGF69uh4sxe8poVDQ06g= +github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg= +github.com/nats-io/nats.go v1.9.1 h1:ik3HbLhZ0YABLto7iX80pZLPw/6dx3T+++MZJwLnMrQ= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3 h1:6JrEfig+HzTH85yxzhSVbjHRJv9cn0p6n3IngIcM5/k= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0 h1:W8+lNIfAldCScGiikToSprbf3DCaMXk0VIM9l73BIpY= +github.com/newrelic/newrelic-telemetry-sdk-go v0.2.0/go.mod h1:G9MqE/cHGv3Hx3qpYhfuyFUsGx2DpVcGi1iJIqTg+JQ= +github.com/nsqio/go-nsq v1.0.7 h1:O0pIZJYTf+x7cZBA0UMY8WxFG79lYTURmWzAAh48ljY= +github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029 h1:lXQqyLroROhwR2Yq/kXbLzVecgmVeZh2TFLg6OxCd+w= +github.com/openconfig/gnmi v0.0.0-20180912164834-33a1865c3029/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod 
h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 h1:lM6RxxfUMrYL/f8bWEUqdXrANWtrL7Nndbm9iFN0DlU= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/opentracing-go v1.0.2 h1:3jA2P6O1F9UOrWVpwrIo17pu01KWvNWg4X946/Y5Zwg= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go-opentracing v0.3.4 h1:x/pBv/5VJNWkcHF1G9xqhug8Iw7X1y1zOMzDmyuvP2g= +github.com/openzipkin/zipkin-go-opentracing v0.3.4/go.mod h1:js2AbwmHW0YD9DwIw2JhQWmbfFi/UnWyYwdVhqbCDOE= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw= +github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664 h1:gvolwzuDhul9qK6/oHqxCHD5TEYfsWNBGidOeG6kvpk= +github.com/safchain/ethtool v0.0.0-20200218184317-f459e2d13664/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec h1:6ncX5ko6B9LntYM0YBRXkiSaZMmLYeZ/NWcmeB43mMY= +github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shirou/gopsutil v2.20.2+incompatible h1:ucK79BhBpgqQxPASyS2cu9HX8cfDVljBN1WWFvbNvgY= +github.com/shirou/gopsutil v2.20.2+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114 h1:Pm6R878vxWWWR+Sa3ppsLce/Zq+JNTs6aVvRu13jv9A= +github.com/shopspring/decimal v0.0.0-20200105231215-408a2507e114/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soniah/gosnmp v1.25.0 h1:0y8vpjD07NPmnT+wojnUrKkYLX9Fxw1jI4cGTumWugQ= +github.com/soniah/gosnmp v1.25.0/go.mod h1:8YvfZxH388NIIw2A+X5z2Oh97VcNhtmxDLt5QeUzVuQ= +github.com/spf13/pflag 
v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8 h1:l6epF6yBwuejBfhGkM5m8VSNM/QAm7ApGyH35ehA7eQ= +github.com/streadway/amqp v0.0.0-20180528204448-e5adc2ada8b8/go.mod h1:1WNBiOZtZQLpVAyu0iTduoJL9hEsMloAK5XWrtW0xdY= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62 h1:Oj2e7Sae4XrOsk3ij21QjjEgAcVSeo9nkp0dI//cD2o= +github.com/tbrandon/mbserver v0.0.0-20170611213546-993e1772cc62/go.mod h1:qUzPVlSj2UgxJkVbH0ZwuuiR46U8RBMDT5KLY78Ifpw= +github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00 h1:mujcChM89zOHwgZBBNr5WZ77mBXP1yR+gLThGCYZgAg= +github.com/tedsuo/ifrit v0.0.0-20191009134036-9a97d0632f00/go.mod h1:eyZnKCc955uh98WQvzOm0dgAeLnf2O0Rz0LPoC5ze+0= +github.com/tidwall/gjson v1.3.0 h1:kfpsw1W3trbg4Xm6doUtqSl9+LhLB6qJ9PkltVAQZYs= +github.com/tidwall/gjson v1.3.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= +github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc h1:R83G5ikgLMxrBvLh22JhdfI8K6YXEPHx5P03Uu3DRs4= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vjeantet/grok v1.0.0 h1:uxMqatJP6MOFXsj6C1tZBnqqAThQEeqnizUZ48gSJQQ= +github.com/vjeantet/grok v1.0.0/go.mod h1:/FWYEVYekkm+2VjcFmO9PufDU5FgXHUz9oy2EGqmQBo= +github.com/vmware/govmomi v0.19.0 h1:CR6tEByWCPOnRoRyhLzuHaU+6o2ybF3qufNRWS/MGrY= +github.com/vmware/govmomi v0.19.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/wavefronthq/wavefront-sdk-go v0.9.2 h1:/LvWgZYNjHFUg+ZUX+qv+7e+M8sEMi0lM15zPp681Gk= +github.com/wavefronthq/wavefront-sdk-go v0.9.2/go.mod h1:hQI6y8M9OtTCtc0xdwh+dCER4osxXdEAeCpacjpDZEU= +github.com/wvanbergen/kafka v0.0.0-20171203153745-e2edea948ddf h1:TOV5PC6fIWwFOFra9xJfRXZcL2pLhMI8oNuDugNxg9Q= +github.com/wvanbergen/kafka 
v0.0.0-20171203153745-e2edea948ddf/go.mod h1:nxx7XRXbR9ykhnC8lXqQyJS0rfvJGxKyKw/sT1YOttg= +github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a h1:ILoU84rj4AQ3q6cjQvtb9jBjx4xzR/Riq/zYhmDQiOk= +github.com/wvanbergen/kazoo-go v0.0.0-20180202103751-f72d8611297a/go.mod h1:vQQATAGxVK20DC1rRubTJbZDDhhpA4QfU02pMdPxGO4= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4 h1:f6CCNiTjQZ0uWK4jPwhwYB8QIGGfn0ssD9kVzRUUUpk= +github.com/yuin/gopher-lua v0.0.0-20180630135845-46796da1b0b4/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= +go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72 h1:+ELyKg6m8UBf0nPFSqD0mi7zUfwPyXo23HNjMnXPz7w= +golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191003212358-c178f38b412c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200317043434-63da46f3035e h1:8ogAbHWoJTPepnVbNRqXLOpzMkl0rtRsM7crbflc4XM= +golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.zx2c4.com/wireguard v0.0.20200121 h1:vcswa5Q6f+sylDfjqyrVNNrjsFUUbPsgAQTBCAg/Qf8= +golang.zx2c4.com/wireguard v0.0.20200121/go.mod h1:P2HsVp8SKwZEufsnezXZA4GRX/T49/HlU7DGuelXsU4= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4 h1:KTi97NIQGgSMaN0v/oxniJV0MEzfzmrDUOAWxombQVc= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20200205215550-e35592f146e4/go.mod h1:UdS9frhv65KTfwxME1xE8+rHYoFpbm36gOud1GhBe9c= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= 
+gonum.org/v1/gonum v0.6.2 h1:4r+yNT0+8SWcOkXP+63H2zQbN+USnC73cjGUxnDF94Q= +gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24 h1:IGPykv426z7LZSVPlaPufOyphngM4at5uZ7x5alaFvE= +google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fatih/pool.v2 v2.0.0 h1:xIFeWtxifuQJGk/IEPKsTduEKcKvPmhoiVDGpC40nKg= +gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gorethink/gorethink.v3 v3.0.5 h1:e2Uc/Xe+hpcVQFsj6MuHlYog3r0JYpnTzwDj/y2O4MU= +gopkg.in/gorethink/gorethink.v3 v3.0.5/go.mod h1:+3yIIHJUGMBK+wyPH+iN5TP+88ikFDfZdqTlK3Y9q8I= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= +gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= +gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= +gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/ldap.v3 v3.1.0 h1:DIDWEjI7vQWREh0S8X5/NFPCZ3MCVd55LmXKPW4XLGE= +gopkg.in/ldap.v3 v3.1.0/go.mod h1:dQjCc0R0kfyFjIlWNMH1DORwUASZyDxo2Ry1B51dXaQ= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU= +gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/olivere/elastic.v5 v5.0.70 h1:DqFG2Odzs74JCz6SssgJjd6qpGnsOAzNc7+l5EnvsnE= +gopkg.in/olivere/elastic.v5 v5.0.70/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/apimachinery v0.17.1 h1:zUjS3szTxoUjTDYNvdFkYt2uMEXLcthcbp+7uZvWhYM= +k8s.io/apimachinery v0.17.1/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog 
v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/input.go b/input.go index f7e1493e2..08cfd75b9 100644 --- a/input.go +++ b/input.go @@ -1,11 +1,7 @@ package telegraf type Input interface { - // SampleConfig returns the default configuration of the Input - SampleConfig() string - - // Description returns a one-sentence description on the Input - Description() string + PluginDescriber // Gather takes in an accumulator and adds the metrics that the Input // gathers. This is called every "interval" @@ -13,17 +9,10 @@ type Input interface { } type ServiceInput interface { - // SampleConfig returns the default configuration of the Input - SampleConfig() string + Input - // Description returns a one-sentence description on the Input - Description() string - - // Gather takes in an accumulator and adds the metrics that the Input - // gathers. This is called every "interval" - Gather(Accumulator) error - - // Start starts the ServiceInput's service, whatever that may be + // Start the ServiceInput. The Accumulator may be retained and used until + // Stop returns. Start(Accumulator) error // Stop stops the services and closes any necessary channels and connections diff --git a/internal/buffer/buffer.go b/internal/buffer/buffer.go deleted file mode 100644 index cdc81fed3..000000000 --- a/internal/buffer/buffer.go +++ /dev/null @@ -1,76 +0,0 @@ -package buffer - -import ( - "sync" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/selfstat" -) - -var ( - MetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{}) - MetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{}) -) - -// Buffer is an object for storing metrics in a circular buffer. -type Buffer struct { - buf chan telegraf.Metric - - mu sync.Mutex -} - -// NewBuffer returns a Buffer -// size is the maximum number of metrics that Buffer will cache. If Add is -// called when the buffer is full, then the oldest metric(s) will be dropped. -func NewBuffer(size int) *Buffer { - return &Buffer{ - buf: make(chan telegraf.Metric, size), - } -} - -// IsEmpty returns true if Buffer is empty. -func (b *Buffer) IsEmpty() bool { - return len(b.buf) == 0 -} - -// Len returns the current length of the buffer. -func (b *Buffer) Len() int { - return len(b.buf) -} - -// Add adds metrics to the buffer. -func (b *Buffer) Add(metrics ...telegraf.Metric) { - for i, _ := range metrics { - MetricsWritten.Incr(1) - select { - case b.buf <- metrics[i]: - default: - b.mu.Lock() - MetricsDropped.Incr(1) - <-b.buf - b.buf <- metrics[i] - b.mu.Unlock() - } - } -} - -// Batch returns a batch of metrics of size batchSize. -// the batch will be of maximum length batchSize. 
It can be less than batchSize, -// if the length of Buffer is less than batchSize. -func (b *Buffer) Batch(batchSize int) []telegraf.Metric { - b.mu.Lock() - n := min(len(b.buf), batchSize) - out := make([]telegraf.Metric, n) - for i := 0; i < n; i++ { - out[i] = <-b.buf - } - b.mu.Unlock() - return out -} - -func min(a, b int) int { - if b < a { - return b - } - return a -} diff --git a/internal/buffer/buffer_test.go b/internal/buffer/buffer_test.go deleted file mode 100644 index f84d8c66d..000000000 --- a/internal/buffer/buffer_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package buffer - -import ( - "testing" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" -) - -var metricList = []telegraf.Metric{ - testutil.TestMetric(2, "mymetric1"), - testutil.TestMetric(1, "mymetric2"), - testutil.TestMetric(11, "mymetric3"), - testutil.TestMetric(15, "mymetric4"), - testutil.TestMetric(8, "mymetric5"), -} - -func BenchmarkAddMetrics(b *testing.B) { - buf := NewBuffer(10000) - m := testutil.TestMetric(1, "mymetric") - for n := 0; n < b.N; n++ { - buf.Add(m) - } -} - -func TestNewBufferBasicFuncs(t *testing.T) { - b := NewBuffer(10) - MetricsDropped.Set(0) - MetricsWritten.Set(0) - - assert.True(t, b.IsEmpty()) - assert.Zero(t, b.Len()) - assert.Zero(t, MetricsDropped.Get()) - assert.Zero(t, MetricsWritten.Get()) - - m := testutil.TestMetric(1, "mymetric") - b.Add(m) - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 1) - assert.Equal(t, int64(0), MetricsDropped.Get()) - assert.Equal(t, int64(1), MetricsWritten.Get()) - - b.Add(metricList...) - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 6) - assert.Equal(t, int64(0), MetricsDropped.Get()) - assert.Equal(t, int64(6), MetricsWritten.Get()) -} - -func TestDroppingMetrics(t *testing.T) { - b := NewBuffer(10) - MetricsDropped.Set(0) - MetricsWritten.Set(0) - - // Add up to the size of the buffer - b.Add(metricList...) - b.Add(metricList...) - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 10) - assert.Equal(t, int64(0), MetricsDropped.Get()) - assert.Equal(t, int64(10), MetricsWritten.Get()) - - // Add 5 more and verify they were dropped - b.Add(metricList...) - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 10) - assert.Equal(t, int64(5), MetricsDropped.Get()) - assert.Equal(t, int64(15), MetricsWritten.Get()) -} - -func TestGettingBatches(t *testing.T) { - b := NewBuffer(20) - MetricsDropped.Set(0) - MetricsWritten.Set(0) - - // Verify that the buffer returned is smaller than requested when there are - // not as many items as requested. - b.Add(metricList...) - batch := b.Batch(10) - assert.Len(t, batch, 5) - - // Verify that the buffer is now empty - assert.True(t, b.IsEmpty()) - assert.Zero(t, b.Len()) - assert.Zero(t, MetricsDropped.Get()) - assert.Equal(t, int64(5), MetricsWritten.Get()) - - // Verify that the buffer returned is not more than the size requested - b.Add(metricList...) - batch = b.Batch(3) - assert.Len(t, batch, 3) - - // Verify that buffer is not empty - assert.False(t, b.IsEmpty()) - assert.Equal(t, b.Len(), 2) - assert.Equal(t, int64(0), MetricsDropped.Get()) - assert.Equal(t, int64(10), MetricsWritten.Get()) -} diff --git a/internal/choice/choice.go b/internal/choice/choice.go new file mode 100644 index 000000000..33c26096d --- /dev/null +++ b/internal/choice/choice.go @@ -0,0 +1,36 @@ +// Package choice provides basic functions for working with +// plugin options that must be one of several values. 
+package choice + +import "fmt" + +// Contains returns true if the choice is in the list of choices. +func Contains(choice string, choices []string) bool { + for _, item := range choices { + if item == choice { + return true + } + } + return false +} + +// Check returns an error if a choice is not one of +// the available choices. +func Check(choice string, available []string) error { + if !Contains(choice, available) { + return fmt.Errorf("unknown choice %s", choice) + } + return nil +} + +// CheckSlice returns an error if the choices are not a subset of +// available. +func CheckSlice(choices, available []string) error { + for _, choice := range choices { + err := Check(choice, available) + if err != nil { + return err + } + } + return nil +} diff --git a/internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf b/internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf deleted file mode 100644 index aee9abdfe..000000000 --- a/internal/config/testdata/subconfig/..4984_10_04_08_28_06.119/invalid-config.conf +++ /dev/null @@ -1,4 +0,0 @@ -# This invalid config file should be skipped during testing -# as it is an ..data folder - -[[outputs.influxdb diff --git a/internal/content_coding.go b/internal/content_coding.go new file mode 100644 index 000000000..daefa20ee --- /dev/null +++ b/internal/content_coding.go @@ -0,0 +1,182 @@ +package internal + +import ( + "bufio" + "bytes" + "compress/gzip" + "errors" + "io" +) + +// NewStreamContentDecoder returns a reader that will decode the stream +// according to the encoding type. +func NewStreamContentDecoder(encoding string, r io.Reader) (io.Reader, error) { + switch encoding { + case "gzip": + return NewGzipReader(r) + case "identity", "": + return r, nil + default: + return nil, errors.New("invalid value for content_encoding") + } +} + +// GzipReader is similar to gzip.Reader but reads only a single gzip stream per read. +type GzipReader struct { + r io.Reader + z *gzip.Reader + endOfStream bool +} + +func NewGzipReader(r io.Reader) (io.Reader, error) { + // We need a reader that implements ByteReader in order to line up the next + // stream. + br := bufio.NewReader(r) + + // Reads the first gzip stream header. + z, err := gzip.NewReader(br) + if err != nil { + return nil, err + } + + // Prevent future calls to Read from reading the following gzip header. + z.Multistream(false) + + return &GzipReader{r: br, z: z}, nil +} + +func (r *GzipReader) Read(b []byte) (int, error) { + if r.endOfStream { + // Reads the next gzip header and prepares for the next stream. + err := r.z.Reset(r.r) + if err != nil { + return 0, err + } + r.z.Multistream(false) + r.endOfStream = false + } + + n, err := r.z.Read(b) + + // Since multistream is disabled, io.EOF indicates the end of the gzip + // sequence. On the next read we must read the next gzip header. + if err == io.EOF { + r.endOfStream = true + return n, nil + } + return n, err + +} + +// NewContentEncoder returns a ContentEncoder for the encoding type. +func NewContentEncoder(encoding string) (ContentEncoder, error) { + switch encoding { + case "gzip": + return NewGzipEncoder() + case "identity", "": + return NewIdentityEncoder(), nil + default: + return nil, errors.New("invalid value for content_encoding") + } +} + +// NewContentDecoder returns a ContentDecoder for the encoding type. 
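
As a quick orientation, here is a minimal sketch of how the new `choice` helpers above might be used to validate a plugin option. Only the `Check`/`CheckSlice` signatures and the "unknown choice" error text come from `internal/choice/choice.go`; the option values are made up, and the import only resolves from inside the telegraf module because the package is under `internal/`.

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/choice"
)

func main() {
	// Hypothetical plugin option with a fixed set of allowed values.
	allowed := []string{"gzip", "identity"}

	// Check validates a single value.
	if err := choice.Check("snappy", allowed); err != nil {
		fmt.Println(err) // prints: unknown choice snappy
	}

	// CheckSlice validates every element of a list option.
	if err := choice.CheckSlice([]string{"gzip", "identity"}, allowed); err != nil {
		fmt.Println(err) // not reached: both values are allowed
	}
}
```
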
+func NewContentDecoder(encoding string) (ContentDecoder, error) { + switch encoding { + case "gzip": + return NewGzipDecoder() + case "identity", "": + return NewIdentityDecoder(), nil + default: + return nil, errors.New("invalid value for content_encoding") + } +} + +// ContentEncoder applies a wrapper encoding to byte buffers. +type ContentEncoder interface { + Encode([]byte) ([]byte, error) +} + +// GzipEncoder compresses the buffer using gzip at the default level. +type GzipEncoder struct { + writer *gzip.Writer + buf *bytes.Buffer +} + +func NewGzipEncoder() (*GzipEncoder, error) { + var buf bytes.Buffer + return &GzipEncoder{ + writer: gzip.NewWriter(&buf), + buf: &buf, + }, nil +} + +func (e *GzipEncoder) Encode(data []byte) ([]byte, error) { + e.buf.Reset() + e.writer.Reset(e.buf) + + _, err := e.writer.Write(data) + if err != nil { + return nil, err + } + err = e.writer.Close() + if err != nil { + return nil, err + } + return e.buf.Bytes(), nil +} + +// IdentityEncoder is a null encoder that applies no transformation. +type IdentityEncoder struct{} + +func NewIdentityEncoder() *IdentityEncoder { + return &IdentityEncoder{} +} + +func (*IdentityEncoder) Encode(data []byte) ([]byte, error) { + return data, nil +} + +// ContentDecoder removes a wrapper encoding from byte buffers. +type ContentDecoder interface { + Decode([]byte) ([]byte, error) +} + +// GzipDecoder decompresses buffers with gzip compression. +type GzipDecoder struct { + reader *gzip.Reader + buf *bytes.Buffer +} + +func NewGzipDecoder() (*GzipDecoder, error) { + return &GzipDecoder{ + reader: new(gzip.Reader), + buf: new(bytes.Buffer), + }, nil +} + +func (d *GzipDecoder) Decode(data []byte) ([]byte, error) { + d.reader.Reset(bytes.NewBuffer(data)) + d.buf.Reset() + + _, err := d.buf.ReadFrom(d.reader) + if err != nil && err != io.EOF { + return nil, err + } + err = d.reader.Close() + if err != nil { + return nil, err + } + return d.buf.Bytes(), nil +} + +// IdentityDecoder is a null decoder that returns the input. 
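
The stream decoder above is the piece most likely to sit in front of a network input. Below is a minimal sketch, assuming an HTTP handler whose clients may set `Content-Encoding`; the route, port, and handler name are illustrative, only `NewStreamContentDecoder` and its accepted encodings ("gzip", "identity", "") come from this file.

```go
package main

import (
	"io/ioutil"
	"log"
	"net/http"

	"github.com/influxdata/telegraf/internal"
)

func handle(w http.ResponseWriter, req *http.Request) {
	// An empty Content-Encoding selects the identity (pass-through) reader;
	// "gzip" wraps the body in the GzipReader defined above.
	body, err := internal.NewStreamContentDecoder(req.Header.Get("Content-Encoding"), req.Body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	data, err := ioutil.ReadAll(body)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	log.Printf("received %d decoded bytes", len(data))
}

func main() {
	http.HandleFunc("/write", handle)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```
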
+type IdentityDecoder struct{} + +func NewIdentityDecoder() *IdentityDecoder { + return &IdentityDecoder{} +} + +func (*IdentityDecoder) Decode(data []byte) ([]byte, error) { + return data, nil +} diff --git a/internal/content_coding_test.go b/internal/content_coding_test.go new file mode 100644 index 000000000..85496df59 --- /dev/null +++ b/internal/content_coding_test.go @@ -0,0 +1,94 @@ +package internal + +import ( + "bytes" + "io/ioutil" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGzipEncodeDecode(t *testing.T) { + enc, err := NewGzipEncoder() + require.NoError(t, err) + dec, err := NewGzipDecoder() + require.NoError(t, err) + + payload, err := enc.Encode([]byte("howdy")) + require.NoError(t, err) + + actual, err := dec.Decode(payload) + require.NoError(t, err) + + require.Equal(t, "howdy", string(actual)) +} + +func TestGzipReuse(t *testing.T) { + enc, err := NewGzipEncoder() + require.NoError(t, err) + dec, err := NewGzipDecoder() + require.NoError(t, err) + + payload, err := enc.Encode([]byte("howdy")) + require.NoError(t, err) + + actual, err := dec.Decode(payload) + require.NoError(t, err) + + require.Equal(t, "howdy", string(actual)) + + payload, err = enc.Encode([]byte("doody")) + require.NoError(t, err) + + actual, err = dec.Decode(payload) + require.NoError(t, err) + + require.Equal(t, "doody", string(actual)) +} + +func TestIdentityEncodeDecode(t *testing.T) { + enc := NewIdentityEncoder() + dec := NewIdentityDecoder() + + payload, err := enc.Encode([]byte("howdy")) + require.NoError(t, err) + + actual, err := dec.Decode(payload) + require.NoError(t, err) + + require.Equal(t, "howdy", string(actual)) +} + +func TestStreamIdentityDecode(t *testing.T) { + var r bytes.Buffer + n, err := r.Write([]byte("howdy")) + require.NoError(t, err) + require.Equal(t, 5, n) + + dec, err := NewStreamContentDecoder("identity", &r) + require.NoError(t, err) + + data, err := ioutil.ReadAll(dec) + require.NoError(t, err) + + require.Equal(t, []byte("howdy"), data) +} + +func TestStreamGzipDecode(t *testing.T) { + enc, err := NewGzipEncoder() + require.NoError(t, err) + written, err := enc.Encode([]byte("howdy")) + require.NoError(t, err) + + w := bytes.NewBuffer(written) + + dec, err := NewStreamContentDecoder("gzip", w) + require.NoError(t, err) + + b := make([]byte, 10) + n, err := dec.Read(b) + require.NoError(t, err) + require.Equal(t, 5, n) + + require.Equal(t, []byte("howdy"), b[:n]) +} diff --git a/internal/docker/docker.go b/internal/docker/docker.go new file mode 100644 index 000000000..1808944ae --- /dev/null +++ b/internal/docker/docker.go @@ -0,0 +1,36 @@ +package docker + +import "strings" + +// Adapts some of the logic from the actual Docker library's image parsing +// routines: +// https://github.com/docker/distribution/blob/release/2.7/reference/normalize.go +func ParseImage(image string) (string, string) { + domain := "" + remainder := "" + + i := strings.IndexRune(image, '/') + + if i == -1 || (!strings.ContainsAny(image[:i], ".:") && image[:i] != "localhost") { + remainder = image + } else { + domain, remainder = image[:i], image[i+1:] + } + + imageName := "" + imageVersion := "unknown" + + i = strings.LastIndex(remainder, ":") + if i > -1 { + imageVersion = remainder[i+1:] + imageName = remainder[:i] + } else { + imageName = remainder + } + + if domain != "" { + imageName = domain + "/" + imageName + } + + return imageName, imageVersion +} diff --git a/internal/docker/docker_test.go b/internal/docker/docker_test.go new file mode 100644 index 
000000000..14591ab87 --- /dev/null +++ b/internal/docker/docker_test.go @@ -0,0 +1,59 @@ +package docker_test + +import ( + "testing" + + "github.com/influxdata/telegraf/internal/docker" + "github.com/stretchr/testify/require" +) + +func TestParseImage(t *testing.T) { + tests := []struct { + image string + parsedName string + parsedVersion string + }{ + { + image: "postgres", + parsedName: "postgres", + parsedVersion: "unknown", + }, + { + image: "postgres:latest", + parsedName: "postgres", + parsedVersion: "latest", + }, + { + image: "coreos/etcd", + parsedName: "coreos/etcd", + parsedVersion: "unknown", + }, + { + image: "coreos/etcd:latest", + parsedName: "coreos/etcd", + parsedVersion: "latest", + }, + { + image: "quay.io/postgres", + parsedName: "quay.io/postgres", + parsedVersion: "unknown", + }, + { + image: "quay.io:4443/coreos/etcd", + parsedName: "quay.io:4443/coreos/etcd", + parsedVersion: "unknown", + }, + { + image: "quay.io:4443/coreos/etcd:latest", + parsedName: "quay.io:4443/coreos/etcd", + parsedVersion: "latest", + }, + } + for _, tt := range tests { + t.Run("parse name "+tt.image, func(t *testing.T) { + imageName, imageVersion := docker.ParseImage(tt.image) + require.Equal(t, tt.parsedName, imageName) + require.Equal(t, tt.parsedVersion, imageVersion) + }) + } +} diff --git a/internal/exec.go b/internal/exec.go new file mode 100644 index 000000000..795822f46 --- /dev/null +++ b/internal/exec.go @@ -0,0 +1,30 @@ +package internal + +import ( + "bytes" + "os/exec" + "time" +) + +// CombinedOutputTimeout runs the given command with the given timeout and +// returns the combined output of stdout and stderr. +// If the command times out, it attempts to kill the process. +func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) { + var b bytes.Buffer + c.Stdout = &b + c.Stderr = &b + if err := c.Start(); err != nil { + return nil, err + } + err := WaitTimeout(c, timeout) + return b.Bytes(), err +} + +// RunTimeout runs the given command with the given timeout. +// If the command times out, it attempts to kill the process. +func RunTimeout(c *exec.Cmd, timeout time.Duration) error { + if err := c.Start(); err != nil { + return err + } + return WaitTimeout(c, timeout) +} diff --git a/internal/exec_unix.go b/internal/exec_unix.go new file mode 100644 index 000000000..d41aae825 --- /dev/null +++ b/internal/exec_unix.go @@ -0,0 +1,58 @@ +// +build !windows + +package internal + +import ( + "log" + "os/exec" + "syscall" + "time" +) + +// KillGrace is the amount of time we allow a process to shutdown before +// sending a SIGKILL. +const KillGrace = 5 * time.Second + +// WaitTimeout waits for the given command to finish with a timeout. +// It assumes the command has already been started. +// If the command times out, it attempts to kill the process. +func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { + var kill *time.Timer + term := time.AfterFunc(timeout, func() { + err := c.Process.Signal(syscall.SIGTERM) + if err != nil { + log.Printf("E! [agent] Error terminating process: %s", err) + return + } + + kill = time.AfterFunc(KillGrace, func() { + err := c.Process.Kill() + if err != nil { + log.Printf("E! [agent] Error killing process: %s", err) + return + } + }) + }) + + err := c.Wait() + + // Shutdown all timers + if kill != nil { + kill.Stop() + } + termSent := !term.Stop() + + // If the process exited without error treat it as success. This allows a + // process to do a clean shutdown on signal. 
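
For context, a minimal sketch of calling the exec helpers split out above (`CombinedOutputTimeout`, backed by `WaitTimeout`). The command and timeout are arbitrary; only the function signature comes from `internal/exec.go`.

```go
package main

import (
	"fmt"
	"log"
	"os/exec"
	"time"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Capture stdout and stderr together; the process is sent SIGTERM (or
	// killed outright on Windows) if it has not finished within the timeout.
	cmd := exec.Command("uname", "-a")
	out, err := internal.CombinedOutputTimeout(cmd, 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}
```
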
+ if err == nil { + return nil + } + + // If SIGTERM was sent then treat any process error as a timeout. + if termSent { + return TimeoutErr + } + + // Otherwise there was an error unrelated to termination. + return err +} diff --git a/internal/exec_windows.go b/internal/exec_windows.go new file mode 100644 index 000000000..f010bdd96 --- /dev/null +++ b/internal/exec_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package internal + +import ( + "log" + "os/exec" + "time" +) + +// WaitTimeout waits for the given command to finish with a timeout. +// It assumes the command has already been started. +// If the command times out, it attempts to kill the process. +func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { + timer := time.AfterFunc(timeout, func() { + err := c.Process.Kill() + if err != nil { + log.Printf("E! [agent] Error killing process: %s", err) + return + } + }) + + err := c.Wait() + + // Shutdown all timers + termSent := !timer.Stop() + + // If the process exited without error treat it as success. This allows a + // process to do a clean shutdown on signal. + if err == nil { + return nil + } + + // If SIGTERM was sent then treat any process error as a timeout. + if termSent { + return TimeoutErr + } + + // Otherwise there was an error unrelated to termination. + return err +} diff --git a/internal/globpath/globpath.go b/internal/globpath/globpath.go index 6067f65b2..d4e7ffd87 100644 --- a/internal/globpath/globpath.go +++ b/internal/globpath/globpath.go @@ -1,110 +1,116 @@ package globpath import ( - "fmt" "os" "path/filepath" "strings" "github.com/gobwas/glob" + "github.com/karrick/godirwalk" ) -var sepStr = fmt.Sprintf("%v", string(os.PathSeparator)) - type GlobPath struct { path string hasMeta bool - hasSuperMeta bool + HasSuperMeta bool + rootGlob string g glob.Glob - root string } func Compile(path string) (*GlobPath, error) { out := GlobPath{ hasMeta: hasMeta(path), - hasSuperMeta: hasSuperMeta(path), - path: path, + HasSuperMeta: hasSuperMeta(path), + path: filepath.FromSlash(path), } // if there are no glob meta characters in the path, don't bother compiling - // a glob object or finding the root directory. (see short-circuit in Match) - if !out.hasMeta || !out.hasSuperMeta { + // a glob object + if !out.hasMeta || !out.HasSuperMeta { return &out, nil } + // find the root elements of the object path, the entry point for recursion + // when you have a super-meta in your path (which are : + // glob(/your/expression/until/first/star/of/super-meta)) + out.rootGlob = path[:strings.Index(path, "**")+1] var err error if out.g, err = glob.Compile(path, os.PathSeparator); err != nil { return nil, err } - // Get the root directory for this filepath - out.root = findRootDir(path) return &out, nil } -func (g *GlobPath) Match() map[string]os.FileInfo { +// Match returns all files matching the expression. +// If it's a static path, returns path. +// All returned path will have the host platform separator. 
+func (g *GlobPath) Match() []string { if !g.hasMeta { - out := make(map[string]os.FileInfo) - info, err := os.Stat(g.path) - if err == nil { - out[g.path] = info - } - return out + return []string{g.path} } - if !g.hasSuperMeta { - out := make(map[string]os.FileInfo) + if !g.HasSuperMeta { files, _ := filepath.Glob(g.path) - for _, file := range files { - info, err := os.Stat(file) - if err == nil { - out[file] = info - } - } - return out + return files } - return walkFilePath(g.root, g.g) -} - -// walk the filepath from the given root and return a list of files that match -// the given glob. -func walkFilePath(root string, g glob.Glob) map[string]os.FileInfo { - matchedFiles := make(map[string]os.FileInfo) - walkfn := func(path string, info os.FileInfo, _ error) error { - if g.Match(path) { - matchedFiles[path] = info + roots, err := filepath.Glob(g.rootGlob) + if err != nil { + return []string{} + } + out := []string{} + walkfn := func(path string, _ *godirwalk.Dirent) error { + if g.g.Match(path) { + out = append(out, path) } return nil - } - filepath.Walk(root, walkfn) - return matchedFiles -} -// find the root dir of the given path (could include globs). -// ie: -// /var/log/telegraf.conf -> /var/log -// /home/** -> /home -// /home/*/** -> /home -// /lib/share/*/*/**.txt -> /lib/share -func findRootDir(path string) string { - pathItems := strings.Split(path, sepStr) - out := sepStr - for i, item := range pathItems { - if i == len(pathItems)-1 { - break - } - if item == "" { + } + for _, root := range roots { + fileinfo, err := os.Stat(root) + if err != nil { continue } - if hasMeta(item) { - break + if !fileinfo.IsDir() { + if g.MatchString(root) { + out = append(out, root) + } + continue } - out += item + sepStr - } - if out != "/" { - out = strings.TrimSuffix(out, "/") + godirwalk.Walk(root, &godirwalk.Options{ + Callback: walkfn, + Unsorted: true, + }) } return out } +// MatchString tests the path string against the glob. The path should contain +// the host platform separator. +func (g *GlobPath) MatchString(path string) bool { + if !g.HasSuperMeta { + res, _ := filepath.Match(g.path, path) + return res + } + return g.g.Match(path) +} + +// GetRoots returns a list of files and directories which should be optimal +// prefixes of matching files when you have a super-meta in your expression : +// - any directory under these roots may contain a matching file +// - no file outside of these roots can match the pattern +// Note that it returns both files and directories. +// All returned path will have the host platform separator. +func (g *GlobPath) GetRoots() []string { + if !g.hasMeta { + return []string{g.path} + } + if !g.HasSuperMeta { + matches, _ := filepath.Glob(g.path) + return matches + } + roots, _ := filepath.Glob(g.rootGlob) + return roots +} + // hasMeta reports whether path contains any magic glob characters. 
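
A minimal sketch of the reworked globpath API, assuming a hypothetical `**` pattern; only the `Compile`, `Match`, and `GetRoots` signatures come from this file.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/internal/globpath"
)

func main() {
	// "**" is a super-meta, so Match walks the directories found by the root
	// glob (derived from the part of the pattern before the "**") instead of
	// calling filepath.Glob directly. The path below is purely illustrative.
	g, err := globpath.Compile("/var/log/**/*.log")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("roots:", g.GetRoots())
	for _, match := range g.Match() {
		fmt.Println("match:", match)
	}
}
```
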
func hasMeta(path string) bool { return strings.IndexAny(path, "*?[") >= 0 diff --git a/internal/globpath/globpath_test.go b/internal/globpath/globpath_test.go index 20bfbcbb9..60562d8f8 100644 --- a/internal/globpath/globpath_test.go +++ b/internal/globpath/globpath_test.go @@ -1,12 +1,10 @@ package globpath import ( - "os" "runtime" "strings" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -29,31 +27,32 @@ func TestCompileAndMatch(t *testing.T) { require.NoError(t, err) matches := g1.Match() - assert.Len(t, matches, 6) + require.Len(t, matches, 6) matches = g2.Match() - assert.Len(t, matches, 2) + require.Len(t, matches, 2) matches = g3.Match() - assert.Len(t, matches, 1) + require.Len(t, matches, 1) matches = g4.Match() - assert.Len(t, matches, 0) + require.Len(t, matches, 1) matches = g5.Match() - assert.Len(t, matches, 0) + require.Len(t, matches, 0) } -func TestFindRootDir(t *testing.T) { +func TestRootGlob(t *testing.T) { + dir := getTestdataDir() tests := []struct { input string output string }{ - {"/var/log/telegraf.conf", "/var/log"}, - {"/home/**", "/home"}, - {"/home/*/**", "/home"}, - {"/lib/share/*/*/**.txt", "/lib/share"}, + {dir + "/**", dir + "/*"}, + {dir + "/nested?/**", dir + "/nested?/*"}, + {dir + "/ne**/nest*", dir + "/ne*"}, + {dir + "/nested?/*", ""}, } for _, test := range tests { - actual := findRootDir(test.input) - assert.Equal(t, test.output, actual) + actual, _ := Compile(test.input) + require.Equal(t, actual.rootGlob, test.output) } } @@ -64,7 +63,7 @@ func TestFindNestedTextFile(t *testing.T) { require.NoError(t, err) matches := g1.Match() - assert.Len(t, matches, 1) + require.Len(t, matches, 1) } func getTestdataDir() string { @@ -75,10 +74,10 @@ func getTestdataDir() string { func TestMatch_ErrPermission(t *testing.T) { tests := []struct { input string - expected map[string]os.FileInfo + expected []string }{ - {"/root/foo", map[string]os.FileInfo{}}, - {"/root/f*", map[string]os.FileInfo{}}, + {"/root/foo", []string{"/root/foo"}}, + {"/root/f*", []string(nil)}, } for _, test := range tests { @@ -88,3 +87,14 @@ func TestMatch_ErrPermission(t *testing.T) { require.Equal(t, test.expected, actual) } } + +func TestWindowsSeparator(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip("Skipping Windows only test") + } + + glob, err := Compile("testdata/nested1") + require.NoError(t, err) + ok := glob.MatchString("testdata\\nested1") + require.True(t, ok) +} diff --git a/internal/goplugin/noplugin.go b/internal/goplugin/noplugin.go new file mode 100644 index 000000000..23d8634c4 --- /dev/null +++ b/internal/goplugin/noplugin.go @@ -0,0 +1,9 @@ +// +build !goplugin + +package goplugin + +import "errors" + +func LoadExternalPlugins(rootDir string) error { + return errors.New("go plugin support is not enabled") +} diff --git a/internal/goplugin/plugin.go b/internal/goplugin/plugin.go new file mode 100644 index 000000000..7e58ec32e --- /dev/null +++ b/internal/goplugin/plugin.go @@ -0,0 +1,42 @@ +// +build goplugin + +package goplugin + +import ( + "fmt" + "os" + "path" + "path/filepath" + "plugin" + "strings" +) + +// loadExternalPlugins loads external plugins from shared libraries (.so, .dll, etc.) +// in the specified directory. +func LoadExternalPlugins(rootDir string) error { + return filepath.Walk(rootDir, func(pth string, info os.FileInfo, err error) error { + // Stop if there was an error. + if err != nil { + return err + } + + // Ignore directories. 
+ if info.IsDir() { + return nil + } + + // Ignore files that aren't shared libraries. + ext := strings.ToLower(path.Ext(pth)) + if ext != ".so" && ext != ".dll" { + return nil + } + + // Load plugin. + _, err = plugin.Open(pth) + if err != nil { + return fmt.Errorf("error loading %s: %s", pth, err) + } + + return nil + }) +} diff --git a/internal/http.go b/internal/http.go new file mode 100644 index 000000000..04b8a9368 --- /dev/null +++ b/internal/http.go @@ -0,0 +1,108 @@ +package internal + +import ( + "crypto/subtle" + "net" + "net/http" + "net/url" +) + +type BasicAuthErrorFunc func(rw http.ResponseWriter) + +// AuthHandler returns a http handler that requires HTTP basic auth +// credentials to match the given username and password. +func AuthHandler(username, password, realm string, onError BasicAuthErrorFunc) func(h http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + return &basicAuthHandler{ + username: username, + password: password, + realm: realm, + onError: onError, + next: h, + } + } +} + +type basicAuthHandler struct { + username string + password string + realm string + onError BasicAuthErrorFunc + next http.Handler +} + +func (h *basicAuthHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if h.username != "" || h.password != "" { + reqUsername, reqPassword, ok := req.BasicAuth() + if !ok || + subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.username)) != 1 || + subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.password)) != 1 { + + rw.Header().Set("WWW-Authenticate", "Basic realm=\""+h.realm+"\"") + h.onError(rw) + http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized) + return + } + } + + h.next.ServeHTTP(rw, req) +} + +// ErrorFunc is a callback for writing an error response. +type ErrorFunc func(rw http.ResponseWriter, code int) + +// IPRangeHandler returns a http handler that requires the remote address to be +// in the specified network. +func IPRangeHandler(network []*net.IPNet, onError ErrorFunc) func(h http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + return &ipRangeHandler{ + network: network, + onError: onError, + next: h, + } + } +} + +type ipRangeHandler struct { + network []*net.IPNet + onError ErrorFunc + next http.Handler +} + +func (h *ipRangeHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + if len(h.network) == 0 { + h.next.ServeHTTP(rw, req) + return + } + + remoteIPString, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + h.onError(rw, http.StatusForbidden) + return + } + + remoteIP := net.ParseIP(remoteIPString) + if remoteIP == nil { + h.onError(rw, http.StatusForbidden) + return + } + + for _, net := range h.network { + if net.Contains(remoteIP) { + h.next.ServeHTTP(rw, req) + return + } + } + + h.onError(rw, http.StatusForbidden) +} + +func OnClientError(client *http.Client, err error) { + // Close connection after a timeout error. If this is a HTTP2 + // connection this ensures that next interval a new connection will be + // used and name lookup will be performed. 
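
A minimal sketch of wiring the `AuthHandler` middleware from `internal/http.go` above in front of a handler; the credentials, realm, route, and port are placeholders, only the `AuthHandler` signature and `BasicAuthErrorFunc` callback come from this change.

```go
package main

import (
	"log"
	"net/http"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})

	// AuthHandler returns a middleware; the callback runs when credentials
	// are missing or wrong, and the handler itself writes the 401 response.
	onError := func(rw http.ResponseWriter) {
		log.Println("rejected request: bad basic auth credentials")
	}
	handler := internal.AuthHandler("telegraf", "secret", "telegraf", onError)(mux)

	log.Fatal(http.ListenAndServe(":8080", handler))
}
```
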
+ // https://github.com/golang/go/issues/36026 + if err, ok := err.(*url.Error); ok && err.Timeout() { + client.CloseIdleConnections() + } +} diff --git a/internal/internal.go b/internal/internal.go index adc4df820..777128f66 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -3,17 +3,24 @@ package internal import ( "bufio" "bytes" - "crypto/rand" + "compress/gzip" + "context" "errors" - "log" - "math/big" + "fmt" + "io" + "math" + "math/rand" "os" "os/exec" + "runtime" "strconv" "strings" + "sync" "syscall" "time" "unicode" + + "github.com/alecthomas/units" ) const alphanum string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" @@ -22,13 +29,52 @@ var ( TimeoutErr = errors.New("Command timed out.") NotImplementedError = errors.New("not implemented yet") + + VersionAlreadySetError = errors.New("version has already been set") ) +// Set via the main module +var version string + // Duration just wraps time.Duration type Duration struct { Duration time.Duration } +// Size just wraps an int64 +type Size struct { + Size int64 +} + +type Number struct { + Value float64 +} + +type ReadWaitCloser struct { + pipeReader *io.PipeReader + wg sync.WaitGroup +} + +// SetVersion sets the telegraf agent version +func SetVersion(v string) error { + if version != "" { + return VersionAlreadySetError + } + version = v + return nil +} + +// Version returns the telegraf agent version +func Version() string { + return version +} + +// ProductToken returns a tag for Telegraf that can be used in user agents. +func ProductToken() string { + return fmt.Sprintf("Telegraf/%s Go/%s", + Version(), strings.TrimPrefix(runtime.Version(), "go")) +} + // UnmarshalTOML parses the duration from the TOML config file func (d *Duration) UnmarshalTOML(b []byte) error { var err error @@ -64,6 +110,37 @@ func (d *Duration) UnmarshalTOML(b []byte) error { return nil } +func (s *Size) UnmarshalTOML(b []byte) error { + var err error + b = bytes.Trim(b, `'`) + + val, err := strconv.ParseInt(string(b), 10, 64) + if err == nil { + s.Size = val + return nil + } + uq, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + val, err = units.ParseStrictBytes(uq) + if err != nil { + return err + } + s.Size = val + return nil +} + +func (n *Number) UnmarshalTOML(b []byte) error { + value, err := strconv.ParseFloat(string(b), 64) + if err != nil { + return err + } + + n.Value = value + return nil +} + // ReadLines reads contents from a file and splits them by new lines. // A convenience wrapper to ReadLinesOffsetN(filename, 0, -1). func ReadLines(filename string) ([]string, error) { @@ -126,51 +203,6 @@ func SnakeCase(in string) string { return string(out) } -// CombinedOutputTimeout runs the given command with the given timeout and -// returns the combined output of stdout and stderr. -// If the command times out, it attempts to kill the process. -func CombinedOutputTimeout(c *exec.Cmd, timeout time.Duration) ([]byte, error) { - var b bytes.Buffer - c.Stdout = &b - c.Stderr = &b - if err := c.Start(); err != nil { - return nil, err - } - err := WaitTimeout(c, timeout) - return b.Bytes(), err -} - -// RunTimeout runs the given command with the given timeout. -// If the command times out, it attempts to kill the process. -func RunTimeout(c *exec.Cmd, timeout time.Duration) error { - if err := c.Start(); err != nil { - return err - } - return WaitTimeout(c, timeout) -} - -// WaitTimeout waits for the given command to finish with a timeout. -// It assumes the command has already been started. 
-// If the command times out, it attempts to kill the process. -func WaitTimeout(c *exec.Cmd, timeout time.Duration) error { - timer := time.NewTimer(timeout) - done := make(chan error) - go func() { done <- c.Wait() }() - select { - case err := <-done: - timer.Stop() - return err - case <-timer.C: - if err := c.Process.Kill(); err != nil { - log.Printf("E! FATAL error killing process: %s", err) - return err - } - // wait for the command to return after killing it - <-done - return TimeoutErr - } -} - // RandomSleep will sleep for a random amount of time up to max. // If the shutdown channel is closed, it will return before it has finished // sleeping. @@ -178,12 +210,8 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) { if max == 0 { return } - maxSleep := big.NewInt(max.Nanoseconds()) - var sleepns int64 - if j, err := rand.Int(rand.Reader, maxSleep); err == nil { - sleepns = j.Int64() - } + sleepns := rand.Int63n(max.Nanoseconds()) t := time.NewTimer(time.Nanosecond * time.Duration(sleepns)) select { @@ -195,6 +223,49 @@ func RandomSleep(max time.Duration, shutdown chan struct{}) { } } +// RandomDuration returns a random duration between 0 and max. +func RandomDuration(max time.Duration) time.Duration { + if max == 0 { + return 0 + } + + sleepns := rand.Int63n(max.Nanoseconds()) + + return time.Duration(sleepns) +} + +// SleepContext sleeps until the context is closed or the duration is reached. +func SleepContext(ctx context.Context, duration time.Duration) error { + if duration == 0 { + return nil + } + + t := time.NewTimer(duration) + select { + case <-t.C: + return nil + case <-ctx.Done(): + t.Stop() + return ctx.Err() + } +} + +// AlignDuration returns the duration until next aligned interval. +// If the current time is aligned a 0 duration is returned. +func AlignDuration(tm time.Time, interval time.Duration) time.Duration { + return AlignTime(tm, interval).Sub(tm) +} + +// AlignTime returns the time of the next aligned interval. +// If the current time is aligned the current time is returned. +func AlignTime(tm time.Time, interval time.Duration) time.Time { + truncated := tm.Truncate(interval) + if truncated == tm { + return tm + } + return truncated.Add(interval) +} + // Exit status takes the error from exec.Command // and returns the exit status and true // if error is not exit status, will return 0 and false @@ -206,3 +277,148 @@ func ExitStatus(err error) (int, bool) { } return 0, false } + +func (r *ReadWaitCloser) Close() error { + err := r.pipeReader.Close() + r.wg.Wait() // wait for the gzip goroutine finish + return err +} + +// CompressWithGzip takes an io.Reader as input and pipes +// it through a gzip.Writer returning an io.Reader containing +// the gzipped data. +// An error is returned if passing data to the gzip.Writer fails +func CompressWithGzip(data io.Reader) (io.ReadCloser, error) { + pipeReader, pipeWriter := io.Pipe() + gzipWriter := gzip.NewWriter(pipeWriter) + + rc := &ReadWaitCloser{ + pipeReader: pipeReader, + } + + rc.wg.Add(1) + var err error + go func() { + _, err = io.Copy(gzipWriter, data) + gzipWriter.Close() + // subsequent reads from the read half of the pipe will + // return no bytes and the error err, or EOF if err is nil. + pipeWriter.CloseWithError(err) + rc.wg.Done() + }() + + return pipeReader, err +} + +// ParseTimestamp parses a Time according to the standard Telegraf options. 
+// These are generally displayed in the toml similar to: +// json_time_key= "timestamp" +// json_time_format = "2006-01-02T15:04:05Z07:00" +// json_timezone = "America/Los_Angeles" +// +// The format can be one of "unix", "unix_ms", "unix_us", "unix_ns", or a Go +// time layout suitable for time.Parse. +// +// When using the "unix" format, a optional fractional component is allowed. +// Specific unix time precisions cannot have a fractional component. +// +// Unix times may be an int64, float64, or string. When using a Go format +// string the timestamp must be a string. +// +// The location is a location string suitable for time.LoadLocation. Unix +// times do not use the location string, a unix time is always return in the +// UTC location. +func ParseTimestamp(format string, timestamp interface{}, location string) (time.Time, error) { + switch format { + case "unix", "unix_ms", "unix_us", "unix_ns": + return parseUnix(format, timestamp) + default: + if location == "" { + location = "UTC" + } + return parseTime(format, timestamp, location) + } +} + +func parseUnix(format string, timestamp interface{}) (time.Time, error) { + integer, fractional, err := parseComponents(timestamp) + if err != nil { + return time.Unix(0, 0), err + } + + switch strings.ToLower(format) { + case "unix": + return time.Unix(integer, fractional).UTC(), nil + case "unix_ms": + return time.Unix(0, integer*1e6).UTC(), nil + case "unix_us": + return time.Unix(0, integer*1e3).UTC(), nil + case "unix_ns": + return time.Unix(0, integer).UTC(), nil + default: + return time.Unix(0, 0), errors.New("unsupported type") + } +} + +// Returns the integers before and after an optional decimal point. Both '.' +// and ',' are supported for the decimal point. The timestamp can be an int64, +// float64, or string. +// ex: "42.5" -> (42, 5, nil) +func parseComponents(timestamp interface{}) (int64, int64, error) { + switch ts := timestamp.(type) { + case string: + parts := strings.SplitN(ts, ".", 2) + if len(parts) == 2 { + return parseUnixTimeComponents(parts[0], parts[1]) + } + + parts = strings.SplitN(ts, ",", 2) + if len(parts) == 2 { + return parseUnixTimeComponents(parts[0], parts[1]) + } + + integer, err := strconv.ParseInt(ts, 10, 64) + if err != nil { + return 0, 0, err + } + return integer, 0, nil + case int64: + return ts, 0, nil + case float64: + integer, fractional := math.Modf(ts) + return int64(integer), int64(fractional * 1e9), nil + default: + return 0, 0, errors.New("unsupported type") + } +} + +func parseUnixTimeComponents(first, second string) (int64, int64, error) { + integer, err := strconv.ParseInt(first, 10, 64) + if err != nil { + return 0, 0, err + } + + // Convert to nanoseconds, dropping any greater precision. + buf := []byte("000000000") + copy(buf, second) + + fractional, err := strconv.ParseInt(string(buf), 10, 64) + if err != nil { + return 0, 0, err + } + return integer, fractional, nil +} + +// ParseTime parses a string timestamp according to the format string. 
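
A minimal sketch of calling `ParseTimestamp` with both a unix format and a Go layout string; the values mirror cases in the test table later in this patch, everything else is illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/internal"
)

func main() {
	// Unix formats ignore the location and always return a UTC time; the
	// plain "unix" form accepts an optional fractional part.
	tm, err := internal.ParseTimestamp("unix", "1568338208.500", "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tm) // 2019-09-13 01:30:08.5 +0000 UTC

	// Go layout strings parse in the given location ("UTC" when empty).
	tm, err = internal.ParseTimestamp("2006-01-02 15:04:05", "2019-02-20 21:50:34", "America/Los_Angeles")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tm)
}
```
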
+func parseTime(format string, timestamp interface{}, location string) (time.Time, error) { + switch ts := timestamp.(type) { + case string: + loc, err := time.LoadLocation(location) + if err != nil { + return time.Unix(0, 0), err + } + return time.ParseInLocation(format, ts, loc) + default: + return time.Unix(0, 0), errors.New("unsupported type") + } +} diff --git a/internal/internal_test.go b/internal/internal_test.go index ee1d24418..cbfbabb22 100644 --- a/internal/internal_test.go +++ b/internal/internal_test.go @@ -1,11 +1,19 @@ package internal import ( + "bytes" + "compress/gzip" + "crypto/rand" + "io" + "io/ioutil" + "log" "os/exec" + "regexp" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type SnakeTest struct { @@ -60,6 +68,30 @@ func TestRunTimeout(t *testing.T) { assert.True(t, elapsed < time.Millisecond*75) } +// Verifies behavior of a command that doesn't get killed. +func TestRunTimeoutFastExit(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test due to random failures.") + } + if echobin == "" { + t.Skip("'echo' binary not available on OS, skipping.") + } + cmd := exec.Command(echobin) + start := time.Now() + err := RunTimeout(cmd, time.Millisecond*20) + buf := &bytes.Buffer{} + log.SetOutput(buf) + elapsed := time.Since(start) + + require.NoError(t, err) + // Verify that command gets killed in 20ms, with some breathing room + assert.True(t, elapsed < time.Millisecond*75) + + // Verify "process already finished" log doesn't occur. + time.Sleep(time.Millisecond * 75) + require.Equal(t, "", buf.String()) +} + func TestCombinedOutputTimeout(t *testing.T) { // TODO: Fix this test t.Skip("Test failing too often, skip for now and revisit later.") @@ -162,3 +194,298 @@ func TestDuration(t *testing.T) { d.UnmarshalTOML([]byte(`1.5`)) assert.Equal(t, time.Second, d.Duration) } + +func TestSize(t *testing.T) { + var s Size + + s.UnmarshalTOML([]byte(`"1B"`)) + assert.Equal(t, int64(1), s.Size) + + s = Size{} + s.UnmarshalTOML([]byte(`1`)) + assert.Equal(t, int64(1), s.Size) + + s = Size{} + s.UnmarshalTOML([]byte(`'1'`)) + assert.Equal(t, int64(1), s.Size) + + s = Size{} + s.UnmarshalTOML([]byte(`"1GB"`)) + assert.Equal(t, int64(1000*1000*1000), s.Size) + + s = Size{} + s.UnmarshalTOML([]byte(`"12GiB"`)) + assert.Equal(t, int64(12*1024*1024*1024), s.Size) +} + +func TestCompressWithGzip(t *testing.T) { + testData := "the quick brown fox jumps over the lazy dog" + inputBuffer := bytes.NewBuffer([]byte(testData)) + + outputBuffer, err := CompressWithGzip(inputBuffer) + assert.NoError(t, err) + + gzipReader, err := gzip.NewReader(outputBuffer) + assert.NoError(t, err) + defer gzipReader.Close() + + output, err := ioutil.ReadAll(gzipReader) + assert.NoError(t, err) + + assert.Equal(t, testData, string(output)) +} + +type mockReader struct { + readN uint64 // record the number of calls to Read +} + +func (r *mockReader) Read(p []byte) (n int, err error) { + r.readN++ + return rand.Read(p) +} + +func TestCompressWithGzipEarlyClose(t *testing.T) { + mr := &mockReader{} + + rc, err := CompressWithGzip(mr) + assert.NoError(t, err) + + n, err := io.CopyN(ioutil.Discard, rc, 10000) + assert.NoError(t, err) + assert.Equal(t, int64(10000), n) + + r1 := mr.readN + err = rc.Close() + assert.NoError(t, err) + + n, err = io.CopyN(ioutil.Discard, rc, 10000) + assert.Error(t, io.EOF, err) + assert.Equal(t, int64(0), n) + + r2 := mr.readN + // no more read to the source after closing + assert.Equal(t, r1, r2) +} + +func TestVersionAlreadySet(t 
*testing.T) { + err := SetVersion("foo") + assert.Nil(t, err) + + err = SetVersion("bar") + + assert.NotNil(t, err) + assert.IsType(t, VersionAlreadySetError, err) + + assert.Equal(t, "foo", Version()) +} + +func TestAlignDuration(t *testing.T) { + tests := []struct { + name string + now time.Time + interval time.Duration + expected time.Duration + }{ + { + name: "aligned", + now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC), + interval: 10 * time.Second, + expected: 0 * time.Second, + }, + { + name: "standard interval", + now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC), + interval: 10 * time.Second, + expected: 9 * time.Second, + }, + { + name: "odd interval", + now: time.Date(2018, 1, 1, 1, 1, 1, 0, time.UTC), + interval: 3 * time.Second, + expected: 2 * time.Second, + }, + { + name: "sub second interval", + now: time.Date(2018, 1, 1, 1, 1, 0, 5e8, time.UTC), + interval: 1 * time.Second, + expected: 500 * time.Millisecond, + }, + { + name: "non divisible not aligned on minutes", + now: time.Date(2018, 1, 1, 1, 0, 0, 0, time.UTC), + interval: 1*time.Second + 100*time.Millisecond, + expected: 400 * time.Millisecond, + }, + { + name: "long interval", + now: time.Date(2018, 1, 1, 1, 1, 0, 0, time.UTC), + interval: 1 * time.Hour, + expected: 59 * time.Minute, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := AlignDuration(tt.now, tt.interval) + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestAlignTime(t *testing.T) { + rfc3339 := func(value string) time.Time { + t, _ := time.Parse(time.RFC3339, value) + return t + } + + tests := []struct { + name string + now time.Time + interval time.Duration + expected time.Time + }{ + { + name: "aligned", + now: rfc3339("2018-01-01T01:01:00Z"), + interval: 10 * time.Second, + expected: rfc3339("2018-01-01T01:01:00Z"), + }, + { + name: "aligned", + now: rfc3339("2018-01-01T01:01:01Z"), + interval: 10 * time.Second, + expected: rfc3339("2018-01-01T01:01:10Z"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := AlignTime(tt.now, tt.interval) + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestParseTimestamp(t *testing.T) { + rfc3339 := func(value string) time.Time { + tm, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + panic(err) + } + return tm + } + + tests := []struct { + name string + format string + timestamp interface{} + location string + expected time.Time + err bool + }{ + { + name: "parse layout string in utc", + format: "2006-01-02 15:04:05", + timestamp: "2019-02-20 21:50:34", + location: "UTC", + expected: rfc3339("2019-02-20T21:50:34Z"), + }, + { + name: "parse layout string with invalid timezone", + format: "2006-01-02 15:04:05", + timestamp: "2019-02-20 21:50:34", + location: "InvalidTimeZone", + err: true, + }, + { + name: "layout regression 6386", + format: "02.01.2006 15:04:05", + timestamp: "09.07.2019 00:11:00", + expected: rfc3339("2019-07-09T00:11:00Z"), + }, + { + name: "default location is utc", + format: "2006-01-02 15:04:05", + timestamp: "2019-02-20 21:50:34", + expected: rfc3339("2019-02-20T21:50:34Z"), + }, + { + name: "unix seconds without fractional", + format: "unix", + timestamp: "1568338208", + expected: rfc3339("2019-09-13T01:30:08Z"), + }, + { + name: "unix seconds with fractional", + format: "unix", + timestamp: "1568338208.500", + expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix seconds with fractional and comma decimal point", + format: "unix", + timestamp: "1568338208,500", + 
expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix seconds extra precision", + format: "unix", + timestamp: "1568338208.00000050042", + expected: rfc3339("2019-09-13T01:30:08.000000500Z"), + }, + { + name: "unix seconds integer", + format: "unix", + timestamp: int64(1568338208), + expected: rfc3339("2019-09-13T01:30:08Z"), + }, + { + name: "unix seconds float", + format: "unix", + timestamp: float64(1568338208.500), + expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix milliseconds", + format: "unix_ms", + timestamp: "1568338208500", + expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix milliseconds with fractional is ignored", + format: "unix_ms", + timestamp: "1568338208500.42", + expected: rfc3339("2019-09-13T01:30:08.500Z"), + }, + { + name: "unix microseconds", + format: "unix_us", + timestamp: "1568338208000500", + expected: rfc3339("2019-09-13T01:30:08.000500Z"), + }, + { + name: "unix nanoseconds", + format: "unix_ns", + timestamp: "1568338208000000500", + expected: rfc3339("2019-09-13T01:30:08.000000500Z"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tm, err := ParseTimestamp(tt.format, tt.timestamp, tt.location) + if tt.err { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tt.expected, tm) + } + }) + } +} + +func TestProductToken(t *testing.T) { + token := ProductToken() + // Telegraf version depends on the call to SetVersion, it cannot be set + // multiple times and is not thread-safe. + re := regexp.MustCompile(`^Telegraf/[^\s]+ Go/\d+.\d+(.\d+)?$`) + require.True(t, re.MatchString(token), token) +} diff --git a/internal/models/makemetric.go b/internal/models/makemetric.go deleted file mode 100644 index b74e236cd..000000000 --- a/internal/models/makemetric.go +++ /dev/null @@ -1,86 +0,0 @@ -package models - -import ( - "log" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" -) - -// makemetric is used by both RunningAggregator & RunningInput -// to make metrics. -// nameOverride: override the name of the measurement being made. -// namePrefix: add this prefix to each measurement name. -// nameSuffix: add this suffix to each measurement name. -// pluginTags: these are tags that are specific to this plugin. -// daemonTags: these are daemon-wide global tags, and get applied after pluginTags. -// filter: this is a filter to apply to each metric being made. -// applyFilter: if false, the above filter is not applied to each metric. -// This is used by Aggregators, because aggregators use filters -// on incoming metrics instead of on created metrics. -// TODO refactor this to not have such a huge func signature. 
-func makemetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - nameOverride string, - namePrefix string, - nameSuffix string, - pluginTags map[string]string, - daemonTags map[string]string, - filter Filter, - applyFilter bool, - mType telegraf.ValueType, - t time.Time, -) telegraf.Metric { - if len(fields) == 0 || len(measurement) == 0 { - return nil - } - if tags == nil { - tags = make(map[string]string) - } - - // Override measurement name if set - if len(nameOverride) != 0 { - measurement = nameOverride - } - // Apply measurement prefix and suffix if set - if len(namePrefix) != 0 { - measurement = namePrefix + measurement - } - if len(nameSuffix) != 0 { - measurement = measurement + nameSuffix - } - - // Apply plugin-wide tags if set - for k, v := range pluginTags { - if _, ok := tags[k]; !ok { - tags[k] = v - } - } - // Apply daemon-wide tags if set - for k, v := range daemonTags { - if _, ok := tags[k]; !ok { - tags[k] = v - } - } - - // Apply the metric filter(s) - // for aggregators, the filter does not get applied when the metric is made. - // instead, the filter is applied to metric incoming into the plugin. - // ie, it gets applied in the RunningAggregator.Apply function. - if applyFilter { - if ok := filter.Apply(measurement, fields, tags); !ok { - return nil - } - } - - m, err := metric.New(measurement, tags, fields, t, mType) - if err != nil { - log.Printf("Error adding point [%s]: %s\n", measurement, err.Error()) - return nil - } - - return m -} diff --git a/internal/models/running_aggregator.go b/internal/models/running_aggregator.go deleted file mode 100644 index 8cb04e4f6..000000000 --- a/internal/models/running_aggregator.go +++ /dev/null @@ -1,168 +0,0 @@ -package models - -import ( - "log" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" -) - -type RunningAggregator struct { - a telegraf.Aggregator - Config *AggregatorConfig - - metrics chan telegraf.Metric - - periodStart time.Time - periodEnd time.Time -} - -func NewRunningAggregator( - a telegraf.Aggregator, - conf *AggregatorConfig, -) *RunningAggregator { - return &RunningAggregator{ - a: a, - Config: conf, - metrics: make(chan telegraf.Metric, 100), - } -} - -// AggregatorConfig containing configuration parameters for the running -// aggregator plugin. -type AggregatorConfig struct { - Name string - - DropOriginal bool - NameOverride string - MeasurementPrefix string - MeasurementSuffix string - Tags map[string]string - Filter Filter - - Period time.Duration - Delay time.Duration -} - -func (r *RunningAggregator) Name() string { - return "aggregators." + r.Config.Name -} - -func (r *RunningAggregator) MakeMetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - mType telegraf.ValueType, - t time.Time, -) telegraf.Metric { - m := makemetric( - measurement, - fields, - tags, - r.Config.NameOverride, - r.Config.MeasurementPrefix, - r.Config.MeasurementSuffix, - r.Config.Tags, - nil, - r.Config.Filter, - false, - mType, - t, - ) - - if m != nil { - m.SetAggregate(true) - } - - return m -} - -// Add applies the given metric to the aggregator. -// Before applying to the plugin, it will run any defined filters on the metric. -// Apply returns true if the original metric should be dropped. 
-func (r *RunningAggregator) Add(in telegraf.Metric) bool { - if r.Config.Filter.IsActive() { - // check if the aggregator should apply this metric - name := in.Name() - fields := in.Fields() - tags := in.Tags() - t := in.Time() - if ok := r.Config.Filter.Apply(name, fields, tags); !ok { - // aggregator should not apply this metric - return false - } - - in, _ = metric.New(name, tags, fields, t) - } - - r.metrics <- in - return r.Config.DropOriginal -} -func (r *RunningAggregator) add(in telegraf.Metric) { - r.a.Add(in) -} - -func (r *RunningAggregator) push(acc telegraf.Accumulator) { - r.a.Push(acc) -} - -func (r *RunningAggregator) reset() { - r.a.Reset() -} - -// Run runs the running aggregator, listens for incoming metrics, and waits -// for period ticks to tell it when to push and reset the aggregator. -func (r *RunningAggregator) Run( - acc telegraf.Accumulator, - shutdown chan struct{}, -) { - // The start of the period is truncated to the nearest second. - // - // Every metric then gets it's timestamp checked and is dropped if it - // is not within: - // - // start < t < end + truncation + delay - // - // So if we start at now = 00:00.2 with a 10s period and 0.3s delay: - // now = 00:00.2 - // start = 00:00 - // truncation = 00:00.2 - // end = 00:10 - // 1st interval: 00:00 - 00:10.5 - // 2nd interval: 00:10 - 00:20.5 - // etc. - // - now := time.Now() - r.periodStart = now.Truncate(time.Second) - truncation := now.Sub(r.periodStart) - r.periodEnd = r.periodStart.Add(r.Config.Period) - time.Sleep(r.Config.Delay) - periodT := time.NewTicker(r.Config.Period) - defer periodT.Stop() - - for { - select { - case <-shutdown: - if len(r.metrics) > 0 { - // wait until metrics are flushed before exiting - continue - } - return - case m := <-r.metrics: - if m.Time().Before(r.periodStart) || - m.Time().After(r.periodEnd.Add(truncation).Add(r.Config.Delay)) { - // the metric is outside the current aggregation period, so - // skip it. - log.Printf("D! 
aggregator: metric \"%s\" is not in the current timewindow, skipping", m.Name()) - continue - } - r.add(m) - case <-periodT.C: - r.periodStart = r.periodEnd - r.periodEnd = r.periodStart.Add(r.Config.Period) - r.push(acc) - r.reset() - } - } -} diff --git a/internal/models/running_aggregator_test.go b/internal/models/running_aggregator_test.go deleted file mode 100644 index cf92fe675..000000000 --- a/internal/models/running_aggregator_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package models - -import ( - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" -) - -func TestAdd(t *testing.T) { - a := &TestAggregator{} - ra := NewRunningAggregator(a, &AggregatorConfig{ - Name: "TestRunningAggregator", - Filter: Filter{ - NamePass: []string{"*"}, - }, - Period: time.Millisecond * 500, - }) - assert.NoError(t, ra.Config.Filter.Compile()) - acc := testutil.Accumulator{} - go ra.Run(&acc, make(chan struct{})) - - m := ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - map[string]string{}, - telegraf.Untyped, - time.Now().Add(time.Millisecond*150), - ) - assert.False(t, ra.Add(m)) - - for { - time.Sleep(time.Millisecond) - if atomic.LoadInt64(&a.sum) > 0 { - break - } - } - assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum)) -} - -func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { - a := &TestAggregator{} - ra := NewRunningAggregator(a, &AggregatorConfig{ - Name: "TestRunningAggregator", - Filter: Filter{ - NamePass: []string{"*"}, - }, - Period: time.Millisecond * 500, - }) - assert.NoError(t, ra.Config.Filter.Compile()) - acc := testutil.Accumulator{} - go ra.Run(&acc, make(chan struct{})) - - // metric before current period - m := ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - map[string]string{}, - telegraf.Untyped, - time.Now().Add(-time.Hour), - ) - assert.False(t, ra.Add(m)) - - // metric after current period - m = ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - map[string]string{}, - telegraf.Untyped, - time.Now().Add(time.Hour), - ) - assert.False(t, ra.Add(m)) - - // "now" metric - m = ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - map[string]string{}, - telegraf.Untyped, - time.Now().Add(time.Millisecond*50), - ) - assert.False(t, ra.Add(m)) - - for { - time.Sleep(time.Millisecond) - if atomic.LoadInt64(&a.sum) > 0 { - break - } - } - assert.Equal(t, int64(101), atomic.LoadInt64(&a.sum)) -} - -func TestAddAndPushOnePeriod(t *testing.T) { - a := &TestAggregator{} - ra := NewRunningAggregator(a, &AggregatorConfig{ - Name: "TestRunningAggregator", - Filter: Filter{ - NamePass: []string{"*"}, - }, - Period: time.Millisecond * 500, - }) - assert.NoError(t, ra.Config.Filter.Compile()) - acc := testutil.Accumulator{} - shutdown := make(chan struct{}) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - ra.Run(&acc, shutdown) - }() - - m := ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - map[string]string{}, - telegraf.Untyped, - time.Now().Add(time.Millisecond*100), - ) - assert.False(t, ra.Add(m)) - - for { - time.Sleep(time.Millisecond) - if acc.NMetrics() > 0 { - break - } - } - acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)}) - - close(shutdown) - wg.Wait() -} - -func TestAddDropOriginal(t *testing.T) { - ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{ - Name: 
"TestRunningAggregator", - Filter: Filter{ - NamePass: []string{"RI*"}, - }, - DropOriginal: true, - }) - assert.NoError(t, ra.Config.Filter.Compile()) - - m := ra.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - map[string]string{}, - telegraf.Untyped, - time.Now(), - ) - assert.True(t, ra.Add(m)) - - // this metric name doesn't match the filter, so Add will return false - m2 := ra.MakeMetric( - "foobar", - map[string]interface{}{"value": int(101)}, - map[string]string{}, - telegraf.Untyped, - time.Now(), - ) - assert.False(t, ra.Add(m2)) -} - -type TestAggregator struct { - sum int64 -} - -func (t *TestAggregator) Description() string { return "" } -func (t *TestAggregator) SampleConfig() string { return "" } -func (t *TestAggregator) Reset() { - atomic.StoreInt64(&t.sum, 0) -} - -func (t *TestAggregator) Push(acc telegraf.Accumulator) { - acc.AddFields("TestMetric", - map[string]interface{}{"sum": t.sum}, - map[string]string{}, - ) -} - -func (t *TestAggregator) Add(in telegraf.Metric) { - for _, v := range in.Fields() { - if vi, ok := v.(int64); ok { - atomic.AddInt64(&t.sum, vi) - } - } -} diff --git a/internal/models/running_input.go b/internal/models/running_input.go deleted file mode 100644 index ffe0b5f59..000000000 --- a/internal/models/running_input.go +++ /dev/null @@ -1,102 +0,0 @@ -package models - -import ( - "fmt" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/serializers/influx" - "github.com/influxdata/telegraf/selfstat" -) - -var GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{}) - -type RunningInput struct { - Input telegraf.Input - Config *InputConfig - - trace bool - defaultTags map[string]string - - MetricsGathered selfstat.Stat -} - -func NewRunningInput( - input telegraf.Input, - config *InputConfig, -) *RunningInput { - return &RunningInput{ - Input: input, - Config: config, - MetricsGathered: selfstat.Register( - "gather", - "metrics_gathered", - map[string]string{"input": config.Name}, - ), - } -} - -// InputConfig containing a name, interval, and filter -type InputConfig struct { - Name string - NameOverride string - MeasurementPrefix string - MeasurementSuffix string - Tags map[string]string - Filter Filter - Interval time.Duration -} - -func (r *RunningInput) Name() string { - return "inputs." + r.Config.Name -} - -// MakeMetric either returns a metric, or returns nil if the metric doesn't -// need to be created (because of filtering, an error, etc.) 
-func (r *RunningInput) MakeMetric( - measurement string, - fields map[string]interface{}, - tags map[string]string, - mType telegraf.ValueType, - t time.Time, -) telegraf.Metric { - m := makemetric( - measurement, - fields, - tags, - r.Config.NameOverride, - r.Config.MeasurementPrefix, - r.Config.MeasurementSuffix, - r.Config.Tags, - r.defaultTags, - r.Config.Filter, - true, - mType, - t, - ) - - if r.trace && m != nil { - s := influx.NewSerializer() - s.SetFieldSortOrder(influx.SortFields) - octets, err := s.Serialize(m) - if err == nil { - fmt.Print("> " + string(octets)) - } - } - - r.MetricsGathered.Incr(1) - GlobalMetricsGathered.Incr(1) - return m -} - -func (r *RunningInput) Trace() bool { - return r.trace -} - -func (r *RunningInput) SetTrace(trace bool) { - r.trace = trace -} - -func (r *RunningInput) SetDefaultTags(tags map[string]string) { - r.defaultTags = tags -} diff --git a/internal/models/running_output.go b/internal/models/running_output.go deleted file mode 100644 index 713c28cce..000000000 --- a/internal/models/running_output.go +++ /dev/null @@ -1,194 +0,0 @@ -package models - -import ( - "log" - "sync" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal/buffer" - "github.com/influxdata/telegraf/metric" - "github.com/influxdata/telegraf/selfstat" -) - -const ( - // Default size of metrics batch size. - DEFAULT_METRIC_BATCH_SIZE = 1000 - - // Default number of metrics kept. It should be a multiple of batch size. - DEFAULT_METRIC_BUFFER_LIMIT = 10000 -) - -// RunningOutput contains the output configuration -type RunningOutput struct { - Name string - Output telegraf.Output - Config *OutputConfig - MetricBufferLimit int - MetricBatchSize int - - MetricsFiltered selfstat.Stat - MetricsWritten selfstat.Stat - BufferSize selfstat.Stat - BufferLimit selfstat.Stat - WriteTime selfstat.Stat - - metrics *buffer.Buffer - failMetrics *buffer.Buffer - - // Guards against concurrent calls to the Output as described in #3009 - sync.Mutex -} - -func NewRunningOutput( - name string, - output telegraf.Output, - conf *OutputConfig, - batchSize int, - bufferLimit int, -) *RunningOutput { - if bufferLimit == 0 { - bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT - } - if batchSize == 0 { - batchSize = DEFAULT_METRIC_BATCH_SIZE - } - ro := &RunningOutput{ - Name: name, - metrics: buffer.NewBuffer(batchSize), - failMetrics: buffer.NewBuffer(bufferLimit), - Output: output, - Config: conf, - MetricBufferLimit: bufferLimit, - MetricBatchSize: batchSize, - MetricsWritten: selfstat.Register( - "write", - "metrics_written", - map[string]string{"output": name}, - ), - MetricsFiltered: selfstat.Register( - "write", - "metrics_filtered", - map[string]string{"output": name}, - ), - BufferSize: selfstat.Register( - "write", - "buffer_size", - map[string]string{"output": name}, - ), - BufferLimit: selfstat.Register( - "write", - "buffer_limit", - map[string]string{"output": name}, - ), - WriteTime: selfstat.RegisterTiming( - "write", - "write_time_ns", - map[string]string{"output": name}, - ), - } - ro.BufferLimit.Set(int64(ro.MetricBufferLimit)) - return ro -} - -// AddMetric adds a metric to the output. This function can also write cached -// points if FlushBufferWhenFull is true. 
-func (ro *RunningOutput) AddMetric(m telegraf.Metric) { - if m == nil { - return - } - // Filter any tagexclude/taginclude parameters before adding metric - if ro.Config.Filter.IsActive() { - // In order to filter out tags, we need to create a new metric, since - // metrics are immutable once created. - name := m.Name() - tags := m.Tags() - fields := m.Fields() - t := m.Time() - if ok := ro.Config.Filter.Apply(name, fields, tags); !ok { - ro.MetricsFiltered.Incr(1) - return - } - // error is not possible if creating from another metric, so ignore. - m, _ = metric.New(name, tags, fields, t) - } - - ro.metrics.Add(m) - if ro.metrics.Len() == ro.MetricBatchSize { - batch := ro.metrics.Batch(ro.MetricBatchSize) - err := ro.write(batch) - if err != nil { - ro.failMetrics.Add(batch...) - } - } -} - -// Write writes all cached points to this output. -func (ro *RunningOutput) Write() error { - nFails, nMetrics := ro.failMetrics.Len(), ro.metrics.Len() - ro.BufferSize.Set(int64(nFails + nMetrics)) - log.Printf("D! Output [%s] buffer fullness: %d / %d metrics. ", - ro.Name, nFails+nMetrics, ro.MetricBufferLimit) - var err error - if !ro.failMetrics.IsEmpty() { - // how many batches of failed writes we need to write. - nBatches := nFails/ro.MetricBatchSize + 1 - batchSize := ro.MetricBatchSize - - for i := 0; i < nBatches; i++ { - // If it's the last batch, only grab the metrics that have not had - // a write attempt already (this is primarily to preserve order). - if i == nBatches-1 { - batchSize = nFails % ro.MetricBatchSize - } - batch := ro.failMetrics.Batch(batchSize) - // If we've already failed previous writes, don't bother trying to - // write to this output again. We are not exiting the loop just so - // that we can rotate the metrics to preserve order. - if err == nil { - err = ro.write(batch) - } - if err != nil { - ro.failMetrics.Add(batch...) - } - } - } - - batch := ro.metrics.Batch(ro.MetricBatchSize) - // see comment above about not trying to write to an already failed output. - // if ro.failMetrics is empty then err will always be nil at this point. - if err == nil { - err = ro.write(batch) - } - - if err != nil { - ro.failMetrics.Add(batch...) - return err - } - return nil -} - -func (ro *RunningOutput) write(metrics []telegraf.Metric) error { - nMetrics := len(metrics) - if nMetrics == 0 { - return nil - } - ro.Lock() - defer ro.Unlock() - start := time.Now() - err := ro.Output.Write(metrics) - elapsed := time.Since(start) - if err == nil { - log.Printf("D! 
Output [%s] wrote batch of %d metrics in %s\n", - ro.Name, nMetrics, elapsed) - ro.MetricsWritten.Incr(int64(nMetrics)) - ro.WriteTime.Incr(elapsed.Nanoseconds()) - } - return err -} - -// OutputConfig containing name and filter -type OutputConfig struct { - Name string - Filter Filter -} diff --git a/internal/models/running_processor.go b/internal/models/running_processor.go deleted file mode 100644 index 92d3d44d0..000000000 --- a/internal/models/running_processor.go +++ /dev/null @@ -1,51 +0,0 @@ -package models - -import ( - "sync" - - "github.com/influxdata/telegraf" -) - -type RunningProcessor struct { - Name string - - sync.Mutex - Processor telegraf.Processor - Config *ProcessorConfig -} - -type RunningProcessors []*RunningProcessor - -func (rp RunningProcessors) Len() int { return len(rp) } -func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] } -func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order } - -// FilterConfig containing a name and filter -type ProcessorConfig struct { - Name string - Order int64 - Filter Filter -} - -func (rp *RunningProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { - rp.Lock() - defer rp.Unlock() - - ret := []telegraf.Metric{} - - for _, metric := range in { - if rp.Config.Filter.IsActive() { - // check if the filter should be applied to this metric - if ok := rp.Config.Filter.Apply(metric.Name(), metric.Fields(), metric.Tags()); !ok { - // this means filter should not be applied - ret = append(ret, metric) - continue - } - } - // This metric should pass through the filter, so call the filter Apply - // function and append results to the output slice. - ret = append(ret, rp.Processor.Apply(metric)...) - } - - return ret -} diff --git a/internal/models/running_processor_test.go b/internal/models/running_processor_test.go deleted file mode 100644 index 8a691a9b8..000000000 --- a/internal/models/running_processor_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package models - -import ( - "testing" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" -) - -type TestProcessor struct { -} - -func (f *TestProcessor) SampleConfig() string { return "" } -func (f *TestProcessor) Description() string { return "" } - -// Apply renames: -// "foo" to "fuz" -// "bar" to "baz" -// And it also drops measurements named "dropme" -func (f *TestProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { - out := make([]telegraf.Metric, 0) - for _, m := range in { - switch m.Name() { - case "foo": - out = append(out, testutil.TestMetric(1, "fuz")) - case "bar": - out = append(out, testutil.TestMetric(1, "baz")) - case "dropme": - // drop the metric! - default: - out = append(out, m) - } - } - return out -} - -func NewTestRunningProcessor() *RunningProcessor { - out := &RunningProcessor{ - Name: "test", - Processor: &TestProcessor{}, - Config: &ProcessorConfig{Filter: Filter{}}, - } - return out -} - -func TestRunningProcessor(t *testing.T) { - inmetrics := []telegraf.Metric{ - testutil.TestMetric(1, "foo"), - testutil.TestMetric(1, "bar"), - testutil.TestMetric(1, "baz"), - } - - expectedNames := []string{ - "fuz", - "baz", - "baz", - } - rfp := NewTestRunningProcessor() - filteredMetrics := rfp.Apply(inmetrics...) 
- - actualNames := []string{ - filteredMetrics[0].Name(), - filteredMetrics[1].Name(), - filteredMetrics[2].Name(), - } - assert.Equal(t, expectedNames, actualNames) -} - -func TestRunningProcessor_WithNameDrop(t *testing.T) { - inmetrics := []telegraf.Metric{ - testutil.TestMetric(1, "foo"), - testutil.TestMetric(1, "bar"), - testutil.TestMetric(1, "baz"), - } - - expectedNames := []string{ - "foo", - "baz", - "baz", - } - rfp := NewTestRunningProcessor() - - rfp.Config.Filter.NameDrop = []string{"foo"} - assert.NoError(t, rfp.Config.Filter.Compile()) - - filteredMetrics := rfp.Apply(inmetrics...) - - actualNames := []string{ - filteredMetrics[0].Name(), - filteredMetrics[1].Name(), - filteredMetrics[2].Name(), - } - assert.Equal(t, expectedNames, actualNames) -} - -func TestRunningProcessor_DroppedMetric(t *testing.T) { - inmetrics := []telegraf.Metric{ - testutil.TestMetric(1, "dropme"), - testutil.TestMetric(1, "foo"), - testutil.TestMetric(1, "bar"), - } - - expectedNames := []string{ - "fuz", - "baz", - } - rfp := NewTestRunningProcessor() - filteredMetrics := rfp.Apply(inmetrics...) - - actualNames := []string{ - filteredMetrics[0].Name(), - filteredMetrics[1].Name(), - } - assert.Equal(t, expectedNames, actualNames) -} diff --git a/internal/rotate/file_writer.go b/internal/rotate/file_writer.go new file mode 100644 index 000000000..a167b7cb7 --- /dev/null +++ b/internal/rotate/file_writer.go @@ -0,0 +1,185 @@ +package rotate + +// Rotating things +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +// FilePerm defines the permissions that Writer will use for all +// the files it creates. +const ( + FilePerm = os.FileMode(0644) + DateFormat = "2006-01-02" +) + +// FileWriter implements the io.Writer interface and writes to the +// filename specified. +// Will rotate at the specified interval and/or when the current file size exceeds maxSizeInBytes +// At rotation time, current file is renamed and a new file is created. +// If the number of archives exceeds maxArchives, older files are deleted. +type FileWriter struct { + filename string + filenameRotationTemplate string + current *os.File + interval time.Duration + maxSizeInBytes int64 + maxArchives int + expireTime time.Time + bytesWritten int64 + sync.Mutex +} + +// NewFileWriter creates a new file writer. +func NewFileWriter(filename string, interval time.Duration, maxSizeInBytes int64, maxArchives int) (io.WriteCloser, error) { + if interval == 0 && maxSizeInBytes <= 0 { + // No rotation needed so a basic io.Writer will do the trick + return openFile(filename) + } + + w := &FileWriter{ + filename: filename, + interval: interval, + maxSizeInBytes: maxSizeInBytes, + maxArchives: maxArchives, + filenameRotationTemplate: getFilenameRotationTemplate(filename), + } + + if err := w.openCurrent(); err != nil { + return nil, err + } + + return w, nil +} + +func openFile(filename string) (*os.File, error) { + return os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, FilePerm) +} + +func getFilenameRotationTemplate(filename string) string { + // Extract the file extension + fileExt := filepath.Ext(filename) + // Remove the file extension from the filename (if any) + stem := strings.TrimSuffix(filename, fileExt) + return stem + ".%s-%s" + fileExt +} + +// Write writes p to the current file, then checks to see if +// rotation is necessary. 
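
As a quick illustration of the writer introduced above (a minimal sketch, not part of the applied patch: it assumes only NewFileWriter as declared in this file plus the standard library, and the log path and limits are invented):

package main

import (
	"log"
	"time"

	"github.com/influxdata/telegraf/internal/rotate"
)

func main() {
	// Rotate daily or once the file exceeds 10 MB, keeping the 5 newest archives.
	w, err := rotate.NewFileWriter("/var/log/telegraf/telegraf.log", 24*time.Hour, 10*1024*1024, 5)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close() // Close rotates the current file one final time
	log.SetOutput(w)
	log.Println("I! log output now goes through the rotating writer")
}
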
+func (w *FileWriter) Write(p []byte) (n int, err error) { + w.Lock() + defer w.Unlock() + if n, err = w.current.Write(p); err != nil { + return 0, err + } + w.bytesWritten += int64(n) + + if err = w.rotateIfNeeded(); err != nil { + return 0, err + } + + return n, nil +} + +// Close closes the current file. Writer is unusable after this +// is called. +func (w *FileWriter) Close() (err error) { + w.Lock() + defer w.Unlock() + + // Rotate before closing + if err = w.rotate(); err != nil { + return err + } + + w.current = nil + return nil +} + +func (w *FileWriter) openCurrent() (err error) { + // In case ModTime() fails, we use time.Now() + w.expireTime = time.Now().Add(w.interval) + w.bytesWritten = 0 + w.current, err = openFile(w.filename) + + if err != nil { + return err + } + + // Goal here is to rotate old pre-existing files. + // For that we use fileInfo.ModTime, instead of time.Now(). + // Example: telegraf is restarted every 23 hours and + // the rotation interval is set to 24 hours. + // With time.now() as a reference we'd never rotate the file. + if fileInfo, err := w.current.Stat(); err == nil { + w.expireTime = fileInfo.ModTime().Add(w.interval) + w.bytesWritten = fileInfo.Size() + } + + if err = w.rotateIfNeeded(); err != nil { + return err + } + return nil +} + +func (w *FileWriter) rotateIfNeeded() error { + if (w.interval > 0 && time.Now().After(w.expireTime)) || + (w.maxSizeInBytes > 0 && w.bytesWritten >= w.maxSizeInBytes) { + if err := w.rotate(); err != nil { + //Ignore rotation errors and keep the log open + fmt.Printf("unable to rotate the file '%s', %s", w.filename, err.Error()) + } + return w.openCurrent() + } + return nil +} + +func (w *FileWriter) rotate() (err error) { + if err = w.current.Close(); err != nil { + return err + } + + // Use year-month-date for readability, unix time to make the file name unique with second precision + now := time.Now() + rotatedFilename := fmt.Sprintf(w.filenameRotationTemplate, now.Format(DateFormat), strconv.FormatInt(now.Unix(), 10)) + if err = os.Rename(w.filename, rotatedFilename); err != nil { + return err + } + + if err = w.purgeArchivesIfNeeded(); err != nil { + return err + } + + return nil +} + +func (w *FileWriter) purgeArchivesIfNeeded() (err error) { + if w.maxArchives == -1 { + //Skip archiving + return nil + } + + var matches []string + if matches, err = filepath.Glob(fmt.Sprintf(w.filenameRotationTemplate, "*", "*")); err != nil { + return err + } + + //if there are more archives than the configured maximum, then purge older files + if len(matches) > w.maxArchives { + //sort files alphanumerically to delete older files first + sort.Strings(matches) + for _, filename := range matches[:len(matches)-w.maxArchives] { + if err = os.Remove(filename); err != nil { + return err + } + } + } + return nil +} diff --git a/internal/rotate/file_writer_test.go b/internal/rotate/file_writer_test.go new file mode 100644 index 000000000..ca29b9a2f --- /dev/null +++ b/internal/rotate/file_writer_test.go @@ -0,0 +1,148 @@ +package rotate + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFileWriter_NoRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationNo") + require.NoError(t, err) + writer, err := NewFileWriter(filepath.Join(tempDir, "test"), 0, 0, 0) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("Hello World")) + 
require.NoError(t, err) + _, err = writer.Write([]byte("Hello World 2")) + require.NoError(t, err) + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 1, len(files)) +} + +func TestFileWriter_TimeRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationTime") + require.NoError(t, err) + interval, _ := time.ParseDuration("1s") + writer, err := NewFileWriter(filepath.Join(tempDir, "test"), interval, 0, -1) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("Hello World")) + require.NoError(t, err) + time.Sleep(1 * time.Second) + _, err = writer.Write([]byte("Hello World 2")) + require.NoError(t, err) + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 2, len(files)) +} + +func TestFileWriter_ReopenTimeRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationTime") + require.NoError(t, err) + interval, _ := time.ParseDuration("1s") + filePath := filepath.Join(tempDir, "test.log") + err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + time.Sleep(1 * time.Second) + assert.NoError(t, err) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), interval, 0, -1) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 2, len(files)) +} + +func TestFileWriter_SizeRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationSize") + require.NoError(t, err) + maxSize := int64(9) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("Hello World")) + require.NoError(t, err) + _, err = writer.Write([]byte("World 2")) + require.NoError(t, err) + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 2, len(files)) +} + +func TestFileWriter_ReopenSizeRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationSize") + require.NoError(t, err) + maxSize := int64(12) + filePath := filepath.Join(tempDir, "test.log") + err = ioutil.WriteFile(filePath, []byte("Hello World"), 0644) + assert.NoError(t, err) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("Hello World Again")) + require.NoError(t, err) + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 2, len(files)) +} + +func TestFileWriter_DeleteArchives(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationDeleteArchives") + require.NoError(t, err) + maxSize := int64(5) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, 2) + require.NoError(t, err) + defer func() { writer.Close(); os.RemoveAll(tempDir) }() + + _, err = writer.Write([]byte("First file")) + require.NoError(t, err) + // File names include the date with second precision + // So, to force rotation with different file names + // we need to wait + time.Sleep(1 * time.Second) + _, err = writer.Write([]byte("Second file")) + require.NoError(t, err) + time.Sleep(1 * time.Second) + _, err = writer.Write([]byte("Third file")) + require.NoError(t, err) + + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 3, len(files)) + + for _, tempFile := range files { + var bytes []byte + var err error + path := filepath.Join(tempDir, tempFile.Name()) + if bytes, err = ioutil.ReadFile(path); err != nil { + t.Error(err.Error()) + return + } + contents 
:= string(bytes) + + if contents != "" && contents != "Second file" && contents != "Third file" { + t.Error("Should have deleted the eldest log file") + return + } + } +} + +func TestFileWriter_CloseRotates(t *testing.T) { + tempDir, err := ioutil.TempDir("", "RotationClose") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + maxSize := int64(9) + writer, err := NewFileWriter(filepath.Join(tempDir, "test.log"), 0, maxSize, -1) + require.NoError(t, err) + + writer.Close() + + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 1, len(files)) + assert.Regexp(t, "^test\\.[^\\.]+\\.log$", files[0].Name()) +} diff --git a/internal/syslog/framing.go b/internal/syslog/framing.go new file mode 100644 index 000000000..6edfc7058 --- /dev/null +++ b/internal/syslog/framing.go @@ -0,0 +1,64 @@ +package syslog + +import ( + "fmt" + "strings" +) + +// Framing represents the framing technique we expect the messages to come. +type Framing int + +const ( + // OctetCounting indicates the transparent framing technique for syslog transport. + OctetCounting Framing = iota + // NonTransparent indicates the non-transparent framing technique for syslog transport. + NonTransparent +) + +func (f Framing) String() string { + switch f { + case OctetCounting: + return "OCTET-COUNTING" + case NonTransparent: + return "NON-TRANSPARENT" + } + return "" +} + +// UnmarshalTOML implements ability to unmarshal framing from TOML files. +func (f *Framing) UnmarshalTOML(data []byte) (err error) { + return f.UnmarshalText(data) +} + +// UnmarshalText implements encoding.TextUnmarshaler +func (f *Framing) UnmarshalText(data []byte) (err error) { + s := string(data) + switch strings.ToUpper(s) { + case `OCTET-COUNTING`: + fallthrough + case `"OCTET-COUNTING"`: + fallthrough + case `'OCTET-COUNTING'`: + *f = OctetCounting + return + + case `NON-TRANSPARENT`: + fallthrough + case `"NON-TRANSPARENT"`: + fallthrough + case `'NON-TRANSPARENT'`: + *f = NonTransparent + return + } + *f = -1 + return fmt.Errorf("unknown framing") +} + +// MarshalText implements encoding.TextMarshaler +func (f Framing) MarshalText() ([]byte, error) { + s := f.String() + if s != "" { + return []byte(s), nil + } + return nil, fmt.Errorf("unknown framing") +} diff --git a/internal/syslog/framing_test.go b/internal/syslog/framing_test.go new file mode 100644 index 000000000..1442eba7f --- /dev/null +++ b/internal/syslog/framing_test.go @@ -0,0 +1,37 @@ +package syslog + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestFraming(t *testing.T) { + var f1 Framing + f1.UnmarshalTOML([]byte(`"non-transparent"`)) + assert.Equal(t, NonTransparent, f1) + + var f2 Framing + f2.UnmarshalTOML([]byte(`non-transparent`)) + assert.Equal(t, NonTransparent, f2) + + var f3 Framing + f3.UnmarshalTOML([]byte(`'non-transparent'`)) + assert.Equal(t, NonTransparent, f3) + + var f4 Framing + f4.UnmarshalTOML([]byte(`"octet-counting"`)) + assert.Equal(t, OctetCounting, f4) + + var f5 Framing + f5.UnmarshalTOML([]byte(`octet-counting`)) + assert.Equal(t, OctetCounting, f5) + + var f6 Framing + f6.UnmarshalTOML([]byte(`'octet-counting'`)) + assert.Equal(t, OctetCounting, f6) + + var f7 Framing + err := f7.UnmarshalTOML([]byte(`nope`)) + assert.Equal(t, Framing(-1), f7) + assert.Error(t, err) +} diff --git a/internal/templating/engine_test.go b/internal/templating/engine_test.go new file mode 100644 index 000000000..0dfcb89d8 --- /dev/null +++ b/internal/templating/engine_test.go @@ -0,0 +1,77 @@ +package templating + +import ( + "testing" + + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEngineAlternateSeparator(t *testing.T) { + defaultTemplate, _ := NewDefaultTemplateWithPattern("topic*") + engine, err := NewEngine("_", defaultTemplate, []string{ + "/ /*/*/* /measurement/origin/measurement*", + }) + require.NoError(t, err) + name, tags, field, err := engine.Apply("/telegraf/host01/cpu") + require.NoError(t, err) + require.Equal(t, "telegraf_cpu", name) + require.Equal(t, map[string]string{ + "origin": "host01", + }, tags) + require.Equal(t, "", field) +} + +func TestEngineWithWildcardTemplate(t *testing.T) { + var ( + defaultTmpl, err = NewDefaultTemplateWithPattern("measurement*") + templates = []string{ + "taskmanagerTask.alarm-detector.Assign.alarmDefinitionId metricsType.process.nodeId.x.alarmDefinitionId.measurement.field rule=1", + "taskmanagerTask.*.*.*.* metricsType.process.nodeId.measurement rule=2", + } + ) + require.NoError(t, err) + + engine, err := NewEngine(".", defaultTmpl, templates) + require.NoError(t, err) + + for _, testCase := range []struct { + line string + measurement string + field string + tags map[string]string + }{ + { + line: "taskmanagerTask.alarm-detector.Assign.alarmDefinitionId.timeout_errors.duration.p75", + measurement: "duration", + field: "p75", + tags: map[string]string{ + "metricsType": "taskmanagerTask", + "process": "alarm-detector", + "nodeId": "Assign", + "x": "alarmDefinitionId", + "alarmDefinitionId": "timeout_errors", + "rule": "1", + }, + }, + { + line: "taskmanagerTask.alarm-detector.Assign.numRecordsInPerSecond.m5_rate", + measurement: "numRecordsInPerSecond", + tags: map[string]string{ + "metricsType": "taskmanagerTask", + "process": "alarm-detector", + "nodeId": "Assign", + "rule": "2", + }, + }, + } { + t.Run(testCase.line, func(t *testing.T) { + measurement, tags, field, err := engine.Apply(testCase.line) + require.NoError(t, err) + + assert.Equal(t, testCase.measurement, measurement) + assert.Equal(t, testCase.field, field) + assert.Equal(t, testCase.tags, tags) + }) + } +} diff --git a/internal/templating/node.go b/internal/templating/node.go index 83ab1a40c..bf68509a0 100644 --- a/internal/templating/node.go +++ b/internal/templating/node.go @@ -55,32 +55,44 @@ func (n *node) search(line string) *Template { // recursiveSearch performs the actual recursive search func (n *node) recursiveSearch(lineParts []string) *Template { - // Nothing to search + // nothing to search if len(lineParts) == 0 || len(n.children) == 0 { return n.template } - // If last element is a wildcard, don't include it in this search since it's sorted - // to the end but lexicographically it would not always be and sort.Search assumes - // the slice is sorted. - length := len(n.children) - if n.children[length-1].value == "*" { + var ( + hasWildcard bool + length = len(n.children) + ) + + // exclude last child from search if it is a wildcard. 
sort.Search expects + // a lexicographically sorted set of children and we have artificially sorted + // wildcards to the end of the child set + // wildcards will be searched separately if no exact match is found + if hasWildcard = n.children[length-1].value == "*"; hasWildcard { length-- } - // Find the index of child with an exact match i := sort.Search(length, func(i int) bool { return n.children[i].value >= lineParts[0] }) - // Found an exact match, so search that child sub-tree - if i < len(n.children) && n.children[i].value == lineParts[0] { - return n.children[i].recursiveSearch(lineParts[1:]) + // given an exact match is found within children set + if i < length && n.children[i].value == lineParts[0] { + // descend into the matching node + if tmpl := n.children[i].recursiveSearch(lineParts[1:]); tmpl != nil { + // given a template is found return it + return tmpl + } } - // Not an exact match, see if we have a wildcard child to search - if n.children[len(n.children)-1].value == "*" { - return n.children[len(n.children)-1].recursiveSearch(lineParts[1:]) + + // given no template is found and the last child is a wildcard + if hasWildcard { + // also search the wildcard child node + return n.children[length].recursiveSearch(lineParts[1:]) } + + // fallback to returning template at this node return n.template } diff --git a/internal/templating/template.go b/internal/templating/template.go index 472bd2686..235d2f2a5 100644 --- a/internal/templating/template.go +++ b/internal/templating/template.go @@ -124,21 +124,16 @@ type templateSpecs []templateSpec // Less reports whether the element with // index j should sort before the element with index k. func (e templateSpecs) Less(j, k int) bool { - if len(e[j].filter) == 0 && len(e[k].filter) == 0 { - jlength := len(strings.Split(e[j].template, e[j].separator)) - klength := len(strings.Split(e[k].template, e[k].separator)) - return jlength < klength - } - if len(e[j].filter) == 0 { + jlen := len(e[j].filter) + klen := len(e[k].filter) + if jlen == 0 && klen != 0 { return true } - if len(e[k].filter) == 0 { + if klen == 0 && jlen != 0 { return false } - - jlength := len(strings.Split(e[j].template, e[j].separator)) - klength := len(strings.Split(e[k].template, e[k].separator)) - return jlength < klength + return strings.Count(e[j].template, e[j].separator) < + strings.Count(e[k].template, e[k].separator) } // Swap swaps the elements with indexes i and j. 
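
The recursiveSearch rewrite above means a line that matches an exact child but finds no template beneath it now falls back to a sibling wildcard instead of dropping straight to the default template. A minimal sketch of that effect through the engine API exercised in the new engine_test.go (the template strings and input line are invented for illustration):

package main

import (
	"fmt"

	"github.com/influxdata/telegraf/internal/templating"
)

func main() {
	defaultTmpl, _ := templating.NewDefaultTemplateWithPattern("measurement*")
	engine, _ := templating.NewEngine(".", defaultTmpl, []string{
		"servers.localhost.cpu measurement.host.measurement",
		"servers.* measurement.host.measurement*",
	})
	// "servers.localhost.mem.free" descends the exact "localhost" branch, finds
	// no template below it, and with the change above retries the "servers.*"
	// wildcard branch rather than returning only the default template.
	name, tags, _, err := engine.Apply("servers.localhost.mem.free")
	fmt.Println(name, tags, err)
}
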
diff --git a/internal/templating/template_test.go b/internal/templating/template_test.go new file mode 100644 index 000000000..0a1aae5bc --- /dev/null +++ b/internal/templating/template_test.go @@ -0,0 +1,14 @@ +package templating + +import "testing" + +func BenchmarkTemplateLess(b *testing.B) { + a := templateSpec{ + template: "aa|bb|cc|dd|ee|ff", + separator: "|", + } + specs := templateSpecs{a, a} + for i := 0; i < b.N; i++ { + specs.Less(0, 1) + } +} diff --git a/internal/tls/common.go b/internal/tls/common.go new file mode 100644 index 000000000..1ceb20c3f --- /dev/null +++ b/internal/tls/common.go @@ -0,0 +1,38 @@ +package tls + +import "crypto/tls" + +var tlsVersionMap = map[string]uint16{ + "TLS10": tls.VersionTLS10, + "TLS11": tls.VersionTLS11, + "TLS12": tls.VersionTLS12, + "TLS13": tls.VersionTLS13, +} + +var tlsCipherMap = map[string]uint16{ + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, +} diff --git a/internal/tls/config.go b/internal/tls/config.go index ce7958343..59fbc4952 100644 --- a/internal/tls/config.go +++ b/internal/tls/config.go @@ -5,6 +5,7 @@ import ( "crypto/x509" "fmt" "io/ioutil" + "strings" ) // ClientConfig represents the standard client TLS config. 
@@ -25,6 +26,9 @@ type ServerConfig struct { TLSCert string `toml:"tls_cert"` TLSKey string `toml:"tls_key"` TLSAllowedCACerts []string `toml:"tls_allowed_cacerts"` + TLSCipherSuites []string `toml:"tls_cipher_suites"` + TLSMinVersion string `toml:"tls_min_version"` + TLSMaxVersion string `toml:"tls_max_version"` } // TLSConfig returns a tls.Config, may be nil without error if TLS is not @@ -97,6 +101,38 @@ func (c *ServerConfig) TLSConfig() (*tls.Config, error) { } } + if len(c.TLSCipherSuites) != 0 { + cipherSuites, err := ParseCiphers(c.TLSCipherSuites) + if err != nil { + return nil, fmt.Errorf( + "could not parse server cipher suites %s: %v", strings.Join(c.TLSCipherSuites, ","), err) + } + tlsConfig.CipherSuites = cipherSuites + } + + if c.TLSMaxVersion != "" { + version, err := ParseTLSVersion(c.TLSMaxVersion) + if err != nil { + return nil, fmt.Errorf( + "could not parse tls max version %q: %v", c.TLSMaxVersion, err) + } + tlsConfig.MaxVersion = version + } + + if c.TLSMinVersion != "" { + version, err := ParseTLSVersion(c.TLSMinVersion) + if err != nil { + return nil, fmt.Errorf( + "could not parse tls min version %q: %v", c.TLSMinVersion, err) + } + tlsConfig.MinVersion = version + } + + if tlsConfig.MinVersion != 0 && tlsConfig.MaxVersion != 0 && tlsConfig.MinVersion > tlsConfig.MaxVersion { + return nil, fmt.Errorf( + "tls min version %q can't be greater than tls max version %q", tlsConfig.MinVersion, tlsConfig.MaxVersion) + } + return tlsConfig, nil } diff --git a/internal/tls/config_test.go b/internal/tls/config_test.go index 31a70d9a1..66ccad70d 100644 --- a/internal/tls/config_test.go +++ b/internal/tls/config_test.go @@ -123,6 +123,47 @@ func TestServerConfig(t *testing.T) { TLSCert: pki.ServerCertPath(), TLSKey: pki.ServerKeyPath(), TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, + TLSMinVersion: pki.TLSMinVersion(), + TLSMaxVersion: pki.TLSMaxVersion(), + }, + }, + { + name: "missing tls cipher suites is okay", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, + }, + }, + { + name: "missing tls max version is okay", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, + TLSMaxVersion: pki.TLSMaxVersion(), + }, + }, + { + name: "missing tls min version is okay", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, + TLSMinVersion: pki.TLSMinVersion(), + }, + }, + { + name: "missing tls min/max versions is okay", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, }, }, { @@ -172,6 +213,56 @@ func TestServerConfig(t *testing.T) { expNil: true, expErr: true, }, + { + name: "invalid cipher suites", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CACertPath()}, + }, + expNil: true, + expErr: true, + }, + { + name: "TLS Max Version less than TLS Min version", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: 
[]string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CACertPath()}, + TLSMinVersion: pki.TLSMaxVersion(), + TLSMaxVersion: pki.TLSMinVersion(), + }, + expNil: true, + expErr: true, + }, + { + name: "invalid tls min version", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CipherSuite()}, + TLSMinVersion: pki.ServerKeyPath(), + TLSMaxVersion: pki.TLSMaxVersion(), + }, + expNil: true, + expErr: true, + }, + { + name: "invalid tls max version", + server: tls.ServerConfig{ + TLSCert: pki.ServerCertPath(), + TLSKey: pki.ServerKeyPath(), + TLSAllowedCACerts: []string{pki.CACertPath()}, + TLSCipherSuites: []string{pki.CACertPath()}, + TLSMinVersion: pki.TLSMinVersion(), + TLSMaxVersion: pki.ServerCertPath(), + }, + expNil: true, + expErr: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/internal/tls/utils.go b/internal/tls/utils.go new file mode 100644 index 000000000..ddc12d2c1 --- /dev/null +++ b/internal/tls/utils.go @@ -0,0 +1,30 @@ +package tls + +import ( + "fmt" +) + +// ParseCiphers returns a `[]uint16` by received `[]string` key that represents ciphers from crypto/tls. +// If some of ciphers in received list doesn't exists ParseCiphers returns nil with error +func ParseCiphers(ciphers []string) ([]uint16, error) { + suites := []uint16{} + + for _, cipher := range ciphers { + if v, ok := tlsCipherMap[cipher]; ok { + suites = append(suites, v) + } else { + return nil, fmt.Errorf("unsupported cipher %q", cipher) + } + } + + return suites, nil +} + +// ParseTLSVersion returns a `uint16` by received version string key that represents tls version from crypto/tls. +// If version isn't supported ParseTLSVersion returns 0 with error +func ParseTLSVersion(version string) (uint16, error) { + if v, ok := tlsVersionMap[version]; ok { + return v, nil + } + return 0, fmt.Errorf("unsupported version %q", version) +} diff --git a/internal/usage.go b/internal/usage.go index 4c00d8f83..6eff30e6b 100644 --- a/internal/usage.go +++ b/internal/usage.go @@ -13,15 +13,31 @@ The commands & flags are: config print out full sample configuration to stdout version print the version to stdout - --config configuration file to load - --test gather metrics once, print them to stdout, and exit - --config-directory directory containing additional *.conf files - --input-filter filter the input plugins to enable, separator is : - --output-filter filter the output plugins to enable, separator is : - --usage print usage for a plugin, ie, 'telegraf --usage mysql' - --debug print metrics as they're generated to stdout - --pprof-addr pprof address to listen on, format: localhost:6060 or :6060 - --quiet run in quiet mode + --aggregator-filter filter the aggregators to enable, separator is : + --config configuration file to load + --config-directory directory containing additional *.conf files + --plugin-directory directory containing *.so files, this directory will be + searched recursively. Any Plugin found will be loaded + and namespaced. + --debug turn on debug logging + --input-filter filter the inputs to enable, separator is : + --input-list print available input plugins. + --output-filter filter the outputs to enable, separator is : + --output-list print available output plugins. + --pidfile file to write our pid to + --pprof-addr
pprof address to listen on, don't activate pprof if empty + --processor-filter filter the processors to enable, separator is : + --quiet run in quiet mode + --section-filter filter config sections to output, separator is : + Valid values are 'agent', 'global_tags', 'outputs', + 'processors', 'aggregators' and 'inputs' + --sample-config print out full sample configuration + --once enable once mode: gather metrics once, write them, and exit + --test enable test mode: gather metrics once and print them + --test-wait wait up to this many seconds for service + inputs to complete in test or once mode + --usage print usage for a plugin, ie, 'telegraf --usage mysql' + --version display the version and exit Examples: @@ -31,7 +47,7 @@ Examples: # generate config with only cpu input & influxdb output plugins defined telegraf --input-filter cpu --output-filter influxdb config - # run a single telegraf collection, outputing metrics to stdout + # run a single telegraf collection, outputting metrics to stdout telegraf --config telegraf.conf --test # run telegraf with all plugins defined in config file diff --git a/internal/usage_windows.go b/internal/usage_windows.go index 109d309ed..7fee6a1f1 100644 --- a/internal/usage_windows.go +++ b/internal/usage_windows.go @@ -13,18 +13,33 @@ The commands & flags are: config print out full sample configuration to stdout version print the version to stdout - --config configuration file to load - --test gather metrics once, print them to stdout, and exit - --config-directory directory containing additional *.conf files - --input-filter filter the input plugins to enable, separator is : - --output-filter filter the output plugins to enable, separator is : - --usage print usage for a plugin, ie, 'telegraf --usage mysql' - --debug print metrics as they're generated to stdout - --pprof-addr pprof address to listen on, format: localhost:6060 or :6060 - --quiet run in quiet mode + --aggregator-filter filter the aggregators to enable, separator is : + --config configuration file to load + --config-directory directory containing additional *.conf files + --debug turn on debug logging + --input-filter filter the inputs to enable, separator is : + --input-list print available input plugins. + --output-filter filter the outputs to enable, separator is : + --output-list print available output plugins. + --pidfile file to write our pid to + --pprof-addr
pprof address to listen on, don't activate pprof if empty + --processor-filter filter the processors to enable, separator is : + --quiet run in quiet mode + --sample-config print out full sample configuration + --section-filter filter config sections to output, separator is : + Valid values are 'agent', 'global_tags', 'outputs', + 'processors', 'aggregators' and 'inputs' + --once enable once mode: gather metrics once, write them, and exit + --test enable test mode: gather metrics once and print them + --test-wait wait up to this many seconds for service + inputs to complete in test or once mode + --usage print usage for a plugin, ie, 'telegraf --usage mysql' + --version display the version and exit - --console run as console application - --service operate on service, one of: install, uninstall, start, stop + --console run as console application (windows only) + --service operate on the service (windows only) + --service-name service name (windows only) + --service-display-name service display name (windows only) Examples: @@ -34,7 +49,7 @@ Examples: # generate config with only cpu input & influxdb output plugins defined telegraf --input-filter cpu --output-filter influxdb config - # run a single telegraf collection, outputing metrics to stdout + # run a single telegraf collection, outputting metrics to stdout telegraf --config telegraf.conf --test # run telegraf with all plugins defined in config file @@ -51,4 +66,7 @@ Examples: # install telegraf service telegraf --service install --config "C:\Program Files\Telegraf\telegraf.conf" + + # install telegraf service with custom name + telegraf --service install --service-name=my-telegraf --service-display-name="My Telegraf" ` diff --git a/logger/event_logger.go b/logger/event_logger.go new file mode 100644 index 000000000..48b645dde --- /dev/null +++ b/logger/event_logger.go @@ -0,0 +1,49 @@ +package logger + +import ( + "io" + "strings" + + "github.com/influxdata/wlog" + "github.com/kardianos/service" +) + +const ( + LogTargetEventlog = "eventlog" +) + +type eventLogger struct { + logger service.Logger +} + +func (t *eventLogger) Write(b []byte) (n int, err error) { + loc := prefixRegex.FindIndex(b) + n = len(b) + if loc == nil { + err = t.logger.Info(b) + } else if n > 2 { //skip empty log messages + line := strings.Trim(string(b[loc[1]:]), " \t\r\n") + switch rune(b[loc[0]]) { + case 'I': + err = t.logger.Info(line) + case 'W': + err = t.logger.Warning(line) + case 'E': + err = t.logger.Error(line) + } + } + + return +} + +type eventLoggerCreator struct { + serviceLogger service.Logger +} + +func (e *eventLoggerCreator) CreateLogger(config LogConfig) (io.Writer, error) { + return wlog.NewWriter(&eventLogger{logger: e.serviceLogger}), nil +} + +func RegisterEventLogger(serviceLogger service.Logger) { + registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: serviceLogger}) +} diff --git a/logger/event_logger_test.go b/logger/event_logger_test.go new file mode 100644 index 000000000..f2d4eb420 --- /dev/null +++ b/logger/event_logger_test.go @@ -0,0 +1,100 @@ +//+build windows + +package logger + +import ( + "bytes" + "encoding/xml" + "log" + "os/exec" + "testing" + "time" + + "github.com/kardianos/service" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type Levels int + +const ( + Info Levels = iota + 1 + Warning + Error +) + +type Event struct { + Message string `xml:"EventData>Data"` + Level Levels `xml:"System>EventID"` +} + +func getEventLog(t *testing.T, since time.Time) []Event { + 
timeStr := since.UTC().Format(time.RFC3339) + cmd := exec.Command("wevtutil", "qe", "Application", "/rd:true", "/q:Event[System[TimeCreated[@SystemTime >= '"+timeStr+"'] and Provider[@Name='Telegraf']]]") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + require.NoError(t, err) + xmlStr := "" + out.String() + "" + var events struct { + Events []Event `xml:"Event"` + } + err = xml.Unmarshal([]byte(xmlStr), &events) + require.NoError(t, err) + return events.Events +} + +func TestEventLog(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + prepareLogger(t) + + config := LogConfig{ + LogTarget: LogTargetEventlog, + Logfile: "", + } + + SetupLogging(config) + now := time.Now() + log.Println("I! Info message") + log.Println("W! Warn message") + log.Println("E! Err message") + events := getEventLog(t, now) + assert.Len(t, events, 3) + assert.Contains(t, events, Event{Message: "Info message", Level: Info}) + assert.Contains(t, events, Event{Message: "Warn message", Level: Warning}) + assert.Contains(t, events, Event{Message: "Err message", Level: Error}) +} + +func TestRestrictedEventLog(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + prepareLogger(t) + + config := LogConfig{ + LogTarget: LogTargetEventlog, + Quiet: true, + } + + SetupLogging(config) + //separate previous log messages by small delay + time.Sleep(time.Second) + now := time.Now() + log.Println("I! Info message") + log.Println("W! Warning message") + log.Println("E! Error message") + events := getEventLog(t, now) + assert.Len(t, events, 1) + assert.Contains(t, events, Event{Message: "Error message", Level: Error}) +} + +func prepareLogger(t *testing.T) { + svc, err := service.New(nil, &service.Config{Name: "Telegraf"}) + require.NoError(t, err) + svcLogger, err := svc.SystemLogger(nil) + require.NoError(t, err) + require.NotNil(t, svcLogger) + registerLogger(LogTargetEventlog, &eventLoggerCreator{serviceLogger: svcLogger}) +} diff --git a/logger/logger.go b/logger/logger.go index 7ad1c8069..a276d2e80 100644 --- a/logger/logger.go +++ b/logger/logger.go @@ -1,26 +1,61 @@ package logger import ( + "errors" "io" "log" "os" "regexp" "time" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/wlog" ) var prefixRegex = regexp.MustCompile("^[DIWE]!") -// newTelegrafWriter returns a logging-wrapped writer. -func newTelegrafWriter(w io.Writer) io.Writer { - return &telegrafLog{ - writer: wlog.NewWriter(w), +const ( + LogTargetFile = "file" + LogTargetStderr = "stderr" +) + +// LogConfig contains the log configuration settings +type LogConfig struct { + // will set the log level to DEBUG + Debug bool + //will set the log level to ERROR + Quiet bool + //stderr, stdout, file or eventlog (Windows only) + LogTarget string + // will direct the logging output to a file. Empty string is + // interpreted as stderr. If there is an error opening the file the + // logger will fallback to stderr + Logfile string + // will rotate when current file at the specified time interval + RotationInterval internal.Duration + // will rotate when current file size exceeds this parameter. 
+ RotationMaxSize internal.Size + // maximum rotated files to keep (older ones will be deleted) + RotationMaxArchives int +} + +type LoggerCreator interface { + CreateLogger(config LogConfig) (io.Writer, error) +} + +var loggerRegistry map[string]LoggerCreator + +func registerLogger(name string, loggerCreator LoggerCreator) { + if loggerRegistry == nil { + loggerRegistry = make(map[string]LoggerCreator) } + loggerRegistry[name] = loggerCreator } type telegrafLog struct { - writer io.Writer + writer io.Writer + internalWriter io.Writer } func (t *telegrafLog) Write(b []byte) (n int, err error) { @@ -33,37 +68,96 @@ func (t *telegrafLog) Write(b []byte) (n int, err error) { return t.writer.Write(line) } -// SetupLogging configures the logging output. -// debug will set the log level to DEBUG -// quiet will set the log level to ERROR -// logfile will direct the logging output to a file. Empty string is -// interpreted as stderr. If there is an error opening the file the -// logger will fallback to stderr. -func SetupLogging(debug, quiet bool, logfile string) { - log.SetFlags(0) - if debug { - wlog.SetLevel(wlog.DEBUG) - } - if quiet { - wlog.SetLevel(wlog.ERROR) +func (t *telegrafLog) Close() error { + var stdErrWriter io.Writer + stdErrWriter = os.Stderr + // avoid closing stderr + if t.internalWriter != stdErrWriter { + closer, isCloser := t.internalWriter.(io.Closer) + if !isCloser { + return errors.New("the underlying writer cannot be closed") + } + return closer.Close() } + return nil +} - var oFile *os.File - if logfile != "" { - if _, err := os.Stat(logfile); os.IsNotExist(err) { - if oFile, err = os.Create(logfile); err != nil { - log.Printf("E! Unable to create %s (%s), using stderr", logfile, err) - oFile = os.Stderr +// newTelegrafWriter returns a logging-wrapped writer. +func newTelegrafWriter(w io.Writer) io.Writer { + return &telegrafLog{ + writer: wlog.NewWriter(w), + internalWriter: w, + } +} + +// SetupLogging configures the logging output. +func SetupLogging(config LogConfig) { + newLogWriter(config) +} + +type telegrafLogCreator struct { +} + +func (t *telegrafLogCreator) CreateLogger(config LogConfig) (io.Writer, error) { + var writer, defaultWriter io.Writer + defaultWriter = os.Stderr + + switch config.LogTarget { + case LogTargetFile: + if config.Logfile != "" { + var err error + if writer, err = rotate.NewFileWriter(config.Logfile, config.RotationInterval.Duration, config.RotationMaxSize.Size, config.RotationMaxArchives); err != nil { + log.Printf("E! Unable to open %s (%s), using stderr", config.Logfile, err) + writer = defaultWriter } } else { - if oFile, err = os.OpenFile(logfile, os.O_APPEND|os.O_WRONLY, os.ModeAppend); err != nil { - log.Printf("E! Unable to append to %s (%s), using stderr", logfile, err) - oFile = os.Stderr - } + writer = defaultWriter } - } else { - oFile = os.Stderr + case LogTargetStderr, "": + writer = defaultWriter + default: + log.Printf("E! Unsupported logtarget: %s, using stderr", config.LogTarget) + writer = defaultWriter } - log.SetOutput(newTelegrafWriter(oFile)) + return newTelegrafWriter(writer), nil +} + +// Keep track what is actually set as a log output, because log package doesn't provide a getter. 
+// It allows closing previous writer if re-set and have possibility to test what is actually set +var actualLogger io.Writer + +func newLogWriter(config LogConfig) io.Writer { + log.SetFlags(0) + if config.Debug { + wlog.SetLevel(wlog.DEBUG) + } + if config.Quiet { + wlog.SetLevel(wlog.ERROR) + } + if !config.Debug && !config.Quiet { + wlog.SetLevel(wlog.INFO) + } + var logWriter io.Writer + if logCreator, ok := loggerRegistry[config.LogTarget]; ok { + logWriter, _ = logCreator.CreateLogger(config) + } + if logWriter == nil { + logWriter, _ = (&telegrafLogCreator{}).CreateLogger(config) + } + + if closer, isCloser := actualLogger.(io.Closer); isCloser { + closer.Close() + } + log.SetOutput(logWriter) + actualLogger = logWriter + + return logWriter +} + +func init() { + tlc := &telegrafLogCreator{} + registerLogger("", tlc) + registerLogger(LogTargetStderr, tlc) + registerLogger(LogTargetFile, tlc) } diff --git a/logger/logger_test.go b/logger/logger_test.go index 09c7c82eb..a5f53ca17 100644 --- a/logger/logger_test.go +++ b/logger/logger_test.go @@ -2,12 +2,16 @@ package logger import ( "bytes" + "io" "io/ioutil" "log" "os" + "path/filepath" "testing" + "github.com/influxdata/telegraf/internal" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestWriteLogToFile(t *testing.T) { @@ -15,7 +19,8 @@ func TestWriteLogToFile(t *testing.T) { assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() - SetupLogging(false, false, tmpfile.Name()) + config := createBasicLogConfig(tmpfile.Name()) + SetupLogging(config) log.Printf("I! TEST") log.Printf("D! TEST") // <- should be ignored @@ -28,8 +33,9 @@ func TestDebugWriteLogToFile(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() - - SetupLogging(true, false, tmpfile.Name()) + config := createBasicLogConfig(tmpfile.Name()) + config.Debug = true + SetupLogging(config) log.Printf("D! TEST") f, err := ioutil.ReadFile(tmpfile.Name()) @@ -41,8 +47,9 @@ func TestErrorWriteLogToFile(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() - - SetupLogging(false, true, tmpfile.Name()) + config := createBasicLogConfig(tmpfile.Name()) + config.Quiet = true + SetupLogging(config) log.Printf("E! TEST") log.Printf("I! TEST") // <- should be ignored @@ -55,8 +62,9 @@ func TestAddDefaultLogLevel(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") assert.NoError(t, err) defer func() { os.Remove(tmpfile.Name()) }() - - SetupLogging(true, false, tmpfile.Name()) + config := createBasicLogConfig(tmpfile.Name()) + config.Debug = true + SetupLogging(config) log.Printf("TEST") f, err := ioutil.ReadFile(tmpfile.Name()) @@ -64,6 +72,68 @@ func TestAddDefaultLogLevel(t *testing.T) { assert.Equal(t, f[19:], []byte("Z I! TEST\n")) } +func TestWriteToTruncatedFile(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "") + assert.NoError(t, err) + defer func() { os.Remove(tmpfile.Name()) }() + config := createBasicLogConfig(tmpfile.Name()) + config.Debug = true + SetupLogging(config) + log.Printf("TEST") + + f, err := ioutil.ReadFile(tmpfile.Name()) + assert.NoError(t, err) + assert.Equal(t, f[19:], []byte("Z I! TEST\n")) + + tmpf, err := os.OpenFile(tmpfile.Name(), os.O_RDWR|os.O_TRUNC, 0644) + assert.NoError(t, err) + assert.NoError(t, tmpf.Close()) + + log.Printf("SHOULD BE FIRST") + + f, err = ioutil.ReadFile(tmpfile.Name()) + assert.NoError(t, err) + assert.Equal(t, f[19:], []byte("Z I! 
SHOULD BE FIRST\n")) +} + +func TestWriteToFileInRotation(t *testing.T) { + tempDir, err := ioutil.TempDir("", "LogRotation") + require.NoError(t, err) + config := createBasicLogConfig(filepath.Join(tempDir, "test.log")) + config.LogTarget = LogTargetFile + config.RotationMaxSize = internal.Size{Size: int64(30)} + writer := newLogWriter(config) + // Close the writer here, otherwise the temp folder cannot be deleted because the current log file is in use. + closer, isCloser := writer.(io.Closer) + assert.True(t, isCloser) + defer func() { closer.Close(); os.RemoveAll(tempDir) }() + + log.Printf("I! TEST 1") // Writes 31 bytes, will rotate + log.Printf("I! TEST") // Writes 29 byes, no rotation expected + files, _ := ioutil.ReadDir(tempDir) + assert.Equal(t, 2, len(files)) +} + +func TestLogTargetSettings(t *testing.T) { + config := LogConfig{ + LogTarget: "", + Quiet: true, + } + SetupLogging(config) + logger, isTelegrafLogger := actualLogger.(*telegrafLog) + assert.True(t, isTelegrafLogger) + assert.Equal(t, logger.internalWriter, os.Stderr) + + config = LogConfig{ + LogTarget: "stderr", + Quiet: true, + } + SetupLogging(config) + logger, isTelegrafLogger = actualLogger.(*telegrafLog) + assert.True(t, isTelegrafLogger) + assert.Equal(t, logger.internalWriter, os.Stderr) +} + func BenchmarkTelegrafLogWrite(b *testing.B) { var msg = []byte("test") var buf bytes.Buffer @@ -73,3 +143,11 @@ func BenchmarkTelegrafLogWrite(b *testing.B) { w.Write(msg) } } + +func createBasicLogConfig(filename string) LogConfig { + return LogConfig{ + Logfile: filename, + LogTarget: LogTargetFile, + RotationMaxArchives: -1, + } +} diff --git a/metric.go b/metric.go index b8da02931..6c7b1c6c5 100644 --- a/metric.go +++ b/metric.go @@ -17,43 +17,93 @@ const ( Histogram ) +// Tag represents a single tag key and value. type Tag struct { Key string Value string } +// Field represents a single field key and value. type Field struct { Key string Value interface{} } +// Metric is the type of data that is processed by Telegraf. Input plugins, +// and to a lesser degree, Processor and Aggregator plugins create new Metrics +// and Output plugins write them. type Metric interface { - // Getting data structure functions + // Name is the primary identifier for the Metric and corresponds to the + // measurement in the InfluxDB data model. Name() string + + // Tags returns the tags as a map. This method is deprecated, use TagList instead. Tags() map[string]string + + // TagList returns the tags as a slice ordered by the tag key in lexical + // bytewise ascending order. The returned value should not be modified, + // use the AddTag or RemoveTag methods instead. TagList() []*Tag + + // Fields returns the fields as a map. This method is deprecated, use FieldList instead. Fields() map[string]interface{} + + // FieldList returns the fields as a slice in an undefined order. The + // returned value should not be modified, use the AddField or RemoveField + // methods instead. FieldList() []*Field + + // Time returns the timestamp of the metric. Time() time.Time + + // Type returns a general type for the entire metric that describes how you + // might interpret, aggregate the values. + // + // This method may be removed in the future and its use is discouraged. Type() ValueType - // Name functions + // SetName sets the metric name. SetName(name string) + + // AddPrefix adds a string to the front of the metric name. It is + // equivalent to m.SetName(prefix + m.Name()). + // + // This method is deprecated, use SetName instead. 
AddPrefix(prefix string) + + // AddSuffix appends a string to the back of the metric name. It is + // equivalent to m.SetName(m.Name() + suffix). + // + // This method is deprecated, use SetName instead. AddSuffix(suffix string) - // Tag functions + // GetTag returns the value of a tag and a boolean to indicate if it was set. GetTag(key string) (string, bool) + + // HasTag returns true if the tag is set on the Metric. HasTag(key string) bool + + // AddTag sets the tag on the Metric. If the Metric already has the tag + // set then the current value is replaced. AddTag(key, value string) + + // RemoveTag removes the tag if it is set. RemoveTag(key string) - // Field functions + // GetField returns the value of a field and a boolean to indicate if it was set. GetField(key string) (interface{}, bool) + + // HasField returns true if the field is set on the Metric. HasField(key string) bool + + // AddField sets the field on the Metric. If the Metric already has the field + // set then the current value is replaced. AddField(key string, value interface{}) + + // RemoveField removes the tag if it is set. RemoveField(key string) + // SetTime sets the timestamp of the Metric. SetTime(t time.Time) // HashID returns an unique identifier for the series. @@ -62,7 +112,24 @@ type Metric interface { // Copy returns a deep copy of the Metric. Copy() Metric - // Mark Metric as an aggregate + // Accept marks the metric as processed successfully and written to an + // output. + Accept() + + // Reject marks the metric as processed unsuccessfully. + Reject() + + // Drop marks the metric as processed successfully without being written + // to any output. + Drop() + + // SetAggregate indicates the metric is an aggregated value. + // + // This method may be removed in the future and its use is discouraged. SetAggregate(bool) + + // IsAggregate returns true if the Metric is an aggregate. + // + // This method may be removed in the future and its use is discouraged. 
IsAggregate() bool } diff --git a/metric/builder.go b/metric/builder.go deleted file mode 100644 index c579046df..000000000 --- a/metric/builder.go +++ /dev/null @@ -1,53 +0,0 @@ -package metric - -import ( - "time" - - "github.com/influxdata/telegraf" -) - -type TimeFunc func() time.Time - -type Builder struct { - TimeFunc - TimePrecision time.Duration - - *metric -} - -func NewBuilder() *Builder { - b := &Builder{ - TimeFunc: time.Now, - TimePrecision: 1 * time.Nanosecond, - } - b.Reset() - return b -} - -func (b *Builder) SetName(name string) { - b.name = name -} - -func (b *Builder) AddTag(key string, value string) { - b.metric.AddTag(key, value) -} - -func (b *Builder) AddField(key string, value interface{}) { - b.metric.AddField(key, value) -} - -func (b *Builder) SetTime(tm time.Time) { - b.tm = tm -} - -func (b *Builder) Reset() { - b.metric = &metric{} -} - -func (b *Builder) Metric() (telegraf.Metric, error) { - if b.tm.IsZero() { - b.tm = b.TimeFunc().Truncate(b.TimePrecision) - } - - return b.metric, nil -} diff --git a/metric/metric.go b/metric/metric.go index 077b3a314..517645a83 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -50,18 +50,42 @@ func New( sort.Slice(m.tags, func(i, j int) bool { return m.tags[i].Key < m.tags[j].Key }) } - m.fields = make([]*telegraf.Field, 0, len(fields)) - for k, v := range fields { - v := convertField(v) - if v == nil { - continue + if len(fields) > 0 { + m.fields = make([]*telegraf.Field, 0, len(fields)) + for k, v := range fields { + v := convertField(v) + if v == nil { + continue + } + m.AddField(k, v) } - m.AddField(k, v) } return m, nil } +// FromMetric returns a deep copy of the metric with any tracking information +// removed. +func FromMetric(other telegraf.Metric) telegraf.Metric { + m := &metric{ + name: other.Name(), + tags: make([]*telegraf.Tag, len(other.TagList())), + fields: make([]*telegraf.Field, len(other.FieldList())), + tm: other.Time(), + tp: other.Type(), + aggregate: other.IsAggregate(), + } + + for i, tag := range other.TagList() { + m.tags[i] = &telegraf.Tag{Key: tag.Key, Value: tag.Value} + } + + for i, field := range other.FieldList() { + m.fields[i] = &telegraf.Field{Key: field.Key, Value: field.Value} + } + return m +} + func (m *metric) String() string { return fmt.Sprintf("%s %v %v %d", m.name, m.Tags(), m.Fields(), m.tm.UnixNano()) } @@ -168,6 +192,7 @@ func (m *metric) AddField(key string, value interface{}) { for i, field := range m.fields { if key == field.Key { m.fields[i] = &telegraf.Field{Key: key, Value: convertField(value)} + return } } m.fields = append(m.fields, &telegraf.Field{Key: key, Value: convertField(value)}) @@ -217,11 +242,11 @@ func (m *metric) Copy() telegraf.Metric { } for i, tag := range m.tags { - m2.tags[i] = tag + m2.tags[i] = &telegraf.Tag{Key: tag.Key, Value: tag.Value} } for i, field := range m.fields { - m2.fields[i] = field + m2.fields[i] = &telegraf.Field{Key: field.Key, Value: field.Value} } return m2 } @@ -247,6 +272,15 @@ func (m *metric) HashID() uint64 { return h.Sum64() } +func (m *metric) Accept() { +} + +func (m *metric) Reject() { +} + +func (m *metric) Drop() { +} + // Convert field to a supported type or nil if unconvertible func convertField(v interface{}) interface{} { switch v := v.(type) { @@ -280,7 +314,68 @@ func convertField(v interface{}) interface{} { return uint64(v) case float32: return float64(v) + case *float64: + if v != nil { + return *v + } + case *int64: + if v != nil { + return *v + } + case *string: + if v != nil { + return *v + } + case 
*bool: + if v != nil { + return *v + } + case *int: + if v != nil { + return int64(*v) + } + case *uint: + if v != nil { + return uint64(*v) + } + case *uint64: + if v != nil { + return uint64(*v) + } + case *[]byte: + if v != nil { + return string(*v) + } + case *int32: + if v != nil { + return int64(*v) + } + case *int16: + if v != nil { + return int64(*v) + } + case *int8: + if v != nil { + return int64(*v) + } + case *uint32: + if v != nil { + return uint64(*v) + } + case *uint16: + if v != nil { + return uint64(*v) + } + case *uint8: + if v != nil { + return uint64(*v) + } + case *float32: + if v != nil { + return float64(*v) + } default: return nil } + return nil } diff --git a/metric/metric_test.go b/metric/metric_test.go index 47d44f3ef..7033d3230 100644 --- a/metric/metric_test.go +++ b/metric/metric_test.go @@ -30,6 +30,7 @@ func TestNewMetric(t *testing.T) { require.Equal(t, now, m.Time()) } +// cpu value=1 func baseMetric() telegraf.Metric { tags := map[string]string{} fields := map[string]interface{}{ @@ -111,6 +112,8 @@ func TestAddFieldOverwrites(t *testing.T) { m.AddField("value", 1.0) m.AddField("value", 42.0) + require.Equal(t, 1, len(m.FieldList())) + value, ok := m.GetField("value") require.True(t, ok) require.Equal(t, 42.0, value) @@ -122,6 +125,8 @@ func TestAddFieldChangesType(t *testing.T) { m.AddField("value", 1.0) m.AddField("value", "xyzzy") + require.Equal(t, 1, len(m.FieldList())) + value, ok := m.GetField("value") require.True(t, ok) require.Equal(t, "xyzzy", value) @@ -329,7 +334,7 @@ func TestValueType(t *testing.T) { assert.Equal(t, telegraf.Gauge, m.Type()) } -func TestCopyAggreate(t *testing.T) { +func TestCopyAggregate(t *testing.T) { m1 := baseMetric() m1.SetAggregate(true) m2 := m1.Copy() diff --git a/metric/series_grouper.go b/metric/series_grouper.go new file mode 100644 index 000000000..5dc66e11b --- /dev/null +++ b/metric/series_grouper.go @@ -0,0 +1,86 @@ +package metric + +import ( + "hash/fnv" + "io" + "sort" + "strconv" + "time" + + "github.com/influxdata/telegraf" +) + +// NewSeriesGrouper returns a type that can be used to group fields by series +// and time, so that fields which share these values will be combined into a +// single telegraf.Metric. +// +// This is useful to build telegraf.Metric's when all fields for a series are +// not available at once. +// +// ex: +// - cpu,host=localhost usage_time=42 +// - cpu,host=localhost idle_time=42 +// + cpu,host=localhost idle_time=42,usage_time=42 +func NewSeriesGrouper() *SeriesGrouper { + return &SeriesGrouper{ + metrics: make(map[uint64]telegraf.Metric), + ordered: []telegraf.Metric{}, + } +} + +type SeriesGrouper struct { + metrics map[uint64]telegraf.Metric + ordered []telegraf.Metric +} + +// Add adds a field key and value to the series. +func (g *SeriesGrouper) Add( + measurement string, + tags map[string]string, + tm time.Time, + field string, + fieldValue interface{}, +) error { + var err error + id := groupID(measurement, tags, tm) + metric := g.metrics[id] + if metric == nil { + metric, err = New(measurement, tags, map[string]interface{}{field: fieldValue}, tm) + if err != nil { + return err + } + g.metrics[id] = metric + g.ordered = append(g.ordered, metric) + } else { + metric.AddField(field, fieldValue) + } + return nil +} + +// Metrics returns the metrics grouped by series and time. 
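As a usage sketch (illustrative, not from the patch) of the SeriesGrouper API introduced above: two fields that share a measurement, tag set and timestamp come back as a single metric.

```go
package example

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
)

// groupFields feeds two fields for the same series and timestamp into a
// SeriesGrouper and gets back one combined metric, e.g.
// cpu,host=localhost idle_time=42,usage_time=42.
func groupFields() ([]telegraf.Metric, error) {
	g := metric.NewSeriesGrouper()
	tags := map[string]string{"host": "localhost"}
	tm := time.Unix(0, 0)

	if err := g.Add("cpu", tags, tm, "usage_time", 42); err != nil {
		return nil, err
	}
	if err := g.Add("cpu", tags, tm, "idle_time", 42); err != nil {
		return nil, err
	}

	// Both Add calls hash to the same series and time, so Metrics returns a
	// single metric carrying both fields.
	return g.Metrics(), nil
}
```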
+func (g *SeriesGrouper) Metrics() []telegraf.Metric { + return g.ordered +} + +func groupID(measurement string, tags map[string]string, tm time.Time) uint64 { + h := fnv.New64a() + h.Write([]byte(measurement)) + h.Write([]byte("\n")) + + taglist := make([]*telegraf.Tag, 0, len(tags)) + for k, v := range tags { + taglist = append(taglist, + &telegraf.Tag{Key: k, Value: v}) + } + sort.Slice(taglist, func(i, j int) bool { return taglist[i].Key < taglist[j].Key }) + for _, tag := range taglist { + h.Write([]byte(tag.Key)) + h.Write([]byte("\n")) + h.Write([]byte(tag.Value)) + h.Write([]byte("\n")) + } + h.Write([]byte("\n")) + + io.WriteString(h, strconv.FormatInt(tm.UnixNano(), 10)) + return h.Sum64() +} diff --git a/metric/tracking.go b/metric/tracking.go new file mode 100644 index 000000000..e370d9f2a --- /dev/null +++ b/metric/tracking.go @@ -0,0 +1,178 @@ +package metric + +import ( + "log" + "runtime" + "sync/atomic" + + "github.com/influxdata/telegraf" +) + +// NotifyFunc is called when a tracking metric is done being processed with +// the tracking information. +type NotifyFunc = func(track telegraf.DeliveryInfo) + +// WithTracking adds tracking to the metric and registers the notify function +// to be called when processing is complete. +func WithTracking(metric telegraf.Metric, fn NotifyFunc) (telegraf.Metric, telegraf.TrackingID) { + return newTrackingMetric(metric, fn) +} + +// WithGroupTracking adds tracking to the metrics and registers the notify +// function to be called when processing is complete. +func WithGroupTracking(metric []telegraf.Metric, fn NotifyFunc) ([]telegraf.Metric, telegraf.TrackingID) { + return newTrackingMetricGroup(metric, fn) +} + +func EnableDebugFinalizer() { + finalizer = debugFinalizer +} + +var ( + lastID uint64 + finalizer func(*trackingData) +) + +func newTrackingID() telegraf.TrackingID { + return telegraf.TrackingID(atomic.AddUint64(&lastID, 1)) +} + +func debugFinalizer(d *trackingData) { + rc := atomic.LoadInt32(&d.rc) + if rc != 0 { + log.Fatalf("E!
[agent] metric collected with non-zero reference count rc: %d", rc) + } +} + +type trackingData struct { + id telegraf.TrackingID + rc int32 + acceptCount int32 + rejectCount int32 + notifyFunc NotifyFunc +} + +func (d *trackingData) incr() { + atomic.AddInt32(&d.rc, 1) +} + +func (d *trackingData) decr() int32 { + return atomic.AddInt32(&d.rc, -1) +} + +func (d *trackingData) accept() { + atomic.AddInt32(&d.acceptCount, 1) +} + +func (d *trackingData) reject() { + atomic.AddInt32(&d.rejectCount, 1) +} + +func (d *trackingData) notify() { + d.notifyFunc( + &deliveryInfo{ + id: d.id, + accepted: int(d.acceptCount), + rejected: int(d.rejectCount), + }, + ) +} + +type trackingMetric struct { + telegraf.Metric + d *trackingData +} + +func newTrackingMetric(metric telegraf.Metric, fn NotifyFunc) (telegraf.Metric, telegraf.TrackingID) { + m := &trackingMetric{ + Metric: metric, + d: &trackingData{ + id: newTrackingID(), + rc: 1, + acceptCount: 0, + rejectCount: 0, + notifyFunc: fn, + }, + } + + if finalizer != nil { + runtime.SetFinalizer(m.d, finalizer) + } + return m, m.d.id +} + +func newTrackingMetricGroup(group []telegraf.Metric, fn NotifyFunc) ([]telegraf.Metric, telegraf.TrackingID) { + d := &trackingData{ + id: newTrackingID(), + rc: 0, + acceptCount: 0, + rejectCount: 0, + notifyFunc: fn, + } + + for i, m := range group { + d.incr() + dm := &trackingMetric{ + Metric: m, + d: d, + } + group[i] = dm + + } + if finalizer != nil { + runtime.SetFinalizer(d, finalizer) + } + + if len(group) == 0 { + d.notify() + } + + return group, d.id +} + +func (m *trackingMetric) Copy() telegraf.Metric { + m.d.incr() + return &trackingMetric{ + Metric: m.Metric.Copy(), + d: m.d, + } +} + +func (m *trackingMetric) Accept() { + m.d.accept() + m.decr() +} + +func (m *trackingMetric) Reject() { + m.d.reject() + m.decr() +} + +func (m *trackingMetric) Drop() { + m.decr() +} + +func (m *trackingMetric) decr() { + v := m.d.decr() + if v < 0 { + panic("negative refcount") + } + + if v == 0 { + m.d.notify() + } +} + +type deliveryInfo struct { + id telegraf.TrackingID + accepted int + rejected int +} + +func (r *deliveryInfo) ID() telegraf.TrackingID { + return r.id +} + +func (r *deliveryInfo) Delivered() bool { + return r.rejected == 0 +} diff --git a/metric/tracking_test.go b/metric/tracking_test.go new file mode 100644 index 000000000..0ca1ca4da --- /dev/null +++ b/metric/tracking_test.go @@ -0,0 +1,298 @@ +package metric + +import ( + "sync" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/stretchr/testify/require" +) + +func mustMetric( + name string, + tags map[string]string, + fields map[string]interface{}, + tm time.Time, + tp ...telegraf.ValueType, +) telegraf.Metric { + m, err := New(name, tags, fields, tm, tp...) + if err != nil { + panic("mustMetric") + } + return m +} + +type deliveries struct { + Info map[telegraf.TrackingID]telegraf.DeliveryInfo +} + +func (d *deliveries) onDelivery(info telegraf.DeliveryInfo) { + d.Info[info.ID()] = info +} + +func TestNewTrackingID(t *testing.T) { + var wg sync.WaitGroup + var a [100000]telegraf.TrackingID + var b [100000]telegraf.TrackingID + + wg.Add(2) + go func() { + for i := 0; i < len(a); i++ { + a[i] = newTrackingID() + } + wg.Done() + }() + go func() { + for i := 0; i < len(b); i++ { + b[i] = newTrackingID() + } + wg.Done() + }() + wg.Wait() + + // Find any duplicate TrackingIDs in arrays a and b. Arrays must be sorted in increasing order. 
+ for i, j := 0, 0; i < len(a) && j < len(b); { + if a[i] == b[j] { + t.Errorf("Duplicate TrackingID: a[%d]==%d and b[%d]==%d.", i, a[i], j, b[j]) + break + } + if a[i] > b[j] { + j++ + continue + } + if a[i] < b[j] { + i++ + continue + } + } +} + +func TestTracking(t *testing.T) { + tests := []struct { + name string + metric telegraf.Metric + actions func(metric telegraf.Metric) + delivered bool + }{ + { + name: "accept", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m.Accept() + }, + delivered: true, + }, + { + name: "reject", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m.Reject() + }, + delivered: false, + }, + { + name: "accept copy", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m2 := m.Copy() + m.Accept() + m2.Accept() + }, + delivered: true, + }, + { + name: "copy with accept and done", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m2 := m.Copy() + m.Accept() + m2.Drop() + }, + delivered: true, + }, + { + name: "copy with mixed delivery", + metric: mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + actions: func(m telegraf.Metric) { + m2 := m.Copy() + m.Accept() + m2.Reject() + }, + delivered: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &deliveries{ + Info: make(map[telegraf.TrackingID]telegraf.DeliveryInfo), + } + metric, id := WithTracking(tt.metric, d.onDelivery) + tt.actions(metric) + + info := d.Info[id] + require.Equal(t, tt.delivered, info.Delivered()) + }) + } +} + +func TestGroupTracking(t *testing.T) { + tests := []struct { + name string + metrics []telegraf.Metric + actions func(metrics []telegraf.Metric) + delivered bool + }{ + { + name: "accept", + metrics: []telegraf.Metric{ + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + actions: func(metrics []telegraf.Metric) { + metrics[0].Accept() + metrics[1].Accept() + }, + delivered: true, + }, + { + name: "reject", + metrics: []telegraf.Metric{ + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + actions: func(metrics []telegraf.Metric) { + metrics[0].Reject() + metrics[1].Reject() + }, + delivered: false, + }, + { + name: "remove", + metrics: []telegraf.Metric{ + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + actions: func(metrics []telegraf.Metric) { + metrics[0].Drop() + metrics[1].Drop() + }, + delivered: true, + }, + { + name: "mixed", + metrics: []telegraf.Metric{ + mustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + mustMetric( + 
"cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + actions: func(metrics []telegraf.Metric) { + metrics[0].Accept() + metrics[1].Reject() + }, + delivered: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &deliveries{ + Info: make(map[telegraf.TrackingID]telegraf.DeliveryInfo), + } + metrics, id := WithGroupTracking(tt.metrics, d.onDelivery) + tt.actions(metrics) + + info := d.Info[id] + require.Equal(t, tt.delivered, info.Delivered()) + }) + } +} diff --git a/metric/uint_support.go b/metric/uint_support.go deleted file mode 100644 index 98383fb23..000000000 --- a/metric/uint_support.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build uint64 - -package metric - -func init() { - EnableUintSupport() -} diff --git a/models/buffer.go b/models/buffer.go new file mode 100644 index 000000000..18e9987ca --- /dev/null +++ b/models/buffer.go @@ -0,0 +1,286 @@ +package models + +import ( + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" +) + +var ( + AgentMetricsWritten = selfstat.Register("agent", "metrics_written", map[string]string{}) + AgentMetricsDropped = selfstat.Register("agent", "metrics_dropped", map[string]string{}) +) + +// Buffer stores metrics in a circular buffer. +type Buffer struct { + sync.Mutex + buf []telegraf.Metric + first int // index of the first/oldest metric + last int // one after the index of the last/newest metric + size int // number of metrics currently in the buffer + cap int // the capacity of the buffer + + batchFirst int // index of the first metric in the batch + batchSize int // number of metrics currently in the batch + + MetricsAdded selfstat.Stat + MetricsWritten selfstat.Stat + MetricsDropped selfstat.Stat + BufferSize selfstat.Stat + BufferLimit selfstat.Stat +} + +// NewBuffer returns a new empty Buffer with the given capacity. +func NewBuffer(name string, alias string, capacity int) *Buffer { + tags := map[string]string{"output": name} + if alias != "" { + tags["alias"] = alias + } + + b := &Buffer{ + buf: make([]telegraf.Metric, capacity), + first: 0, + last: 0, + size: 0, + cap: capacity, + + MetricsAdded: selfstat.Register( + "write", + "metrics_added", + tags, + ), + MetricsWritten: selfstat.Register( + "write", + "metrics_written", + tags, + ), + MetricsDropped: selfstat.Register( + "write", + "metrics_dropped", + tags, + ), + BufferSize: selfstat.Register( + "write", + "buffer_size", + tags, + ), + BufferLimit: selfstat.Register( + "write", + "buffer_limit", + tags, + ), + } + b.BufferSize.Set(int64(0)) + b.BufferLimit.Set(int64(capacity)) + return b +} + +// Len returns the number of metrics currently in the buffer. 
+func (b *Buffer) Len() int { + b.Lock() + defer b.Unlock() + + return b.length() +} + +func (b *Buffer) length() int { + return min(b.size+b.batchSize, b.cap) +} + +func (b *Buffer) metricAdded() { + b.MetricsAdded.Incr(1) +} + +func (b *Buffer) metricWritten(metric telegraf.Metric) { + AgentMetricsWritten.Incr(1) + b.MetricsWritten.Incr(1) + metric.Accept() +} + +func (b *Buffer) metricDropped(metric telegraf.Metric) { + AgentMetricsDropped.Incr(1) + b.MetricsDropped.Incr(1) + metric.Reject() +} + +func (b *Buffer) add(m telegraf.Metric) int { + dropped := 0 + // Check if Buffer is full + if b.size == b.cap { + b.metricDropped(b.buf[b.last]) + dropped++ + + if b.last == b.batchFirst && b.batchSize > 0 { + b.batchSize-- + b.batchFirst = b.next(b.batchFirst) + } + } + + b.metricAdded() + + b.buf[b.last] = m + b.last = b.next(b.last) + + if b.size == b.cap { + b.first = b.next(b.first) + } + + b.size = min(b.size+1, b.cap) + return dropped +} + +// Add adds metrics to the buffer and returns number of dropped metrics. +func (b *Buffer) Add(metrics ...telegraf.Metric) int { + b.Lock() + defer b.Unlock() + + dropped := 0 + for i := range metrics { + if n := b.add(metrics[i]); n != 0 { + dropped += n + } + } + + b.BufferSize.Set(int64(b.length())) + return dropped +} + +// Batch returns a slice containing up to batchSize of the most recently added +// metrics. Metrics are ordered from newest to oldest in the batch. The +// batch must not be modified by the client. +func (b *Buffer) Batch(batchSize int) []telegraf.Metric { + b.Lock() + defer b.Unlock() + + outLen := min(b.size, batchSize) + out := make([]telegraf.Metric, outLen) + if outLen == 0 { + return out + } + + b.batchFirst = b.cap + b.last - outLen + b.batchFirst %= b.cap + b.batchSize = outLen + + batchIndex := b.batchFirst + for i := range out { + out[len(out)-1-i] = b.buf[batchIndex] + b.buf[batchIndex] = nil + batchIndex = b.next(batchIndex) + } + + b.last = b.batchFirst + b.size -= outLen + return out +} + +// Accept marks the batch, acquired from Batch(), as successfully written. +func (b *Buffer) Accept(batch []telegraf.Metric) { + b.Lock() + defer b.Unlock() + + for _, m := range batch { + b.metricWritten(m) + } + + b.resetBatch() + b.BufferSize.Set(int64(b.length())) +} + +// Reject returns the batch, acquired from Batch(), to the buffer and marks it +// as unsent. +func (b *Buffer) Reject(batch []telegraf.Metric) { + b.Lock() + defer b.Unlock() + + if len(batch) == 0 { + return + } + + older := b.dist(b.first, b.batchFirst) + free := b.cap - b.size + restore := min(len(batch), free+older) + + // Rotate newer metrics forward the number of metrics that we can restore. + rb := b.batchFirst + rp := b.last + re := b.nextby(rp, restore) + b.last = re + + for rb != rp && rp != re { + rp = b.prev(rp) + re = b.prev(re) + + if b.buf[re] != nil { + b.metricDropped(b.buf[re]) + b.first = b.next(b.first) + } + + b.buf[re] = b.buf[rp] + b.buf[rp] = nil + } + + // Copy metrics from the batch back into the buffer; recall that the + // batch is in reverse order compared to b.buf + for i := range batch { + if i < restore { + re = b.prev(re) + b.buf[re] = batch[i] + b.size = min(b.size+1, b.cap) + } else { + b.metricDropped(batch[i]) + } + } + + b.resetBatch() + b.BufferSize.Set(int64(b.length())) +} + +// dist returns the distance between two indexes. Because this data structure +// uses a half open range the arguments must both either left side or right +// side pairs. 
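A sketch of the write loop the Batch/Accept/Reject trio is designed for; illustrative only, with `write` standing in for an output's actual write call.

```go
package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/models"
)

// flushOnce takes one batch out of the buffer and resolves it: Accept frees
// the batch and marks the metrics written, Reject returns the batch to the
// buffer, dropping the oldest metrics if there is no longer room for all of
// them.
func flushOnce(buf *models.Buffer, write func([]telegraf.Metric) error) error {
	batch := buf.Batch(1000) // up to 1000 metrics, ordered newest to oldest
	if len(batch) == 0 {
		return nil
	}

	if err := write(batch); err != nil {
		buf.Reject(batch)
		return err
	}
	buf.Accept(batch)
	return nil
}
```

New metrics may keep arriving via Add while a batch is outstanding; the buffer accounts for them when the batch is accepted or rejected, which is the wrap-around behaviour the tests below exercise.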
+func (b *Buffer) dist(begin, end int) int { + if begin <= end { + return end - begin + } else { + return b.cap - begin + end + } +} + +// next returns the next index with wrapping. +func (b *Buffer) next(index int) int { + index++ + if index == b.cap { + return 0 + } + return index +} + +// next returns the index that is count newer with wrapping. +func (b *Buffer) nextby(index, count int) int { + index += count + index %= b.cap + return index +} + +// next returns the prev index with wrapping. +func (b *Buffer) prev(index int) int { + index-- + if index < 0 { + return b.cap - 1 + } + return index +} + +func (b *Buffer) resetBatch() { + b.batchFirst = 0 + b.batchSize = 0 +} + +func min(a, b int) int { + if b < a { + return b + } + return a +} diff --git a/models/buffer_test.go b/models/buffer_test.go new file mode 100644 index 000000000..fa8fb1668 --- /dev/null +++ b/models/buffer_test.go @@ -0,0 +1,728 @@ +package models + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +type MockMetric struct { + telegraf.Metric + AcceptF func() + RejectF func() + DropF func() +} + +func (m *MockMetric) Accept() { + m.AcceptF() +} + +func (m *MockMetric) Reject() { + m.RejectF() +} + +func (m *MockMetric) Drop() { + m.DropF() +} + +func Metric() telegraf.Metric { + return MetricTime(0) +} + +func MetricTime(sec int64) telegraf.Metric { + m, err := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(sec, 0), + ) + if err != nil { + panic(err) + } + return m +} + +func BenchmarkAddMetrics(b *testing.B) { + buf := NewBuffer("test", "", 10000) + m := Metric() + for n := 0; n < b.N; n++ { + buf.Add(m) + } +} + +func setup(b *Buffer) *Buffer { + b.MetricsAdded.Set(0) + b.MetricsWritten.Set(0) + b.MetricsDropped.Set(0) + return b +} + +func TestBuffer_LenEmpty(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + + require.Equal(t, 0, b.Len()) +} + +func TestBuffer_LenOne(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m) + + require.Equal(t, 1, b.Len()) +} + +func TestBuffer_LenFull(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m, m, m, m, m) + + require.Equal(t, 5, b.Len()) +} + +func TestBuffer_LenOverfill(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + setup(b) + b.Add(m, m, m, m, m, m) + + require.Equal(t, 5, b.Len()) +} + +func TestBuffer_BatchLenZero(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + batch := b.Batch(0) + + require.Len(t, batch, 0) +} + +func TestBuffer_BatchLenBufferEmpty(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + batch := b.Batch(2) + + require.Len(t, batch, 0) +} + +func TestBuffer_BatchLenUnderfill(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m) + batch := b.Batch(2) + + require.Len(t, batch, 1) +} + +func TestBuffer_BatchLenFill(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m, m, m) + batch := b.Batch(2) + require.Len(t, batch, 2) +} + +func TestBuffer_BatchLenExact(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m, m) + batch := b.Batch(2) + require.Len(t, batch, 2) +} + +func TestBuffer_BatchLenLargerThanBuffer(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m, m, m, m, m) + batch := b.Batch(6) + require.Len(t, batch, 5) +} + +func 
TestBuffer_BatchWrap(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m, m, m, m, m) + batch := b.Batch(2) + b.Accept(batch) + b.Add(m, m) + batch = b.Batch(5) + require.Len(t, batch, 5) +} + +func TestBuffer_BatchLatest(t *testing.T) { + b := setup(NewBuffer("test", "", 4)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(3), + MetricTime(2), + }, batch) +} + +func TestBuffer_BatchLatestWrap(t *testing.T) { + b := setup(NewBuffer("test", "", 4)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + batch := b.Batch(2) + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(5), + MetricTime(4), + }, batch) +} + +func TestBuffer_MultipleBatch(t *testing.T) { + b := setup(NewBuffer("test", "", 10)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + batch := b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(6), + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + }, batch) + b.Accept(batch) + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(1), + }, batch) + b.Accept(batch) +} + +func TestBuffer_RejectWithRoom(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Reject(batch) + + require.Equal(t, int64(0), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + MetricTime(1), + }, batch) +} + +func TestBuffer_RejectNothingNewFull(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + batch := b.Batch(2) + b.Reject(batch) + + require.Equal(t, int64(0), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + MetricTime(1), + }, batch) +} + +func TestBuffer_RejectNoRoom(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + b.Add(MetricTime(1)) + + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + b.Add(MetricTime(7)) + b.Add(MetricTime(8)) + + b.Reject(batch) + + require.Equal(t, int64(3), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(8), + MetricTime(7), + MetricTime(6), + MetricTime(5), + MetricTime(4), + }, batch) +} + +func TestBuffer_RejectRoomExact(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + batch := b.Batch(2) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + + b.Reject(batch) + + require.Equal(t, int64(0), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + MetricTime(1), + }, batch) +} + +func TestBuffer_RejectRoomOverwriteOld(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := 
b.Batch(1) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + + b.Reject(batch) + + require.Equal(t, int64(1), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(6), + MetricTime(5), + MetricTime(4), + MetricTime(3), + MetricTime(2), + }, batch) +} + +func TestBuffer_RejectPartialRoom(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + b.Add(MetricTime(1)) + + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + b.Add(MetricTime(7)) + b.Reject(batch) + + require.Equal(t, int64(2), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(7), + MetricTime(6), + MetricTime(5), + MetricTime(4), + MetricTime(3), + }, batch) +} + +func TestBuffer_RejectNewMetricsWrapped(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(2) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + + // buffer: 1, 4, 5; batch: 2, 3 + require.Equal(t, int64(0), b.MetricsDropped.Get()) + + b.Add(MetricTime(6)) + b.Add(MetricTime(7)) + b.Add(MetricTime(8)) + b.Add(MetricTime(9)) + b.Add(MetricTime(10)) + + // buffer: 8, 9, 10, 6, 7; batch: 2, 3 + require.Equal(t, int64(3), b.MetricsDropped.Get()) + + b.Add(MetricTime(11)) + b.Add(MetricTime(12)) + b.Add(MetricTime(13)) + b.Add(MetricTime(14)) + b.Add(MetricTime(15)) + // buffer: 13, 14, 15, 11, 12; batch: 2, 3 + require.Equal(t, int64(8), b.MetricsDropped.Get()) + b.Reject(batch) + + require.Equal(t, int64(10), b.MetricsDropped.Get()) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(15), + MetricTime(14), + MetricTime(13), + MetricTime(12), + MetricTime(11), + }, batch) +} + +func TestBuffer_RejectWrapped(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + + b.Add(MetricTime(6)) + b.Add(MetricTime(7)) + b.Add(MetricTime(8)) + batch := b.Batch(3) + + b.Add(MetricTime(9)) + b.Add(MetricTime(10)) + b.Add(MetricTime(11)) + b.Add(MetricTime(12)) + + b.Reject(batch) + + batch = b.Batch(5) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(12), + MetricTime(11), + MetricTime(10), + MetricTime(9), + MetricTime(8), + }, batch) +} + +func TestBuffer_RejectAdjustFirst(t *testing.T) { + b := setup(NewBuffer("test", "", 10)) + b.Add(MetricTime(1)) + b.Add(MetricTime(2)) + b.Add(MetricTime(3)) + batch := b.Batch(3) + b.Add(MetricTime(4)) + b.Add(MetricTime(5)) + b.Add(MetricTime(6)) + b.Reject(batch) + + b.Add(MetricTime(7)) + b.Add(MetricTime(8)) + b.Add(MetricTime(9)) + batch = b.Batch(3) + b.Add(MetricTime(10)) + b.Add(MetricTime(11)) + b.Add(MetricTime(12)) + b.Reject(batch) + + b.Add(MetricTime(13)) + b.Add(MetricTime(14)) + b.Add(MetricTime(15)) + batch = b.Batch(3) + b.Add(MetricTime(16)) + b.Add(MetricTime(17)) + b.Add(MetricTime(18)) + b.Reject(batch) + + b.Add(MetricTime(19)) + + batch = b.Batch(10) + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + MetricTime(19), + MetricTime(18), + MetricTime(17), + MetricTime(16), + MetricTime(15), + MetricTime(14), + MetricTime(13), + MetricTime(12), + MetricTime(11), + MetricTime(10), + }, batch) +} + +func TestBuffer_AddDropsOverwrittenMetrics(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + + b.Add(m, m, m, m, m) + 
b.Add(m, m, m, m, m) + + require.Equal(t, int64(5), b.MetricsDropped.Get()) + require.Equal(t, int64(0), b.MetricsWritten.Get()) +} + +func TestBuffer_AcceptRemovesBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m, m, m) + batch := b.Batch(2) + b.Accept(batch) + require.Equal(t, 1, b.Len()) +} + +func TestBuffer_RejectLeavesBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m, m, m) + batch := b.Batch(2) + b.Reject(batch) + require.Equal(t, 3, b.Len()) +} + +func TestBuffer_AcceptWritesOverwrittenBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(5) + b.Add(m, m, m, m, m) + b.Accept(batch) + + require.Equal(t, int64(0), b.MetricsDropped.Get()) + require.Equal(t, int64(5), b.MetricsWritten.Get()) +} + +func TestBuffer_BatchRejectDropsOverwrittenBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(5) + b.Add(m, m, m, m, m) + b.Reject(batch) + + require.Equal(t, int64(5), b.MetricsDropped.Get()) + require.Equal(t, int64(0), b.MetricsWritten.Get()) +} + +func TestBuffer_MetricsOverwriteBatchAccept(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(3) + b.Add(m, m, m) + b.Accept(batch) + require.Equal(t, int64(0), b.MetricsDropped.Get(), "dropped") + require.Equal(t, int64(3), b.MetricsWritten.Get(), "written") +} + +func TestBuffer_MetricsOverwriteBatchReject(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(3) + b.Add(m, m, m) + b.Reject(batch) + require.Equal(t, int64(3), b.MetricsDropped.Get()) + require.Equal(t, int64(0), b.MetricsWritten.Get()) +} + +func TestBuffer_MetricsBatchAcceptRemoved(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + + b.Add(m, m, m, m, m) + batch := b.Batch(3) + b.Add(m, m, m, m, m) + b.Accept(batch) + require.Equal(t, int64(2), b.MetricsDropped.Get()) + require.Equal(t, int64(3), b.MetricsWritten.Get()) +} + +func TestBuffer_WrapWithBatch(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + + b.Add(m, m, m) + b.Batch(3) + b.Add(m, m, m, m, m, m) + + require.Equal(t, int64(1), b.MetricsDropped.Get()) +} + +func TestBuffer_BatchNotRemoved(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m, m, m, m, m) + b.Batch(2) + require.Equal(t, 5, b.Len()) +} + +func TestBuffer_BatchRejectAcceptNoop(t *testing.T) { + m := Metric() + b := setup(NewBuffer("test", "", 5)) + b.Add(m, m, m, m, m) + batch := b.Batch(2) + b.Reject(batch) + b.Accept(batch) + require.Equal(t, 5, b.Len()) +} + +func TestBuffer_AcceptCallsMetricAccept(t *testing.T) { + var accept int + mm := &MockMetric{ + Metric: Metric(), + AcceptF: func() { + accept++ + }, + } + b := setup(NewBuffer("test", "", 5)) + b.Add(mm, mm, mm) + batch := b.Batch(2) + b.Accept(batch) + require.Equal(t, 2, accept) +} + +func TestBuffer_AddCallsMetricRejectWhenNoBatch(t *testing.T) { + var reject int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + } + b := setup(NewBuffer("test", "", 5)) + setup(b) + b.Add(mm, mm, mm, mm, mm) + b.Add(mm, mm) + require.Equal(t, 2, reject) +} + +func TestBuffer_AddCallsMetricRejectWhenNotInBatch(t *testing.T) { + var reject int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + } + b := setup(NewBuffer("test", "", 5)) + setup(b) + 
b.Add(mm, mm, mm, mm, mm) + batch := b.Batch(2) + b.Add(mm, mm, mm, mm) + require.Equal(t, 2, reject) + b.Reject(batch) + require.Equal(t, 4, reject) +} + +func TestBuffer_RejectCallsMetricRejectWithOverwritten(t *testing.T) { + var reject int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + } + b := setup(NewBuffer("test", "", 5)) + b.Add(mm, mm, mm, mm, mm) + batch := b.Batch(5) + b.Add(mm, mm) + require.Equal(t, 0, reject) + b.Reject(batch) + require.Equal(t, 2, reject) +} + +func TestBuffer_AddOverwriteAndReject(t *testing.T) { + var reject int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + } + b := setup(NewBuffer("test", "", 5)) + b.Add(mm, mm, mm, mm, mm) + batch := b.Batch(5) + b.Add(mm, mm, mm, mm, mm) + b.Add(mm, mm, mm, mm, mm) + b.Add(mm, mm, mm, mm, mm) + b.Add(mm, mm, mm, mm, mm) + require.Equal(t, 15, reject) + b.Reject(batch) + require.Equal(t, 20, reject) +} + +func TestBuffer_AddOverwriteAndRejectOffset(t *testing.T) { + var reject int + var accept int + mm := &MockMetric{ + Metric: Metric(), + RejectF: func() { + reject++ + }, + AcceptF: func() { + accept++ + }, + } + b := setup(NewBuffer("test", "", 5)) + b.Add(mm, mm, mm) + b.Add(mm, mm, mm, mm) + require.Equal(t, 2, reject) + batch := b.Batch(5) + b.Add(mm, mm, mm, mm) + require.Equal(t, 2, reject) + b.Add(mm, mm, mm, mm) + require.Equal(t, 5, reject) + b.Add(mm, mm, mm, mm) + require.Equal(t, 9, reject) + b.Add(mm, mm, mm, mm) + require.Equal(t, 13, reject) + b.Accept(batch) + require.Equal(t, 13, reject) + require.Equal(t, 5, accept) +} + +func TestBuffer_RejectEmptyBatch(t *testing.T) { + b := setup(NewBuffer("test", "", 5)) + batch := b.Batch(2) + b.Add(MetricTime(1)) + b.Reject(batch) + b.Add(MetricTime(2)) + batch = b.Batch(2) + for _, m := range batch { + require.NotNil(t, m) + } +} diff --git a/internal/models/filter.go b/models/filter.go similarity index 68% rename from internal/models/filter.go rename to models/filter.go index 2848ccf09..13627daad 100644 --- a/internal/models/filter.go +++ b/models/filter.go @@ -3,6 +3,7 @@ package models import ( "fmt" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" ) @@ -78,13 +79,13 @@ func (f *Filter) Compile() error { return fmt.Errorf("Error compiling 'taginclude', %s", err) } - for i, _ := range f.TagDrop { + for i := range f.TagDrop { f.TagDrop[i].filter, err = filter.Compile(f.TagDrop[i].Filter) if err != nil { return fmt.Errorf("Error compiling 'tagdrop', %s", err) } } - for i, _ := range f.TagPass { + for i := range f.TagPass { f.TagPass[i].filter, err = filter.Compile(f.TagPass[i].Filter) if err != nil { return fmt.Errorf("Error compiling 'tagpass', %s", err) @@ -93,45 +94,35 @@ func (f *Filter) Compile() error { return nil } -// Apply applies the filter to the given measurement name, fields map, and -// tags map. It will return false if the metric should be "filtered out", and -// true if the metric should "pass". -// It will modify tags & fields in-place if they need to be deleted. -func (f *Filter) Apply( - measurement string, - fields map[string]interface{}, - tags map[string]string, -) bool { +// Select returns true if the metric matches according to the +// namepass/namedrop and tagpass/tagdrop filters. The metric is not modified. 
+func (f *Filter) Select(metric telegraf.Metric) bool { if !f.isActive { return true } - // check if the measurement name should pass - if !f.shouldNamePass(measurement) { + if !f.shouldNamePass(metric.Name()) { return false } - // check if the tags should pass - if !f.shouldTagsPass(tags) { + if !f.shouldTagsPass(metric.TagList()) { return false } - // filter fields - for fieldkey, _ := range fields { - if !f.shouldFieldPass(fieldkey) { - delete(fields, fieldkey) - } - } - if len(fields) == 0 { - return false - } - - // filter tags - f.filterTags(tags) - return true } +// Modify removes any tags and fields from the metric according to the +// fieldpass/fielddrop and taginclude/tagexclude filters. +func (f *Filter) Modify(metric telegraf.Metric) { + if !f.isActive { + return + } + + f.filterFields(metric) + f.filterTags(metric) +} + // IsActive checking if filter is active func (f *Filter) IsActive() bool { return f.isActive @@ -140,7 +131,6 @@ func (f *Filter) IsActive() bool { // shouldNamePass returns true if the metric should pass, false if should drop // based on the drop/pass filter parameters func (f *Filter) shouldNamePass(key string) bool { - pass := func(f *Filter) bool { if f.namePass.Match(key) { return true @@ -169,44 +159,29 @@ func (f *Filter) shouldNamePass(key string) bool { // shouldFieldPass returns true if the metric should pass, false if should drop // based on the drop/pass filter parameters func (f *Filter) shouldFieldPass(key string) bool { - - pass := func(f *Filter) bool { - if f.fieldPass.Match(key) { - return true - } - return false - } - - drop := func(f *Filter) bool { - if f.fieldDrop.Match(key) { - return false - } - return true - } - if f.fieldPass != nil && f.fieldDrop != nil { - return pass(f) && drop(f) + return f.fieldPass.Match(key) && !f.fieldDrop.Match(key) } else if f.fieldPass != nil { - return pass(f) + return f.fieldPass.Match(key) } else if f.fieldDrop != nil { - return drop(f) + return !f.fieldDrop.Match(key) } - return true } // shouldTagsPass returns true if the metric should pass, false if should drop // based on the tagdrop/tagpass filter parameters -func (f *Filter) shouldTagsPass(tags map[string]string) bool { - +func (f *Filter) shouldTagsPass(tags []*telegraf.Tag) bool { pass := func(f *Filter) bool { for _, pat := range f.TagPass { if pat.filter == nil { continue } - if tagval, ok := tags[pat.Name]; ok { - if pat.filter.Match(tagval) { - return true + for _, tag := range tags { + if tag.Key == pat.Name { + if pat.filter.Match(tag.Value) { + return true + } } } } @@ -218,9 +193,11 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool { if pat.filter == nil { continue } - if tagval, ok := tags[pat.Name]; ok { - if pat.filter.Match(tagval) { - return false + for _, tag := range tags { + if tag.Key == pat.Name { + if pat.filter.Match(tag.Value) { + return false + } } } } @@ -242,22 +219,42 @@ func (f *Filter) shouldTagsPass(tags map[string]string) bool { return true } -// Apply TagInclude and TagExclude filters. -// modifies the tags map in-place. -func (f *Filter) filterTags(tags map[string]string) { - if f.tagInclude != nil { - for k, _ := range tags { - if !f.tagInclude.Match(k) { - delete(tags, k) - } +// filterFields removes fields according to fieldpass/fielddrop. 
+func (f *Filter) filterFields(metric telegraf.Metric) { + filterKeys := []string{} + for _, field := range metric.FieldList() { + if !f.shouldFieldPass(field.Key) { + filterKeys = append(filterKeys, field.Key) } } - if f.tagExclude != nil { - for k, _ := range tags { - if f.tagExclude.Match(k) { - delete(tags, k) + for _, key := range filterKeys { + metric.RemoveField(key) + } +} + +// filterTags removes tags according to taginclude/tagexclude. +func (f *Filter) filterTags(metric telegraf.Metric) { + filterKeys := []string{} + if f.tagInclude != nil { + for _, tag := range metric.TagList() { + if !f.tagInclude.Match(tag.Key) { + filterKeys = append(filterKeys, tag.Key) } } } + for _, key := range filterKeys { + metric.RemoveTag(key) + } + + if f.tagExclude != nil { + for _, tag := range metric.TagList() { + if f.tagExclude.Match(tag.Key) { + filterKeys = append(filterKeys, tag.Key) + } + } + } + for _, key := range filterKeys { + metric.RemoveTag(key) + } } diff --git a/internal/models/filter_test.go b/models/filter_test.go similarity index 62% rename from internal/models/filter_test.go rename to models/filter_test.go index 46f16e835..d241244b9 100644 --- a/internal/models/filter_test.go +++ b/models/filter_test.go @@ -2,22 +2,30 @@ package models import ( "testing" + "time" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) func TestFilter_ApplyEmpty(t *testing.T) { f := Filter{} require.NoError(t, f.Compile()) - assert.False(t, f.IsActive()) + require.False(t, f.IsActive()) - assert.True(t, f.Apply("m", map[string]interface{}{"value": int64(1)}, map[string]string{})) + m, err := metric.New("m", + map[string]string{}, + map[string]interface{}{"value": int64(1)}, + time.Now()) + require.NoError(t, err) + require.True(t, f.Select(m)) } func TestFilter_ApplyTagsDontPass(t *testing.T) { filters := []TagFilter{ - TagFilter{ + { Name: "cpu", Filter: []string{"cpu-*"}, }, @@ -27,11 +35,14 @@ func TestFilter_ApplyTagsDontPass(t *testing.T) { } require.NoError(t, f.Compile()) require.NoError(t, f.Compile()) - assert.True(t, f.IsActive()) + require.True(t, f.IsActive()) - assert.False(t, f.Apply("m", + m, err := metric.New("m", + map[string]string{"cpu": "cpu-total"}, map[string]interface{}{"value": int64(1)}, - map[string]string{"cpu": "cpu-total"})) + time.Now()) + require.NoError(t, err) + require.False(t, f.Select(m)) } func TestFilter_ApplyDeleteFields(t *testing.T) { @@ -40,11 +51,19 @@ func TestFilter_ApplyDeleteFields(t *testing.T) { } require.NoError(t, f.Compile()) require.NoError(t, f.Compile()) - assert.True(t, f.IsActive()) + require.True(t, f.IsActive()) - fields := map[string]interface{}{"value": int64(1), "value2": int64(2)} - assert.True(t, f.Apply("m", fields, nil)) - assert.Equal(t, map[string]interface{}{"value2": int64(2)}, fields) + m, err := metric.New("m", + map[string]string{}, + map[string]interface{}{ + "value": int64(1), + "value2": int64(2), + }, + time.Now()) + require.NoError(t, err) + require.True(t, f.Select(m)) + f.Modify(m) + require.Equal(t, map[string]interface{}{"value2": int64(2)}, m.Fields()) } func TestFilter_ApplyDeleteAllFields(t *testing.T) { @@ -53,10 +72,19 @@ func TestFilter_ApplyDeleteAllFields(t *testing.T) { } require.NoError(t, f.Compile()) require.NoError(t, f.Compile()) - assert.True(t, f.IsActive()) + require.True(t, f.IsActive()) - fields := map[string]interface{}{"value": int64(1), "value2": 
int64(2)} - assert.False(t, f.Apply("m", fields, nil)) + m, err := metric.New("m", + map[string]string{}, + map[string]interface{}{ + "value": int64(1), + "value2": int64(2), + }, + time.Now()) + require.NoError(t, err) + require.True(t, f.Select(m)) + f.Modify(m) + require.Len(t, m.FieldList(), 0) } func TestFilter_Empty(t *testing.T) { @@ -69,7 +97,7 @@ func TestFilter_Empty(t *testing.T) { "foo_bar", "foo.bar", "foo-bar", - "supercalifradjulisticexpialidocious", + "supercalifragilisticexpialidocious", } for _, measurement := range measurements { @@ -217,11 +245,11 @@ func TestFilter_FieldDrop(t *testing.T) { func TestFilter_TagPass(t *testing.T) { filters := []TagFilter{ - TagFilter{ + { Name: "cpu", Filter: []string{"cpu-*"}, }, - TagFilter{ + { Name: "mem", Filter: []string{"mem_free"}, }} @@ -230,20 +258,20 @@ func TestFilter_TagPass(t *testing.T) { } require.NoError(t, f.Compile()) - passes := []map[string]string{ - {"cpu": "cpu-total"}, - {"cpu": "cpu-0"}, - {"cpu": "cpu-1"}, - {"cpu": "cpu-2"}, - {"mem": "mem_free"}, + passes := [][]*telegraf.Tag{ + {{Key: "cpu", Value: "cpu-total"}}, + {{Key: "cpu", Value: "cpu-0"}}, + {{Key: "cpu", Value: "cpu-1"}}, + {{Key: "cpu", Value: "cpu-2"}}, + {{Key: "mem", Value: "mem_free"}}, } - drops := []map[string]string{ - {"cpu": "cputotal"}, - {"cpu": "cpu0"}, - {"cpu": "cpu1"}, - {"cpu": "cpu2"}, - {"mem": "mem_used"}, + drops := [][]*telegraf.Tag{ + {{Key: "cpu", Value: "cputotal"}}, + {{Key: "cpu", Value: "cpu0"}}, + {{Key: "cpu", Value: "cpu1"}}, + {{Key: "cpu", Value: "cpu2"}}, + {{Key: "mem", Value: "mem_used"}}, } for _, tags := range passes { @@ -261,11 +289,11 @@ func TestFilter_TagPass(t *testing.T) { func TestFilter_TagDrop(t *testing.T) { filters := []TagFilter{ - TagFilter{ + { Name: "cpu", Filter: []string{"cpu-*"}, }, - TagFilter{ + { Name: "mem", Filter: []string{"mem_free"}, }} @@ -274,20 +302,20 @@ func TestFilter_TagDrop(t *testing.T) { } require.NoError(t, f.Compile()) - drops := []map[string]string{ - {"cpu": "cpu-total"}, - {"cpu": "cpu-0"}, - {"cpu": "cpu-1"}, - {"cpu": "cpu-2"}, - {"mem": "mem_free"}, + drops := [][]*telegraf.Tag{ + {{Key: "cpu", Value: "cpu-total"}}, + {{Key: "cpu", Value: "cpu-0"}}, + {{Key: "cpu", Value: "cpu-1"}}, + {{Key: "cpu", Value: "cpu-2"}}, + {{Key: "mem", Value: "mem_free"}}, } - passes := []map[string]string{ - {"cpu": "cputotal"}, - {"cpu": "cpu0"}, - {"cpu": "cpu1"}, - {"cpu": "cpu2"}, - {"mem": "mem_used"}, + passes := [][]*telegraf.Tag{ + {{Key: "cpu", Value: "cputotal"}}, + {{Key: "cpu", Value: "cpu0"}}, + {{Key: "cpu", Value: "cpu1"}}, + {{Key: "cpu", Value: "cpu2"}}, + {{Key: "mem", Value: "mem_used"}}, } for _, tags := range passes { @@ -304,58 +332,70 @@ func TestFilter_TagDrop(t *testing.T) { } func TestFilter_FilterTagsNoMatches(t *testing.T) { - pretags := map[string]string{ - "host": "localhost", - "mytag": "foobar", - } + m, err := metric.New("m", + map[string]string{ + "host": "localhost", + "mytag": "foobar", + }, + map[string]interface{}{"value": int64(1)}, + time.Now()) + require.NoError(t, err) f := Filter{ TagExclude: []string{"nomatch"}, } require.NoError(t, f.Compile()) - f.filterTags(pretags) - assert.Equal(t, map[string]string{ + f.filterTags(m) + require.Equal(t, map[string]string{ "host": "localhost", "mytag": "foobar", - }, pretags) + }, m.Tags()) f = Filter{ TagInclude: []string{"nomatch"}, } require.NoError(t, f.Compile()) - f.filterTags(pretags) - assert.Equal(t, map[string]string{}, pretags) + f.filterTags(m) + require.Equal(t, map[string]string{}, m.Tags()) } 
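The split of the old Apply into Select and Modify is easiest to see end to end. The following is a minimal sketch, not part of the patch, using only Filter fields exercised by the tests in this file.

```go
package example

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/models"
)

// selectThenModify first asks Select whether the metric passes the
// namepass/namedrop and tagpass/tagdrop filters (without touching it), and
// only then lets Modify strip tags and fields in place.
func selectThenModify() (telegraf.Metric, error) {
	f := models.Filter{
		NamePass:   []string{"cpu"},
		TagExclude: []string{"host"},
	}
	if err := f.Compile(); err != nil {
		return nil, err
	}

	m, err := metric.New("cpu",
		map[string]string{"host": "localhost", "cpu": "cpu0"},
		map[string]interface{}{"usage_idle": 99.0},
		time.Now())
	if err != nil {
		return nil, err
	}

	if !f.Select(m) {
		return nil, nil // filtered out; the metric itself is left untouched
	}
	f.Modify(m) // removes the "host" tag in place
	return m, nil
}
```

Callers are expected to drop metrics whose field list is empty after Modify, which is exactly what RunningAggregator.Add does later in this diff.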
func TestFilter_FilterTagsMatches(t *testing.T) { - pretags := map[string]string{ - "host": "localhost", - "mytag": "foobar", - } + m, err := metric.New("m", + map[string]string{ + "host": "localhost", + "mytag": "foobar", + }, + map[string]interface{}{"value": int64(1)}, + time.Now()) + require.NoError(t, err) f := Filter{ TagExclude: []string{"ho*"}, } require.NoError(t, f.Compile()) - f.filterTags(pretags) - assert.Equal(t, map[string]string{ + f.filterTags(m) + require.Equal(t, map[string]string{ "mytag": "foobar", - }, pretags) + }, m.Tags()) - pretags = map[string]string{ - "host": "localhost", - "mytag": "foobar", - } + m, err = metric.New("m", + map[string]string{ + "host": "localhost", + "mytag": "foobar", + }, + map[string]interface{}{"value": int64(1)}, + time.Now()) + require.NoError(t, err) f = Filter{ TagInclude: []string{"my*"}, } require.NoError(t, f.Compile()) - f.filterTags(pretags) - assert.Equal(t, map[string]string{ + f.filterTags(m) + require.Equal(t, map[string]string{ "mytag": "foobar", - }, pretags) + }, m.Tags()) } // TestFilter_FilterNamePassAndDrop used for check case when @@ -374,7 +414,7 @@ func TestFilter_FilterNamePassAndDrop(t *testing.T) { require.NoError(t, f.Compile()) for i, name := range inputData { - assert.Equal(t, f.shouldNamePass(name), expectedResult[i]) + require.Equal(t, f.shouldNamePass(name), expectedResult[i]) } } @@ -394,7 +434,7 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) { require.NoError(t, f.Compile()) for i, field := range inputData { - assert.Equal(t, f.shouldFieldPass(field), expectedResult[i]) + require.Equal(t, f.shouldFieldPass(field), expectedResult[i]) } } @@ -402,29 +442,28 @@ func TestFilter_FilterFieldPassAndDrop(t *testing.T) { // both parameters were defined // see: https://github.com/influxdata/telegraf/issues/2860 func TestFilter_FilterTagsPassAndDrop(t *testing.T) { - - inputData := []map[string]string{ - {"tag1": "1", "tag2": "3"}, - {"tag1": "1", "tag2": "2"}, - {"tag1": "2", "tag2": "1"}, - {"tag1": "4", "tag2": "1"}, + inputData := [][]*telegraf.Tag{ + {{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "3"}}, + {{Key: "tag1", Value: "1"}, {Key: "tag2", Value: "2"}}, + {{Key: "tag1", Value: "2"}, {Key: "tag2", Value: "1"}}, + {{Key: "tag1", Value: "4"}, {Key: "tag2", Value: "1"}}, } expectedResult := []bool{false, true, false, false} filterPass := []TagFilter{ - TagFilter{ + { Name: "tag1", Filter: []string{"1", "4"}, }, } filterDrop := []TagFilter{ - TagFilter{ + { Name: "tag1", Filter: []string{"4"}, }, - TagFilter{ + { Name: "tag2", Filter: []string{"3"}, }, @@ -438,7 +477,49 @@ func TestFilter_FilterTagsPassAndDrop(t *testing.T) { require.NoError(t, f.Compile()) for i, tag := range inputData { - assert.Equal(t, f.shouldTagsPass(tag), expectedResult[i]) + require.Equal(t, f.shouldTagsPass(tag), expectedResult[i]) } } + +func BenchmarkFilter(b *testing.B) { + tests := []struct { + name string + filter Filter + metric telegraf.Metric + }{ + { + name: "empty filter", + filter: Filter{}, + metric: testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + { + name: "namepass", + filter: Filter{ + NamePass: []string{"cpu"}, + }, + metric: testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + } + + for _, tt := range tests { + b.Run(tt.name, func(b *testing.B) { + require.NoError(b, tt.filter.Compile()) + for n := 0; n < b.N; n++ { + tt.filter.Select(tt.metric) + } + }) + } 
+} diff --git a/models/log.go b/models/log.go new file mode 100644 index 000000000..a89b17763 --- /dev/null +++ b/models/log.go @@ -0,0 +1,102 @@ +package models + +import ( + "log" + "reflect" + + "github.com/influxdata/telegraf" +) + +// Logger defines a logging structure for plugins. +type Logger struct { + OnErrs []func() + Name string // Name is the plugin name, will be printed in the `[]`. +} + +// NewLogger creates a new logger instance +func NewLogger(pluginType, name, alias string) *Logger { + return &Logger{ + Name: logName(pluginType, name, alias), + } +} + +// OnErr defines a callback that triggers only when errors are about to be written to the log +func (l *Logger) OnErr(f func()) { + l.OnErrs = append(l.OnErrs, f) +} + +// Errorf logs an error message, patterned after log.Printf. +func (l *Logger) Errorf(format string, args ...interface{}) { + for _, f := range l.OnErrs { + f() + } + log.Printf("E! ["+l.Name+"] "+format, args...) +} + +// Error logs an error message, patterned after log.Print. +func (l *Logger) Error(args ...interface{}) { + for _, f := range l.OnErrs { + f() + } + log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...) +} + +// Debugf logs a debug message, patterned after log.Printf. +func (l *Logger) Debugf(format string, args ...interface{}) { + log.Printf("D! ["+l.Name+"] "+format, args...) +} + +// Debug logs a debug message, patterned after log.Print. +func (l *Logger) Debug(args ...interface{}) { + log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...) +} + +// Warnf logs a warning message, patterned after log.Printf. +func (l *Logger) Warnf(format string, args ...interface{}) { + log.Printf("W! ["+l.Name+"] "+format, args...) +} + +// Warn logs a warning message, patterned after log.Print. +func (l *Logger) Warn(args ...interface{}) { + log.Print(append([]interface{}{"W! [" + l.Name + "] "}, args...)...) +} + +// Infof logs an information message, patterned after log.Printf. +func (l *Logger) Infof(format string, args ...interface{}) { + log.Printf("I! ["+l.Name+"] "+format, args...) +} + +// Info logs an information message, patterned after log.Print. +func (l *Logger) Info(args ...interface{}) { + log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...) +} + +// logName returns the log-friendly name/type. +func logName(pluginType, name, alias string) string { + if alias == "" { + return pluginType + "." + name + } + return pluginType + "." 
+ name + "::" + alias +} + +func setLogIfExist(i interface{}, log telegraf.Logger) { + valI := reflect.ValueOf(i) + + if valI.Type().Kind() != reflect.Ptr { + valI = reflect.New(reflect.TypeOf(i)) + } + + field := valI.Elem().FieldByName("Log") + if !field.IsValid() { + return + } + + switch field.Type().String() { + case "telegraf.Logger": + if field.CanSet() { + field.Set(reflect.ValueOf(log)) + } + } + + return +} diff --git a/models/log_test.go b/models/log_test.go new file mode 100644 index 000000000..2b5ec39c6 --- /dev/null +++ b/models/log_test.go @@ -0,0 +1,24 @@ +package models + +import ( + "testing" + + "github.com/influxdata/telegraf/selfstat" + "github.com/stretchr/testify/require" +) + +func TestErrorCounting(t *testing.T) { + reg := selfstat.Register( + "gather", + "errors", + map[string]string{"input": "test"}, + ) + iLog := Logger{Name: "inputs.test"} + iLog.OnErr(func() { + reg.Incr(1) + }) + iLog.Error("something went wrong") + iLog.Errorf("something went wrong") + + require.Equal(t, int64(2), reg.Get()) +} diff --git a/models/makemetric.go b/models/makemetric.go new file mode 100644 index 000000000..29ef5f452 --- /dev/null +++ b/models/makemetric.go @@ -0,0 +1,42 @@ +package models + +import ( + "github.com/influxdata/telegraf" +) + +// Makemetric applies new metric plugin and agent measurement and tag +// settings. +func makemetric( + metric telegraf.Metric, + nameOverride string, + namePrefix string, + nameSuffix string, + tags map[string]string, + globalTags map[string]string, +) telegraf.Metric { + if len(nameOverride) != 0 { + metric.SetName(nameOverride) + } + + if len(namePrefix) != 0 { + metric.AddPrefix(namePrefix) + } + if len(nameSuffix) != 0 { + metric.AddSuffix(nameSuffix) + } + + // Apply plugin-wide tags + for k, v := range tags { + if _, ok := metric.GetTag(k); !ok { + metric.AddTag(k, v) + } + } + // Apply global tags + for k, v := range globalTags { + if _, ok := metric.GetTag(k); !ok { + metric.AddTag(k, v) + } + } + + return metric +} diff --git a/models/running_aggregator.go b/models/running_aggregator.go new file mode 100644 index 000000000..d0ad944b1 --- /dev/null +++ b/models/running_aggregator.go @@ -0,0 +1,183 @@ +package models + +import ( + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/selfstat" +) + +type RunningAggregator struct { + sync.Mutex + Aggregator telegraf.Aggregator + Config *AggregatorConfig + periodStart time.Time + periodEnd time.Time + log telegraf.Logger + + MetricsPushed selfstat.Stat + MetricsFiltered selfstat.Stat + MetricsDropped selfstat.Stat + PushTime selfstat.Stat +} + +func NewRunningAggregator(aggregator telegraf.Aggregator, config *AggregatorConfig) *RunningAggregator { + tags := map[string]string{"aggregator": config.Name} + if config.Alias != "" { + tags["alias"] = config.Alias + } + + aggErrorsRegister := selfstat.Register("aggregate", "errors", tags) + logger := NewLogger("aggregators", config.Name, config.Alias) + logger.OnErr(func() { + aggErrorsRegister.Incr(1) + }) + + setLogIfExist(aggregator, logger) + + return &RunningAggregator{ + Aggregator: aggregator, + Config: config, + MetricsPushed: selfstat.Register( + "aggregate", + "metrics_pushed", + tags, + ), + MetricsFiltered: selfstat.Register( + "aggregate", + "metrics_filtered", + tags, + ), + MetricsDropped: selfstat.Register( + "aggregate", + "metrics_dropped", + tags, + ), + PushTime: selfstat.Register( + "aggregate", + "push_time_ns", + tags, + ), + log: logger, + } +} 
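For orientation, a sketch (illustrative, not part of the patch) of how the constructor, the aggregation window and the push cycle fit together; it assumes the caller supplies a telegraf.Aggregator implementation and an accumulator, as the tests below do.

```go
package example

import (
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/models"
)

// runOnePeriod shows the window handling: only metrics inside the current
// period (extended by Grace and Delay) reach the wrapped aggregator, and
// Push emits the aggregates and rolls the window forward by one Period.
func runOnePeriod(agg telegraf.Aggregator, acc telegraf.Accumulator, metrics []telegraf.Metric) error {
	ra := models.NewRunningAggregator(agg, &models.AggregatorConfig{
		Name:   "example",
		Period: 30 * time.Second,
		Grace:  5 * time.Second,
	})
	if err := ra.Config.Filter.Compile(); err != nil {
		return err
	}

	now := time.Now()
	ra.UpdateWindow(now, now.Add(ra.Period()))

	for _, m := range metrics {
		// Add reports whether the caller should drop the original metric
		// (Config.DropOriginal); out-of-window metrics are only counted.
		ra.Add(m)
	}

	ra.Push(acc)
	return nil
}
```

Because Add copies the metric with metric.FromMetric before aggregating, delivery tracking on the original is unaffected by whether the aggregate is ever pushed.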
+ +// AggregatorConfig is the common config for all aggregators. +type AggregatorConfig struct { + Name string + Alias string + DropOriginal bool + Period time.Duration + Delay time.Duration + Grace time.Duration + + NameOverride string + MeasurementPrefix string + MeasurementSuffix string + Tags map[string]string + Filter Filter +} + +func (r *RunningAggregator) LogName() string { + return logName("aggregators", r.Config.Name, r.Config.Alias) +} + +func (r *RunningAggregator) Init() error { + if p, ok := r.Aggregator.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + } + return nil +} + +func (r *RunningAggregator) Period() time.Duration { + return r.Config.Period +} + +func (r *RunningAggregator) EndPeriod() time.Time { + return r.periodEnd +} + +func (r *RunningAggregator) UpdateWindow(start, until time.Time) { + r.periodStart = start + r.periodEnd = until + r.log.Debugf("Updated aggregation range [%s, %s]", start, until) +} + +func (r *RunningAggregator) MakeMetric(metric telegraf.Metric) telegraf.Metric { + m := makemetric( + metric, + r.Config.NameOverride, + r.Config.MeasurementPrefix, + r.Config.MeasurementSuffix, + r.Config.Tags, + nil) + + if m != nil { + m.SetAggregate(true) + } + + r.MetricsPushed.Incr(1) + + return m +} + +// Add a metric to the aggregator and return true if the original metric +// should be dropped. +func (r *RunningAggregator) Add(m telegraf.Metric) bool { + if ok := r.Config.Filter.Select(m); !ok { + return false + } + + // Make a copy of the metric but don't retain tracking. We do not fail a + // delivery due to the aggregation not being sent because we can't create + // aggregations of historical data. Additionally, waiting for the + // aggregation to be pushed would introduce a hefty latency to delivery. + m = metric.FromMetric(m) + + r.Config.Filter.Modify(m) + if len(m.FieldList()) == 0 { + r.MetricsFiltered.Incr(1) + return r.Config.DropOriginal + } + + r.Lock() + defer r.Unlock() + + if m.Time().Before(r.periodStart.Add(-r.Config.Grace)) || m.Time().After(r.periodEnd.Add(r.Config.Delay)) { + r.log.Debugf("Metric is outside aggregation window; discarding. 
%s: m: %s e: %s g: %s", + m.Time(), r.periodStart, r.periodEnd, r.Config.Grace) + r.MetricsDropped.Incr(1) + return r.Config.DropOriginal + } + + r.Aggregator.Add(m) + return r.Config.DropOriginal +} + +func (r *RunningAggregator) Push(acc telegraf.Accumulator) { + r.Lock() + defer r.Unlock() + + since := r.periodEnd + until := r.periodEnd.Add(r.Config.Period) + r.UpdateWindow(since, until) + + r.push(acc) + r.Aggregator.Reset() +} + +func (r *RunningAggregator) push(acc telegraf.Accumulator) { + start := time.Now() + r.Aggregator.Push(acc) + elapsed := time.Since(start) + r.PushTime.Incr(elapsed.Nanoseconds()) +} + +func (r *RunningAggregator) Log() telegraf.Logger { + return r.log +} diff --git a/models/running_aggregator_test.go b/models/running_aggregator_test.go new file mode 100644 index 000000000..a85885965 --- /dev/null +++ b/models/running_aggregator_test.go @@ -0,0 +1,264 @@ +package models + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestAdd(t *testing.T) { + a := &TestAggregator{} + ra := NewRunningAggregator(a, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"*"}, + }, + Period: time.Millisecond * 500, + }) + require.NoError(t, ra.Config.Filter.Compile()) + acc := testutil.Accumulator{} + + now := time.Now() + ra.UpdateWindow(now, now.Add(ra.Config.Period)) + + m := testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + time.Now().Add(time.Millisecond*150), + telegraf.Untyped) + require.False(t, ra.Add(m)) + ra.Push(&acc) + + require.Equal(t, 1, len(acc.Metrics)) + require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"]) +} + +func TestAddMetricsOutsideCurrentPeriod(t *testing.T) { + a := &TestAggregator{} + ra := NewRunningAggregator(a, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"*"}, + }, + Period: time.Millisecond * 500, + }) + require.NoError(t, ra.Config.Filter.Compile()) + acc := testutil.Accumulator{} + now := time.Now() + ra.UpdateWindow(now, now.Add(ra.Config.Period)) + + m := testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + now.Add(-time.Hour), + telegraf.Untyped, + ) + require.False(t, ra.Add(m)) + + // metric after current period + m = testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + now.Add(time.Hour), + telegraf.Untyped, + ) + require.False(t, ra.Add(m)) + + // "now" metric + m = testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + time.Now().Add(time.Millisecond*50), + telegraf.Untyped) + require.False(t, ra.Add(m)) + + ra.Push(&acc) + require.Equal(t, 1, len(acc.Metrics)) + require.Equal(t, int64(101), acc.Metrics[0].Fields["sum"]) +} + +func TestAddMetricsOutsideCurrentPeriodWithGrace(t *testing.T) { + a := &TestAggregator{} + ra := NewRunningAggregator(a, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"*"}, + }, + Period: time.Millisecond * 1500, + Grace: time.Millisecond * 500, + }) + require.NoError(t, ra.Config.Filter.Compile()) + acc := testutil.Accumulator{} + now := time.Now() + ra.UpdateWindow(now, now.Add(ra.Config.Period)) + + m := testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + now.Add(-time.Hour), + 
telegraf.Untyped, + ) + require.False(t, ra.Add(m)) + + // metric before current period (late) + m = testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(100), + }, + now.Add(-time.Millisecond*1000), + telegraf.Untyped, + ) + require.False(t, ra.Add(m)) + + // metric before current period, but within grace period (late) + m = testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(102), + }, + now.Add(-time.Millisecond*200), + telegraf.Untyped, + ) + require.False(t, ra.Add(m)) + + // "now" metric + m = testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + time.Now().Add(time.Millisecond*50), + telegraf.Untyped) + require.False(t, ra.Add(m)) + + ra.Push(&acc) + require.Equal(t, 1, len(acc.Metrics)) + require.Equal(t, int64(203), acc.Metrics[0].Fields["sum"]) +} + +func TestAddAndPushOnePeriod(t *testing.T) { + a := &TestAggregator{} + ra := NewRunningAggregator(a, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"*"}, + }, + Period: time.Millisecond * 500, + }) + require.NoError(t, ra.Config.Filter.Compile()) + acc := testutil.Accumulator{} + + now := time.Now() + ra.UpdateWindow(now, now.Add(ra.Config.Period)) + + m := testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + time.Now().Add(time.Millisecond*100), + telegraf.Untyped) + require.False(t, ra.Add(m)) + + ra.Push(&acc) + + acc.AssertContainsFields(t, "TestMetric", map[string]interface{}{"sum": int64(101)}) +} + +func TestAddDropOriginal(t *testing.T) { + ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + NamePass: []string{"RI*"}, + }, + DropOriginal: true, + }) + require.NoError(t, ra.Config.Filter.Compile()) + + now := time.Now() + ra.UpdateWindow(now, now.Add(ra.Config.Period)) + + m := testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + now, + telegraf.Untyped) + require.True(t, ra.Add(m)) + + // this metric name doesn't match the filter, so Add will return false + m2 := testutil.MustMetric("foobar", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, + now, + telegraf.Untyped) + require.False(t, ra.Add(m2)) +} + +func TestAddDoesNotModifyMetric(t *testing.T) { + ra := NewRunningAggregator(&TestAggregator{}, &AggregatorConfig{ + Name: "TestRunningAggregator", + Filter: Filter{ + FieldPass: []string{"a"}, + }, + DropOriginal: true, + }) + require.NoError(t, ra.Config.Filter.Compile()) + + now := time.Now() + + m := testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": int64(42), + "b": int64(42), + }, + now) + expected := m.Copy() + ra.Add(m) + + testutil.RequireMetricEqual(t, expected, m) +} + +type TestAggregator struct { + sum int64 +} + +func (t *TestAggregator) Description() string { return "" } +func (t *TestAggregator) SampleConfig() string { return "" } +func (t *TestAggregator) Reset() { + t.sum = 0 +} + +func (t *TestAggregator) Push(acc telegraf.Accumulator) { + acc.AddFields("TestMetric", + map[string]interface{}{"sum": t.sum}, + map[string]string{}, + ) +} + +func (t *TestAggregator) Add(in telegraf.Metric) { + for _, v := range in.Fields() { + if vi, ok := v.(int64); ok { + t.sum += vi + } + } +} diff --git a/models/running_input.go b/models/running_input.go new file mode 100644 index 000000000..bb1033fdd 
--- /dev/null +++ b/models/running_input.go @@ -0,0 +1,127 @@ +package models + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" +) + +var ( + GlobalMetricsGathered = selfstat.Register("agent", "metrics_gathered", map[string]string{}) + GlobalGatherErrors = selfstat.Register("agent", "gather_errors", map[string]string{}) +) + +type RunningInput struct { + Input telegraf.Input + Config *InputConfig + + log telegraf.Logger + defaultTags map[string]string + + MetricsGathered selfstat.Stat + GatherTime selfstat.Stat +} + +func NewRunningInput(input telegraf.Input, config *InputConfig) *RunningInput { + tags := map[string]string{"input": config.Name} + if config.Alias != "" { + tags["alias"] = config.Alias + } + + inputErrorsRegister := selfstat.Register("gather", "errors", tags) + logger := NewLogger("inputs", config.Name, config.Alias) + logger.OnErr(func() { + inputErrorsRegister.Incr(1) + GlobalGatherErrors.Incr(1) + }) + setLogIfExist(input, logger) + + return &RunningInput{ + Input: input, + Config: config, + MetricsGathered: selfstat.Register( + "gather", + "metrics_gathered", + tags, + ), + GatherTime: selfstat.RegisterTiming( + "gather", + "gather_time_ns", + tags, + ), + log: logger, + } +} + +// InputConfig is the common config for all inputs. +type InputConfig struct { + Name string + Alias string + Interval time.Duration + + NameOverride string + MeasurementPrefix string + MeasurementSuffix string + Tags map[string]string + Filter Filter +} + +func (r *RunningInput) metricFiltered(metric telegraf.Metric) { + metric.Drop() +} + +func (r *RunningInput) LogName() string { + return logName("inputs", r.Config.Name, r.Config.Alias) +} + +func (r *RunningInput) Init() error { + if p, ok := r.Input.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + } + return nil +} + +func (r *RunningInput) MakeMetric(metric telegraf.Metric) telegraf.Metric { + if ok := r.Config.Filter.Select(metric); !ok { + r.metricFiltered(metric) + return nil + } + + m := makemetric( + metric, + r.Config.NameOverride, + r.Config.MeasurementPrefix, + r.Config.MeasurementSuffix, + r.Config.Tags, + r.defaultTags) + + r.Config.Filter.Modify(metric) + if len(metric.FieldList()) == 0 { + r.metricFiltered(metric) + return nil + } + + r.MetricsGathered.Incr(1) + GlobalMetricsGathered.Incr(1) + return m +} + +func (r *RunningInput) Gather(acc telegraf.Accumulator) error { + start := time.Now() + err := r.Input.Gather(acc) + elapsed := time.Since(start) + r.GatherTime.Incr(elapsed.Nanoseconds()) + return err +} + +func (r *RunningInput) SetDefaultTags(tags map[string]string) { + r.defaultTags = tags +} + +func (r *RunningInput) Log() telegraf.Logger { + return r.log +} diff --git a/internal/models/running_input_test.go b/models/running_input_test.go similarity index 57% rename from internal/models/running_input_test.go rename to models/running_input_test.go index 4d016851a..ff3747116 100644 --- a/internal/models/running_input_test.go +++ b/models/running_input_test.go @@ -4,26 +4,61 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/selfstat" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func TestMakeMetricFilterAfterApplyingGlobalTags(t *testing.T) { + now := time.Now() + ri := NewRunningInput(&testInput{}, &InputConfig{ + Filter: Filter{ + TagInclude: []string{"b"}, + }, + }) + 
require.NoError(t, ri.Config.Filter.Compile()) + ri.SetDefaultTags(map[string]string{"a": "x", "b": "y"}) + + m, err := metric.New("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + now) + require.NoError(t, err) + + actual := ri.MakeMetric(m) + + expected, err := metric.New("cpu", + map[string]string{ + "b": "y", + }, + map[string]interface{}{ + "value": 42, + }, + now) + require.NoError(t, err) + + testutil.RequireMetricEqual(t, expected, actual) +} + func TestMakeMetricNoFields(t *testing.T) { now := time.Now() ri := NewRunningInput(&testInput{}, &InputConfig{ Name: "TestRunningInput", }) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{}, now, - ) + telegraf.Untyped) + m = ri.MakeMetric(m) + require.NoError(t, err) assert.Nil(t, m) } @@ -34,16 +69,16 @@ func TestMakeMetricNilFields(t *testing.T) { Name: "TestRunningInput", }) - m := ri.MakeMetric( - "RITest", + m, err := metric.New("RITest", + map[string]string{}, map[string]interface{}{ - "value": int(101), + "value": int64(101), "nil": nil, }, - map[string]string{}, - telegraf.Untyped, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("RITest", map[string]string{}, @@ -66,16 +101,14 @@ func TestMakeMetricWithPluginTags(t *testing.T) { }, }) - ri.SetTrace(true) - assert.Equal(t, true, ri.Trace()) - - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - nil, - telegraf.Untyped, + m := testutil.MustMetric("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + m = ri.MakeMetric(m) expected, err := metric.New("RITest", map[string]string{ @@ -100,17 +133,17 @@ func TestMakeMetricFilteredOut(t *testing.T) { Filter: Filter{NamePass: []string{"foobar"}}, }) - ri.SetTrace(true) - assert.Equal(t, true, ri.Trace()) assert.NoError(t, ri.Config.Filter.Compile()) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, - nil, - telegraf.Untyped, + m, err := metric.New("RITest", + map[string]string{}, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + m = ri.MakeMetric(m) + require.NoError(t, err) assert.Nil(t, m) } @@ -123,16 +156,14 @@ func TestMakeMetricWithDaemonTags(t *testing.T) { "foo": "bar", }) - ri.SetTrace(true) - assert.Equal(t, true, ri.Trace()) - - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m := testutil.MustMetric("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + m = ri.MakeMetric(m) expected, err := metric.New("RITest", map[string]string{ "foo": "bar", @@ -153,13 +184,15 @@ func TestMakeMetricNameOverride(t *testing.T) { NameOverride: "foobar", }) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("foobar", nil, map[string]interface{}{ @@ -178,13 +211,15 @@ func TestMakeMetricNamePrefix(t *testing.T) { MeasurementPrefix: "foobar_", }) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": 
int64(101), + }, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("foobar_RITest", nil, map[string]interface{}{ @@ -203,13 +238,15 @@ func TestMakeMetricNameSuffix(t *testing.T) { MeasurementSuffix: "_foobar", }) - m := ri.MakeMetric( - "RITest", - map[string]interface{}{"value": int(101)}, + m, err := metric.New("RITest", map[string]string{}, - telegraf.Untyped, + map[string]interface{}{ + "value": int64(101), + }, now, - ) + telegraf.Untyped) + require.NoError(t, err) + m = ri.MakeMetric(m) expected, err := metric.New("RITest_foobar", nil, map[string]interface{}{ @@ -221,6 +258,35 @@ func TestMakeMetricNameSuffix(t *testing.T) { require.Equal(t, expected, m) } +func TestMetricErrorCounters(t *testing.T) { + ri := NewRunningInput(&testInput{}, &InputConfig{ + Name: "TestMetricErrorCounters", + }) + + getGatherErrors := func() int64 { + for _, r := range selfstat.Metrics() { + tag, hasTag := r.GetTag("input") + if r.Name() == "internal_gather" && hasTag && tag == "TestMetricErrorCounters" { + errCount, ok := r.GetField("errors") + if !ok { + t.Fatal("Expected error field") + } + return errCount.(int64) + } + } + return 0 + } + + before := getGatherErrors() + + ri.Log().Error("Oh no") + + after := getGatherErrors() + + require.Greater(t, after, before) + require.GreaterOrEqual(t, int64(1), GlobalGatherErrors.Get()) +} + type testInput struct{} func (t *testInput) Description() string { return "" } diff --git a/models/running_output.go b/models/running_output.go new file mode 100644 index 000000000..452ab796b --- /dev/null +++ b/models/running_output.go @@ -0,0 +1,267 @@ +package models + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" +) + +const ( + // Default size of metrics batch size. + DEFAULT_METRIC_BATCH_SIZE = 1000 + + // Default number of metrics kept. It should be a multiple of batch size. 
+ DEFAULT_METRIC_BUFFER_LIMIT = 10000 +) + +// OutputConfig containing name and filter +type OutputConfig struct { + Name string + Alias string + Filter Filter + + FlushInterval time.Duration + FlushJitter *time.Duration + MetricBufferLimit int + MetricBatchSize int + + NameOverride string + NamePrefix string + NameSuffix string +} + +// RunningOutput contains the output configuration +type RunningOutput struct { + // Must be 64-bit aligned + newMetricsCount int64 + droppedMetrics int64 + + Output telegraf.Output + Config *OutputConfig + MetricBufferLimit int + MetricBatchSize int + + MetricsFiltered selfstat.Stat + WriteTime selfstat.Stat + + BatchReady chan time.Time + + buffer *Buffer + log telegraf.Logger + + aggMutex sync.Mutex +} + +func NewRunningOutput( + name string, + output telegraf.Output, + config *OutputConfig, + batchSize int, + bufferLimit int, +) *RunningOutput { + tags := map[string]string{"output": config.Name} + if config.Alias != "" { + tags["alias"] = config.Alias + } + + writeErrorsRegister := selfstat.Register("write", "errors", tags) + logger := NewLogger("outputs", config.Name, config.Alias) + logger.OnErr(func() { + writeErrorsRegister.Incr(1) + }) + setLogIfExist(output, logger) + + if config.MetricBufferLimit > 0 { + bufferLimit = config.MetricBufferLimit + } + if bufferLimit == 0 { + bufferLimit = DEFAULT_METRIC_BUFFER_LIMIT + } + if config.MetricBatchSize > 0 { + batchSize = config.MetricBatchSize + } + if batchSize == 0 { + batchSize = DEFAULT_METRIC_BATCH_SIZE + } + + ro := &RunningOutput{ + buffer: NewBuffer(config.Name, config.Alias, bufferLimit), + BatchReady: make(chan time.Time, 1), + Output: output, + Config: config, + MetricBufferLimit: bufferLimit, + MetricBatchSize: batchSize, + MetricsFiltered: selfstat.Register( + "write", + "metrics_filtered", + tags, + ), + WriteTime: selfstat.RegisterTiming( + "write", + "write_time_ns", + tags, + ), + log: logger, + } + + return ro +} + +func (r *RunningOutput) LogName() string { + return logName("outputs", r.Config.Name, r.Config.Alias) +} + +func (ro *RunningOutput) metricFiltered(metric telegraf.Metric) { + ro.MetricsFiltered.Incr(1) + metric.Drop() +} + +func (r *RunningOutput) Init() error { + if p, ok := r.Output.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + + } + return nil +} + +// AddMetric adds a metric to the output. +// +// Takes ownership of metric +func (ro *RunningOutput) AddMetric(metric telegraf.Metric) { + if ok := ro.Config.Filter.Select(metric); !ok { + ro.metricFiltered(metric) + return + } + + ro.Config.Filter.Modify(metric) + if len(metric.FieldList()) == 0 { + ro.metricFiltered(metric) + return + } + + if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { + ro.aggMutex.Lock() + output.Add(metric) + ro.aggMutex.Unlock() + return + } + + if len(ro.Config.NameOverride) > 0 { + metric.SetName(ro.Config.NameOverride) + } + + if len(ro.Config.NamePrefix) > 0 { + metric.AddPrefix(ro.Config.NamePrefix) + } + + if len(ro.Config.NameSuffix) > 0 { + metric.AddSuffix(ro.Config.NameSuffix) + } + + dropped := ro.buffer.Add(metric) + atomic.AddInt64(&ro.droppedMetrics, int64(dropped)) + + count := atomic.AddInt64(&ro.newMetricsCount, 1) + if count == int64(ro.MetricBatchSize) { + atomic.StoreInt64(&ro.newMetricsCount, 0) + select { + case ro.BatchReady <- time.Now(): + default: + } + } +} + +// Write writes all metrics to the output, stopping when all have been sent on +// or error. 
+func (ro *RunningOutput) Write() error { + if output, ok := ro.Output.(telegraf.AggregatingOutput); ok { + ro.aggMutex.Lock() + metrics := output.Push() + ro.buffer.Add(metrics...) + output.Reset() + ro.aggMutex.Unlock() + } + + atomic.StoreInt64(&ro.newMetricsCount, 0) + + // Only process the metrics in the buffer now. Metrics added while we are + // writing will be sent on the next call. + nBuffer := ro.buffer.Len() + nBatches := nBuffer/ro.MetricBatchSize + 1 + for i := 0; i < nBatches; i++ { + batch := ro.buffer.Batch(ro.MetricBatchSize) + if len(batch) == 0 { + break + } + + err := ro.write(batch) + if err != nil { + ro.buffer.Reject(batch) + return err + } + ro.buffer.Accept(batch) + } + return nil +} + +// WriteBatch writes a single batch of metrics to the output. +func (ro *RunningOutput) WriteBatch() error { + batch := ro.buffer.Batch(ro.MetricBatchSize) + if len(batch) == 0 { + return nil + } + + err := ro.write(batch) + if err != nil { + ro.buffer.Reject(batch) + return err + } + ro.buffer.Accept(batch) + + return nil +} + +// Close closes the output +func (r *RunningOutput) Close() { + err := r.Output.Close() + if err != nil { + r.log.Errorf("Error closing output: %v", err) + } +} + +func (r *RunningOutput) write(metrics []telegraf.Metric) error { + dropped := atomic.LoadInt64(&r.droppedMetrics) + if dropped > 0 { + r.log.Warnf("Metric buffer overflow; %d metrics have been dropped", dropped) + atomic.StoreInt64(&r.droppedMetrics, 0) + } + + start := time.Now() + err := r.Output.Write(metrics) + elapsed := time.Since(start) + r.WriteTime.Incr(elapsed.Nanoseconds()) + + if err == nil { + r.log.Debugf("Wrote batch of %d metrics in %s", len(metrics), elapsed) + } + return err +} + +func (r *RunningOutput) LogBufferStatus() { + nBuffer := r.buffer.Len() + r.log.Debugf("Buffer fullness: %d / %d metrics", nBuffer, r.MetricBufferLimit) +} + +func (r *RunningOutput) Log() telegraf.Logger { + return r.log +} + +func (r *RunningOutput) BufferLength() int { + return r.buffer.Len() +} diff --git a/internal/models/running_output_test.go b/models/running_output_test.go similarity index 80% rename from internal/models/running_output_test.go rename to models/running_output_test.go index bd39f2f9b..89cd3beec 100644 --- a/internal/models/running_output_test.go +++ b/models/running_output_test.go @@ -4,10 +4,11 @@ import ( "fmt" "sync" "testing" + "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -28,6 +29,14 @@ var next5 = []telegraf.Metric{ testutil.TestMetric(101, "metric10"), } +func reverse(metrics []telegraf.Metric) []telegraf.Metric { + result := make([]telegraf.Metric, 0, len(metrics)) + for i := len(metrics) - 1; i >= 0; i-- { + result = append(result, metrics[i]) + } + return result +} + // Benchmark adding metrics. func BenchmarkRunningOutputAddWrite(b *testing.B) { conf := &OutputConfig{ @@ -75,23 +84,6 @@ func BenchmarkRunningOutputAddFailWrites(b *testing.B) { } } -func TestAddingNilMetric(t *testing.T) { - conf := &OutputConfig{ - Filter: Filter{}, - } - - m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 1000, 10000) - - ro.AddMetric(nil) - ro.AddMetric(nil) - ro.AddMetric(nil) - - err := ro.Write() - assert.NoError(t, err) - assert.Len(t, m.Metrics(), 0) -} - // Test that NameDrop filters ger properly applied. 
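The Write/WriteBatch pair above is what lets a caller flush a full batch as soon as BatchReady fires instead of waiting out the flush interval, while Reject puts a failed batch back so nothing is lost until the buffer overflows. A rough sketch of a driver loop against this API; the function and package names are illustrative, and this is not Telegraf's actual agent code:

```go
package example

import (
	"context"
	"time"

	"github.com/influxdata/telegraf/models"
)

// flushLoop drives one RunningOutput: WriteBatch whenever a full batch is
// signalled on BatchReady, a full Write on every interval tick, and a
// best-effort final Write at shutdown.
func flushLoop(ctx context.Context, ro *models.RunningOutput, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			if err := ro.Write(); err != nil {
				ro.Log().Errorf("Error writing metrics: %v", err)
			}
			return
		case <-ro.BatchReady:
			if err := ro.WriteBatch(); err != nil {
				ro.Log().Errorf("Error writing batch: %v", err)
			}
		case <-ticker.C:
			if err := ro.Write(); err != nil {
				ro.Log().Errorf("Error writing metrics: %v", err)
			}
		}
	}
}
```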
func TestRunningOutput_DropFilter(t *testing.T) { conf := &OutputConfig{ @@ -226,6 +218,60 @@ func TestRunningOutput_TagIncludeMatch(t *testing.T) { assert.Len(t, m.Metrics()[0].Tags(), 1) } +// Test that measurement name overriding correctly +func TestRunningOutput_NameOverride(t *testing.T) { + conf := &OutputConfig{ + NameOverride: "new_metric_name", + } + + m := &mockOutput{} + ro := NewRunningOutput("test", m, conf, 1000, 10000) + + ro.AddMetric(testutil.TestMetric(101, "metric1")) + assert.Len(t, m.Metrics(), 0) + + err := ro.Write() + assert.NoError(t, err) + assert.Len(t, m.Metrics(), 1) + assert.Equal(t, "new_metric_name", m.Metrics()[0].Name()) +} + +// Test that measurement name prefix is added correctly +func TestRunningOutput_NamePrefix(t *testing.T) { + conf := &OutputConfig{ + NamePrefix: "prefix_", + } + + m := &mockOutput{} + ro := NewRunningOutput("test", m, conf, 1000, 10000) + + ro.AddMetric(testutil.TestMetric(101, "metric1")) + assert.Len(t, m.Metrics(), 0) + + err := ro.Write() + assert.NoError(t, err) + assert.Len(t, m.Metrics(), 1) + assert.Equal(t, "prefix_metric1", m.Metrics()[0].Name()) +} + +// Test that measurement name suffix is added correctly +func TestRunningOutput_NameSuffix(t *testing.T) { + conf := &OutputConfig{ + NameSuffix: "_suffix", + } + + m := &mockOutput{} + ro := NewRunningOutput("test", m, conf, 1000, 10000) + + ro.AddMetric(testutil.TestMetric(101, "metric1")) + assert.Len(t, m.Metrics(), 0) + + err := ro.Write() + assert.NoError(t, err) + assert.Len(t, m.Metrics(), 1) + assert.Equal(t, "metric1_suffix", m.Metrics()[0].Name()) +} + // Test that we can write metrics with simple default setup. func TestRunningOutputDefault(t *testing.T) { conf := &OutputConfig{ @@ -248,56 +294,6 @@ func TestRunningOutputDefault(t *testing.T) { assert.Len(t, m.Metrics(), 10) } -// Test that running output doesn't flush until it's full when -// FlushBufferWhenFull is set. -func TestRunningOutputFlushWhenFull(t *testing.T) { - conf := &OutputConfig{ - Filter: Filter{}, - } - - m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 6, 10) - - // Fill buffer to 1 under limit - for _, metric := range first5 { - ro.AddMetric(metric) - } - // no flush yet - assert.Len(t, m.Metrics(), 0) - - // add one more metric - ro.AddMetric(next5[0]) - // now it flushed - assert.Len(t, m.Metrics(), 6) - - // add one more metric and write it manually - ro.AddMetric(next5[1]) - err := ro.Write() - assert.NoError(t, err) - assert.Len(t, m.Metrics(), 7) -} - -// Test that running output doesn't flush until it's full when -// FlushBufferWhenFull is set, twice. -func TestRunningOutputMultiFlushWhenFull(t *testing.T) { - conf := &OutputConfig{ - Filter: Filter{}, - } - - m := &mockOutput{} - ro := NewRunningOutput("test", m, conf, 4, 12) - - // Fill buffer past limit twive - for _, metric := range first5 { - ro.AddMetric(metric) - } - for _, metric := range next5 { - ro.AddMetric(metric) - } - // flushed twice - assert.Len(t, m.Metrics(), 8) -} - func TestRunningOutputWriteFail(t *testing.T) { conf := &OutputConfig{ Filter: Filter{}, @@ -364,7 +360,7 @@ func TestRunningOutputWriteFailOrder(t *testing.T) { // Verify that 10 metrics were written assert.Len(t, m.Metrics(), 10) // Verify that they are in order - expected := append(first5, next5...) + expected := append(reverse(next5), reverse(first5)...) 
assert.Equal(t, expected, m.Metrics()) } @@ -422,24 +418,17 @@ func TestRunningOutputWriteFailOrder2(t *testing.T) { err = ro.Write() require.NoError(t, err) - // Verify that 10 metrics were written + // Verify that 20 metrics were written assert.Len(t, m.Metrics(), 20) // Verify that they are in order - expected := append(first5, next5...) - expected = append(expected, first5...) - expected = append(expected, next5...) + expected := append(reverse(next5), reverse(first5)...) + expected = append(expected, reverse(next5)...) + expected = append(expected, reverse(first5)...) assert.Equal(t, expected, m.Metrics()) } // Verify that the order of points is preserved when there is a remainder // of points for the batch. -// -// ie, with a batch size of 5: -// -// 1 2 3 4 5 6 <-- order, failed points -// 6 1 2 3 4 5 <-- order, after 1st write failure (1 2 3 4 5 was batch) -// 1 2 3 4 5 6 <-- order, after 2nd write failure, (6 was batch) -// func TestRunningOutputWriteFailOrder3(t *testing.T) { conf := &OutputConfig{ Filter: Filter{}, @@ -475,10 +464,54 @@ func TestRunningOutputWriteFailOrder3(t *testing.T) { // Verify that 6 metrics were written assert.Len(t, m.Metrics(), 6) // Verify that they are in order - expected := append(first5, next5[0]) + expected := []telegraf.Metric{next5[0], first5[4], first5[3], first5[2], first5[1], first5[0]} assert.Equal(t, expected, m.Metrics()) } +func TestInternalMetrics(t *testing.T) { + _ = NewRunningOutput( + "test_internal", + &mockOutput{}, + &OutputConfig{ + Filter: Filter{}, + Name: "test_name", + Alias: "test_alias", + }, + 5, + 10) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "internal_write", + map[string]string{ + "output": "test_name", + "alias": "test_alias", + }, + map[string]interface{}{ + "buffer_limit": 10, + "buffer_size": 0, + "errors": 0, + "metrics_added": 0, + "metrics_dropped": 0, + "metrics_filtered": 0, + "metrics_written": 0, + "write_time_ns": 0, + }, + time.Unix(0, 0), + ), + } + + var actual []telegraf.Metric + for _, m := range selfstat.Metrics() { + output, _ := m.GetTag("output") + if m.Name() == "internal_write" && output == "test_name" { + actual = append(actual, m) + } + } + + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + type mockOutput struct { sync.Mutex diff --git a/models/running_processor.go b/models/running_processor.go new file mode 100644 index 000000000..86b1887a1 --- /dev/null +++ b/models/running_processor.go @@ -0,0 +1,100 @@ +package models + +import ( + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/selfstat" +) + +type RunningProcessor struct { + sync.Mutex + log telegraf.Logger + Processor telegraf.StreamingProcessor + Config *ProcessorConfig +} + +type RunningProcessors []*RunningProcessor + +func (rp RunningProcessors) Len() int { return len(rp) } +func (rp RunningProcessors) Swap(i, j int) { rp[i], rp[j] = rp[j], rp[i] } +func (rp RunningProcessors) Less(i, j int) bool { return rp[i].Config.Order < rp[j].Config.Order } + +// FilterConfig containing a name and filter +type ProcessorConfig struct { + Name string + Alias string + Order int64 + Filter Filter +} + +func NewRunningProcessor(processor telegraf.StreamingProcessor, config *ProcessorConfig) *RunningProcessor { + tags := map[string]string{"processor": config.Name} + if config.Alias != "" { + tags["alias"] = config.Alias + } + + processErrorsRegister := selfstat.Register("process", "errors", tags) + logger := NewLogger("processors", config.Name, config.Alias) + 
logger.OnErr(func() { + processErrorsRegister.Incr(1) + }) + setLogIfExist(processor, logger) + + return &RunningProcessor{ + Processor: processor, + Config: config, + log: logger, + } +} + +func (rp *RunningProcessor) metricFiltered(metric telegraf.Metric) { + metric.Drop() +} + +func (r *RunningProcessor) Init() error { + if p, ok := r.Processor.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return err + } + } + return nil +} + +func (r *RunningProcessor) Log() telegraf.Logger { + return r.log +} + +func (r *RunningProcessor) LogName() string { + return logName("processors", r.Config.Name, r.Config.Alias) +} + +func (r *RunningProcessor) MakeMetric(metric telegraf.Metric) telegraf.Metric { + return metric +} + +func (r *RunningProcessor) Start(acc telegraf.Accumulator) error { + return r.Processor.Start(acc) +} + +func (r *RunningProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) { + if ok := r.Config.Filter.Select(m); !ok { + // pass downstream + acc.AddMetric(m) + return + } + + r.Config.Filter.Modify(m) + if len(m.FieldList()) == 0 { + // drop metric + r.metricFiltered(m) + return + } + + r.Processor.Add(m, acc) +} + +func (r *RunningProcessor) Stop() { + r.Processor.Stop() +} diff --git a/models/running_processor_test.go b/models/running_processor_test.go new file mode 100644 index 000000000..ee1d50ef2 --- /dev/null +++ b/models/running_processor_test.go @@ -0,0 +1,197 @@ +package models + +import ( + "sort" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +// MockProcessor is a Processor with an overridable Apply implementation. +type MockProcessor struct { + ApplyF func(in ...telegraf.Metric) []telegraf.Metric +} + +func (p *MockProcessor) SampleConfig() string { + return "" +} + +func (p *MockProcessor) Description() string { + return "" +} + +func (p *MockProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { + return p.ApplyF(in...) +} + +// TagProcessor returns a Processor whose Apply function adds the tag and +// value. 
+func TagProcessor(key, value string) *MockProcessor { + return &MockProcessor{ + ApplyF: func(in ...telegraf.Metric) []telegraf.Metric { + for _, m := range in { + m.AddTag(key, value) + } + return in + }, + } +} + +func TestRunningProcessor_Apply(t *testing.T) { + type args struct { + Processor telegraf.StreamingProcessor + Config *ProcessorConfig + } + + tests := []struct { + name string + args args + input []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "inactive filter applies metrics", + args: args{ + Processor: processors.NewStreamingProcessorFromProcessor(TagProcessor("apply", "true")), + Config: &ProcessorConfig{ + Filter: Filter{}, + }, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "apply": "true", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "filter applies", + args: args{ + Processor: processors.NewStreamingProcessorFromProcessor(TagProcessor("apply", "true")), + Config: &ProcessorConfig{ + Filter: Filter{ + NamePass: []string{"cpu"}, + }, + }, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "apply": "true", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "filter doesn't apply", + args: args{ + Processor: processors.NewStreamingProcessorFromProcessor(TagProcessor("apply", "true")), + Config: &ProcessorConfig{ + Filter: Filter{ + NameDrop: []string{"cpu"}, + }, + }, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rp := &RunningProcessor{ + Processor: tt.args.Processor, + Config: tt.args.Config, + } + rp.Config.Filter.Compile() + + acc := testutil.Accumulator{} + err := rp.Start(&acc) + require.NoError(t, err) + for _, m := range tt.input { + rp.Add(m, &acc) + } + rp.Stop() + + actual := acc.GetTelegrafMetrics() + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestRunningProcessor_Order(t *testing.T) { + rp1 := &RunningProcessor{ + Config: &ProcessorConfig{ + Order: 1, + }, + } + rp2 := &RunningProcessor{ + Config: &ProcessorConfig{ + Order: 2, + }, + } + rp3 := &RunningProcessor{ + Config: &ProcessorConfig{ + Order: 3, + }, + } + + procs := RunningProcessors{rp2, rp3, rp1} + sort.Sort(procs) + require.Equal(t, + RunningProcessors{rp1, rp2, rp3}, + procs) +} diff --git a/output.go b/output.go index d66ea4556..0045b2ca6 100644 --- a/output.go +++ b/output.go @@ -1,31 +1,26 @@ package telegraf type Output interface { + PluginDescriber + // Connect to the Output Connect() error // Close any connections to the Output Close() error - // Description returns a one-sentence description on the Output - Description() string - // SampleConfig returns the default configuration of the Output - SampleConfig() string // Write takes in group of points to be written to the Output Write(metrics []Metric) error } 
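With Description and SampleConfig moved into the embedded PluginDescriber, a conforming output only has to add Connect, Close, and Write, and it can opt into injected logging by exposing a Log field of type telegraf.Logger for setLogIfExist (shown earlier in the models changes) to fill. A hypothetical no-op output as a sketch of the resulting surface; the plugin name is made up and it is not registered anywhere:

```go
package discardsketch

import "github.com/influxdata/telegraf"

// DiscardSketch drops every metric handed to it; it exists only to show the
// shape of the trimmed Output interface plus the optional Log hook.
type DiscardSketch struct {
	Log telegraf.Logger
}

func (*DiscardSketch) Description() string  { return "Drops every metric it is given" }
func (*DiscardSketch) SampleConfig() string { return "" }
func (*DiscardSketch) Connect() error       { return nil }
func (*DiscardSketch) Close() error         { return nil }

func (d *DiscardSketch) Write(metrics []telegraf.Metric) error {
	if d.Log != nil {
		d.Log.Debugf("Discarding %d metrics", len(metrics))
	}
	return nil
}
```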
-type ServiceOutput interface { - // Connect to the Output - Connect() error - // Close any connections to the Output - Close() error - // Description returns a one-sentence description on the Output - Description() string - // SampleConfig returns the default configuration of the Output - SampleConfig() string - // Write takes in group of points to be written to the Output - Write(metrics []Metric) error - // Start the "service" that will provide an Output - Start() error - // Stop the "service" that will provide an Output - Stop() +// AggregatingOutput adds aggregating functionality to an Output. May be used +// if the Output only accepts a fixed set of aggregations over a time period. +// These functions may be called concurrently to the Write function. +type AggregatingOutput interface { + Output + + // Add the metric to the aggregator + Add(in Metric) + // Push returns the aggregated metrics and is called every flush interval. + Push() []Metric + // Reset signals the the aggregator period is completed. + Reset() } diff --git a/plugin.go b/plugin.go new file mode 100644 index 000000000..29e8bb683 --- /dev/null +++ b/plugin.go @@ -0,0 +1,40 @@ +package telegraf + +// Initializer is an interface that all plugin types: Inputs, Outputs, +// Processors, and Aggregators can optionally implement to initialize the +// plugin. +type Initializer interface { + // Init performs one time setup of the plugin and returns an error if the + // configuration is invalid. + Init() error +} + +// PluginDescriber contains the functions all plugins must implement to describe +// themselves to Telegraf +type PluginDescriber interface { + // SampleConfig returns the default configuration of the Processor + SampleConfig() string + + // Description returns a one-sentence description on the Processor + Description() string +} + +// Logger defines an interface for logging. +type Logger interface { + // Errorf logs an error message, patterned after log.Printf. + Errorf(format string, args ...interface{}) + // Error logs an error message, patterned after log.Print. + Error(args ...interface{}) + // Debugf logs a debug message, patterned after log.Printf. + Debugf(format string, args ...interface{}) + // Debug logs a debug message, patterned after log.Print. + Debug(args ...interface{}) + // Warnf logs a warning message, patterned after log.Printf. + Warnf(format string, args ...interface{}) + // Warn logs a warning message, patterned after log.Print. + Warn(args ...interface{}) + // Infof logs an information message, patterned after log.Printf. + Infof(format string, args ...interface{}) + // Info logs an information message, patterned after log.Print. 
+ Info(args ...interface{}) +} diff --git a/plugins/aggregators/all/all.go b/plugins/aggregators/all/all.go index ff1bbfc70..eabfaa4bf 100644 --- a/plugins/aggregators/all/all.go +++ b/plugins/aggregators/all/all.go @@ -2,7 +2,9 @@ package all import ( _ "github.com/influxdata/telegraf/plugins/aggregators/basicstats" + _ "github.com/influxdata/telegraf/plugins/aggregators/final" _ "github.com/influxdata/telegraf/plugins/aggregators/histogram" + _ "github.com/influxdata/telegraf/plugins/aggregators/merge" _ "github.com/influxdata/telegraf/plugins/aggregators/minmax" _ "github.com/influxdata/telegraf/plugins/aggregators/valuecounter" ) diff --git a/plugins/aggregators/basicstats/README.md b/plugins/aggregators/basicstats/README.md index f5023dfc7..8fef0c6f4 100644 --- a/plugins/aggregators/basicstats/README.md +++ b/plugins/aggregators/basicstats/README.md @@ -1,6 +1,6 @@ # BasicStats Aggregator Plugin -The BasicStats aggregator plugin give us count,max,min,mean,sum,s2(variance), stdev for a set of values, +The BasicStats aggregator plugin give us count,diff,max,min,mean,non_negative_diff,sum,s2(variance), stdev for a set of values, emitting the aggregate every `period` seconds. ### Configuration: @@ -8,9 +8,6 @@ emitting the aggregate every `period` seconds. ```toml # Keep the aggregate basicstats of each metric passing through. [[aggregators.basicstats]] - - ## General Aggregator Arguments: - ## The period on which to flush & clear the aggregator. period = "30s" @@ -18,23 +15,23 @@ emitting the aggregate every `period` seconds. ## aggregator and will not get sent to the output plugins. drop_original = false - ## BasicStats Arguments: - ## Configures which basic stats to push as fields - stats = ["count","min","max","mean","stdev","s2","sum"] + # stats = ["count","diff","min","max","mean","non_negative_diff","stdev","s2","sum"] ``` - stats - - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum` is not aggregated by default to maintain backwards compatibility. + - If not specified, then `count`, `min`, `max`, `mean`, `stdev`, and `s2` are aggregated and pushed as fields. `sum`, `diff` and `non_negative_diff` are not aggregated by default to maintain backwards compatibility. - If empty array, no stats are aggregated ### Measurements & Fields: - measurement1 - field1_count + - field1_diff (difference) - field1_max - field1_min - field1_mean + - field1_non_negative_diff (non-negative difference) - field1_sum - field1_s2 (variance) - field1_stdev (standard deviation) @@ -49,8 +46,8 @@ No tags are applied by this aggregator. 
$ telegraf --config telegraf.conf --quiet system,host=tars load1=1 1475583980000000000 system,host=tars load1=1 1475583990000000000 -system,host=tars load1_count=2,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000 +system,host=tars load1_count=2,load1_diff=0,load1_max=1,load1_min=1,load1_mean=1,load1_sum=2,load1_s2=0,load1_stdev=0 1475584010000000000 system,host=tars load1=1 1475584020000000000 system,host=tars load1=3 1475584030000000000 -system,host=tars load1_count=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000 +system,host=tars load1_count=2,load1_diff=2,load1_max=3,load1_min=1,load1_mean=2,load1_sum=4,load1_s2=2,load1_stdev=1.414162 1475584010000000000 ``` diff --git a/plugins/aggregators/basicstats/basicstats.go b/plugins/aggregators/basicstats/basicstats.go index 42b795ab6..4e62ee311 100644 --- a/plugins/aggregators/basicstats/basicstats.go +++ b/plugins/aggregators/basicstats/basicstats.go @@ -1,7 +1,6 @@ package basicstats import ( - "log" "math" "github.com/influxdata/telegraf" @@ -10,25 +9,28 @@ import ( type BasicStats struct { Stats []string `toml:"stats"` + Log telegraf.Logger cache map[uint64]aggregate statsConfig *configuredStats } type configuredStats struct { - count bool - min bool - max bool - mean bool - variance bool - stdev bool - sum bool + count bool + min bool + max bool + mean bool + variance bool + stdev bool + sum bool + diff bool + non_negative_diff bool } func NewBasicStats() *BasicStats { - mm := &BasicStats{} - mm.Reset() - return mm + return &BasicStats{ + cache: make(map[uint64]aggregate), + } } type aggregate struct { @@ -43,65 +45,74 @@ type basicstats struct { max float64 sum float64 mean float64 - M2 float64 //intermedia value for variance/stdev + diff float64 + M2 float64 //intermediate value for variance/stdev + LAST float64 //intermediate value for diff } var sampleConfig = ` - ## General Aggregator Arguments: ## The period on which to flush & clear the aggregator. period = "30s" + ## If true, the original metric will be dropped by the ## aggregator and will not get sent to the output plugins. drop_original = false + + ## Configures which basic stats to push as fields + # stats = ["count", "min", "max", "mean", "stdev", "s2", "sum"] ` -func (m *BasicStats) SampleConfig() string { +func (*BasicStats) SampleConfig() string { return sampleConfig } -func (m *BasicStats) Description() string { +func (*BasicStats) Description() string { return "Keep the aggregate basicstats of each metric passing through." 
} -func (m *BasicStats) Add(in telegraf.Metric) { +func (b *BasicStats) Add(in telegraf.Metric) { id := in.HashID() - if _, ok := m.cache[id]; !ok { + if _, ok := b.cache[id]; !ok { // hit an uncached metric, create caches for first time: a := aggregate{ name: in.Name(), tags: in.Tags(), fields: make(map[string]basicstats), } - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - a.fields[k] = basicstats{ + for _, field := range in.FieldList() { + if fv, ok := convert(field.Value); ok { + a.fields[field.Key] = basicstats{ count: 1, min: fv, max: fv, mean: fv, sum: fv, + diff: 0.0, M2: 0.0, + LAST: fv, } } } - m.cache[id] = a + b.cache[id] = a } else { - for k, v := range in.Fields() { - if fv, ok := convert(v); ok { - if _, ok := m.cache[id].fields[k]; !ok { + for _, field := range in.FieldList() { + if fv, ok := convert(field.Value); ok { + if _, ok := b.cache[id].fields[field.Key]; !ok { // hit an uncached field of a cached metric - m.cache[id].fields[k] = basicstats{ + b.cache[id].fields[field.Key] = basicstats{ count: 1, min: fv, max: fv, mean: fv, sum: fv, + diff: 0.0, M2: 0.0, + LAST: fv, } continue } - tmp := m.cache[id].fields[k] + tmp := b.cache[id].fields[field.Key] //https://en.m.wikipedia.org/wiki/Algorithms_for_calculating_variance //variable initialization x := fv @@ -125,34 +136,33 @@ func (m *BasicStats) Add(in telegraf.Metric) { } //sum compute tmp.sum += fv + //diff compute + tmp.diff = fv - tmp.LAST //store final data - m.cache[id].fields[k] = tmp + b.cache[id].fields[field.Key] = tmp } } } } -func (m *BasicStats) Push(acc telegraf.Accumulator) { - - config := getConfiguredStats(m) - - for _, aggregate := range m.cache { +func (b *BasicStats) Push(acc telegraf.Accumulator) { + for _, aggregate := range b.cache { fields := map[string]interface{}{} for k, v := range aggregate.fields { - if config.count { + if b.statsConfig.count { fields[k+"_count"] = v.count } - if config.min { + if b.statsConfig.min { fields[k+"_min"] = v.min } - if config.max { + if b.statsConfig.max { fields[k+"_max"] = v.max } - if config.mean { + if b.statsConfig.mean { fields[k+"_mean"] = v.mean } - if config.sum { + if b.statsConfig.sum { fields[k+"_sum"] = v.sum } @@ -160,12 +170,19 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) { if v.count > 1 { variance := v.M2 / (v.count - 1) - if config.variance { + if b.statsConfig.variance { fields[k+"_s2"] = variance } - if config.stdev { + if b.statsConfig.stdev { fields[k+"_stdev"] = math.Sqrt(variance) } + if b.statsConfig.diff { + fields[k+"_diff"] = v.diff + } + if b.statsConfig.non_negative_diff && v.diff >= 0 { + fields[k+"_non_negative_diff"] = v.diff + } + } //if count == 1 StdDev = infinite => so I won't send data } @@ -176,14 +193,12 @@ func (m *BasicStats) Push(acc telegraf.Accumulator) { } } -func parseStats(names []string) *configuredStats { - +// member function for logging. +func (b *BasicStats) parseStats() *configuredStats { parsed := &configuredStats{} - for _, name := range names { - + for _, name := range b.Stats { switch name { - case "count": parsed.count = true case "min": @@ -198,46 +213,38 @@ func parseStats(names []string) *configuredStats { parsed.stdev = true case "sum": parsed.sum = true + case "diff": + parsed.diff = true + case "non_negative_diff": + parsed.non_negative_diff = true default: - log.Printf("W! 
Unrecognized basic stat '%s', ignoring", name) + b.Log.Warnf("Unrecognized basic stat %q, ignoring", name) } } return parsed } -func defaultStats() *configuredStats { - - defaults := &configuredStats{} - - defaults.count = true - defaults.min = true - defaults.max = true - defaults.mean = true - defaults.variance = true - defaults.stdev = true - defaults.sum = false - - return defaults -} - -func getConfiguredStats(m *BasicStats) *configuredStats { - - if m.statsConfig == nil { - - if m.Stats == nil { - m.statsConfig = defaultStats() - } else { - m.statsConfig = parseStats(m.Stats) +func (b *BasicStats) getConfiguredStats() { + if b.Stats == nil { + b.statsConfig = &configuredStats{ + count: true, + min: true, + max: true, + mean: true, + variance: true, + stdev: true, + sum: false, + non_negative_diff: false, } + } else { + b.statsConfig = b.parseStats() } - - return m.statsConfig } -func (m *BasicStats) Reset() { - m.cache = make(map[uint64]aggregate) +func (b *BasicStats) Reset() { + b.cache = make(map[uint64]aggregate) } func convert(in interface{}) (float64, bool) { @@ -253,6 +260,12 @@ func convert(in interface{}) (float64, bool) { } } +func (b *BasicStats) Init() error { + b.getConfiguredStats() + + return nil +} + func init() { aggregators.Add("basicstats", func() telegraf.Aggregator { return NewBasicStats() diff --git a/plugins/aggregators/basicstats/basicstats_test.go b/plugins/aggregators/basicstats/basicstats_test.go index 5c55284de..c5a093840 100644 --- a/plugins/aggregators/basicstats/basicstats_test.go +++ b/plugins/aggregators/basicstats/basicstats_test.go @@ -17,6 +17,7 @@ var m1, _ = metric.New("m1", "b": int64(1), "c": float64(2), "d": float64(2), + "g": int64(3), }, time.Now(), ) @@ -31,12 +32,15 @@ var m2, _ = metric.New("m1", "f": uint64(200), "ignoreme": "string", "andme": true, + "g": int64(1), }, time.Now(), ) func BenchmarkApply(b *testing.B) { minmax := NewBasicStats() + minmax.Log = testutil.Logger{} + minmax.getConfiguredStats() for n := 0; n < b.N; n++ { minmax.Add(m1) @@ -48,6 +52,8 @@ func BenchmarkApply(b *testing.B) { func TestBasicStatsWithPeriod(t *testing.T) { acc := testutil.Accumulator{} minmax := NewBasicStats() + minmax.Log = testutil.Logger{} + minmax.getConfiguredStats() minmax.Add(m1) minmax.Add(m2) @@ -86,6 +92,12 @@ func TestBasicStatsWithPeriod(t *testing.T) { "f_max": float64(200), "f_min": float64(200), "f_mean": float64(200), + "g_count": float64(2), //g + "g_max": float64(3), + "g_min": float64(1), + "g_mean": float64(2), + "g_s2": float64(2), + "g_stdev": math.Sqrt(2), } expectedTags := map[string]string{ "foo": "bar", @@ -98,6 +110,8 @@ func TestBasicStatsWithPeriod(t *testing.T) { func TestBasicStatsDifferentPeriods(t *testing.T) { acc := testutil.Accumulator{} minmax := NewBasicStats() + minmax.Log = testutil.Logger{} + minmax.getConfiguredStats() minmax.Add(m1) minmax.Push(&acc) @@ -118,6 +132,10 @@ func TestBasicStatsDifferentPeriods(t *testing.T) { "d_max": float64(2), "d_min": float64(2), "d_mean": float64(2), + "g_count": float64(1), //g + "g_max": float64(3), + "g_min": float64(3), + "g_mean": float64(3), } expectedTags := map[string]string{ "foo": "bar", @@ -153,6 +171,10 @@ func TestBasicStatsDifferentPeriods(t *testing.T) { "f_max": float64(200), "f_min": float64(200), "f_mean": float64(200), + "g_count": float64(1), //g + "g_max": float64(1), + "g_min": float64(1), + "g_mean": float64(1), } expectedTags = map[string]string{ "foo": "bar", @@ -165,6 +187,8 @@ func TestBasicStatsWithOnlyCount(t *testing.T) { aggregator := 
NewBasicStats() aggregator.Stats = []string{"count"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -179,6 +203,7 @@ func TestBasicStatsWithOnlyCount(t *testing.T) { "d_count": float64(2), "e_count": float64(1), "f_count": float64(1), + "g_count": float64(2), } expectedTags := map[string]string{ "foo": "bar", @@ -191,6 +216,8 @@ func TestBasicStatsWithOnlyMin(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"min"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -205,6 +232,7 @@ func TestBasicStatsWithOnlyMin(t *testing.T) { "d_min": float64(2), "e_min": float64(200), "f_min": float64(200), + "g_min": float64(1), } expectedTags := map[string]string{ "foo": "bar", @@ -217,6 +245,8 @@ func TestBasicStatsWithOnlyMax(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"max"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -231,6 +261,7 @@ func TestBasicStatsWithOnlyMax(t *testing.T) { "d_max": float64(6), "e_max": float64(200), "f_max": float64(200), + "g_max": float64(3), } expectedTags := map[string]string{ "foo": "bar", @@ -243,6 +274,8 @@ func TestBasicStatsWithOnlyMean(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"mean"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -257,6 +290,7 @@ func TestBasicStatsWithOnlyMean(t *testing.T) { "d_mean": float64(4), "e_mean": float64(200), "f_mean": float64(200), + "g_mean": float64(2), } expectedTags := map[string]string{ "foo": "bar", @@ -269,6 +303,8 @@ func TestBasicStatsWithOnlySum(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"sum"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -283,6 +319,7 @@ func TestBasicStatsWithOnlySum(t *testing.T) { "d_sum": float64(8), "e_sum": float64(200), "f_sum": float64(200), + "g_sum": float64(4), } expectedTags := map[string]string{ "foo": "bar", @@ -291,7 +328,7 @@ func TestBasicStatsWithOnlySum(t *testing.T) { } // Verify that sum doesn't suffer from floating point errors. Early -// implementations of sum were calulated from mean and count, which +// implementations of sum were calculated from mean and count, which // e.g. summed "1, 1, 5, 1" as "7.999999..." instead of 8. 
func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { @@ -326,6 +363,8 @@ func TestBasicStatsWithOnlySumFloatingPointErrata(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"sum"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(sum1) aggregator.Add(sum2) @@ -347,6 +386,8 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"s2"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -359,6 +400,7 @@ func TestBasicStatsWithOnlyVariance(t *testing.T) { "b_s2": float64(2), "c_s2": float64(2), "d_s2": float64(8), + "g_s2": float64(2), } expectedTags := map[string]string{ "foo": "bar", @@ -371,6 +413,8 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"stdev"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -383,6 +427,7 @@ func TestBasicStatsWithOnlyStandardDeviation(t *testing.T) { "b_stdev": math.Sqrt(2), "c_stdev": math.Sqrt(2), "d_stdev": math.Sqrt(8), + "g_stdev": math.Sqrt(2), } expectedTags := map[string]string{ "foo": "bar", @@ -395,6 +440,8 @@ func TestBasicStatsWithMinAndMax(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"min", "max"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -415,6 +462,61 @@ func TestBasicStatsWithMinAndMax(t *testing.T) { "e_min": float64(200), "f_max": float64(200), //f "f_min": float64(200), + "g_max": float64(3), //g + "g_min": float64(1), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +// Test only aggregating diff +func TestBasicStatsWithDiff(t *testing.T) { + + aggregator := NewBasicStats() + aggregator.Stats = []string{"diff"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_diff": float64(0), + "b_diff": float64(2), + "c_diff": float64(2), + "d_diff": float64(4), + "g_diff": float64(-2), + } + expectedTags := map[string]string{ + "foo": "bar", + } + acc.AssertContainsTaggedFields(t, "m1", expectedFields, expectedTags) +} + +// Test only aggregating non_negative_diff +func TestBasicStatsWithNonNegativeDiff(t *testing.T) { + + aggregator := NewBasicStats() + aggregator.Stats = []string{"non_negative_diff"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() + + aggregator.Add(m1) + aggregator.Add(m2) + + acc := testutil.Accumulator{} + aggregator.Push(&acc) + + expectedFields := map[string]interface{}{ + "a_non_negative_diff": float64(0), + "b_non_negative_diff": float64(2), + "c_non_negative_diff": float64(2), + "d_non_negative_diff": float64(4), } expectedTags := map[string]string{ "foo": "bar", @@ -426,7 +528,9 @@ func TestBasicStatsWithMinAndMax(t *testing.T) { func TestBasicStatsWithAllStats(t *testing.T) { acc := testutil.Accumulator{} minmax := NewBasicStats() + minmax.Log = testutil.Logger{} minmax.Stats = []string{"count", "min", "max", "mean", "stdev", "s2", "sum"} + minmax.getConfiguredStats() minmax.Add(m1) minmax.Add(m2) @@ -471,6 +575,13 @@ func TestBasicStatsWithAllStats(t *testing.T) { "f_min": float64(200), "f_mean": float64(200), "f_sum": 
float64(200), + "g_count": float64(2), //g + "g_max": float64(3), + "g_min": float64(1), + "g_mean": float64(2), + "g_s2": float64(2), + "g_stdev": math.Sqrt(2), + "g_sum": float64(4), } expectedTags := map[string]string{ "foo": "bar", @@ -483,6 +594,8 @@ func TestBasicStatsWithNoStats(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -498,6 +611,8 @@ func TestBasicStatsWithUnknownStat(t *testing.T) { aggregator := NewBasicStats() aggregator.Stats = []string{"crazy"} + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) @@ -509,12 +624,14 @@ func TestBasicStatsWithUnknownStat(t *testing.T) { } // Test that if Stats isn't supplied, then we only do count, min, max, mean, -// stdev, and s2. We purposely exclude sum for backwards compatability, +// stdev, and s2. We purposely exclude sum for backwards compatibility, // otherwise user's working systems will suddenly (and surprisingly) start // capturing sum without their input. func TestBasicStatsWithDefaultStats(t *testing.T) { aggregator := NewBasicStats() + aggregator.Log = testutil.Logger{} + aggregator.getConfiguredStats() aggregator.Add(m1) aggregator.Add(m2) diff --git a/plugins/aggregators/final/README.md b/plugins/aggregators/final/README.md new file mode 100644 index 000000000..444746d78 --- /dev/null +++ b/plugins/aggregators/final/README.md @@ -0,0 +1,48 @@ +# Final Aggregator Plugin + +The final aggregator emits the last metric of a contiguous series. A +contiguous series is defined as a series which receives updates within the +time period in `series_timeout`. The contiguous series may be longer than the +time interval defined by `period`. + +This is useful for getting the final value for data sources that produce +discrete time series such as procstat, cgroup, kubernetes etc. + +When a series has not been updated within the time defined in +`series_timeout`, the last metric is emitted with the `_final` appended. + +### Configuration + +```toml +[[aggregators.final]] + ## The period on which to flush & clear the aggregator. + period = "30s" + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + + ## The time that a series is not updated until considering it final. + series_timeout = "5m" +``` + +### Metrics + +Measurement and tags are unchanged, fields are emitted with the suffix +`_final`. + +### Example Output + +``` +counter,host=bar i_final=3,j_final=6 1554281635115090133 +counter,host=foo i_final=3,j_final=6 1554281635112992012 +``` + +Original input: +``` +counter,host=bar i=1,j=4 1554281633101153300 +counter,host=foo i=1,j=4 1554281633099323601 +counter,host=bar i=2,j=5 1554281634107980073 +counter,host=foo i=2,j=5 1554281634105931116 +counter,host=bar i=3,j=6 1554281635115090133 +counter,host=foo i=3,j=6 1554281635112992012 +``` diff --git a/plugins/aggregators/final/final.go b/plugins/aggregators/final/final.go new file mode 100644 index 000000000..53ad0a47c --- /dev/null +++ b/plugins/aggregators/final/final.go @@ -0,0 +1,72 @@ +package final + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +var sampleConfig = ` + ## The period on which to flush & clear the aggregator. 
+ period = "30s" + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = false + + ## The time that a series is not updated until considering it final. + series_timeout = "5m" +` + +type Final struct { + SeriesTimeout internal.Duration `toml:"series_timeout"` + + // The last metric for all series which are active + metricCache map[uint64]telegraf.Metric +} + +func NewFinal() *Final { + return &Final{ + SeriesTimeout: internal.Duration{Duration: 5 * time.Minute}, + metricCache: make(map[uint64]telegraf.Metric), + } +} + +func (m *Final) SampleConfig() string { + return sampleConfig +} + +func (m *Final) Description() string { + return "Report the final metric of a series" +} + +func (m *Final) Add(in telegraf.Metric) { + id := in.HashID() + m.metricCache[id] = in +} + +func (m *Final) Push(acc telegraf.Accumulator) { + // Preserve timestamp of original metric + acc.SetPrecision(time.Nanosecond) + + for id, metric := range m.metricCache { + if time.Since(metric.Time()) > m.SeriesTimeout.Duration { + fields := map[string]interface{}{} + for _, field := range metric.FieldList() { + fields[field.Key+"_final"] = field.Value + } + acc.AddFields(metric.Name(), fields, metric.Tags(), metric.Time()) + delete(m.metricCache, id) + } + } +} + +func (m *Final) Reset() { +} + +func init() { + aggregators.Add("final", func() telegraf.Aggregator { + return NewFinal() + }) +} diff --git a/plugins/aggregators/final/final_test.go b/plugins/aggregators/final/final_test.go new file mode 100644 index 000000000..1b3367fa5 --- /dev/null +++ b/plugins/aggregators/final/final_test.go @@ -0,0 +1,144 @@ +package final + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" +) + +func TestSimple(t *testing.T) { + acc := testutil.Accumulator{} + final := NewFinal() + + tags := map[string]string{"foo": "bar"} + m1, _ := metric.New("m1", + tags, + map[string]interface{}{"a": int64(1)}, + time.Unix(1530939936, 0)) + m2, _ := metric.New("m1", + tags, + map[string]interface{}{"a": int64(2)}, + time.Unix(1530939937, 0)) + m3, _ := metric.New("m1", + tags, + map[string]interface{}{"a": int64(3)}, + time.Unix(1530939938, 0)) + final.Add(m1) + final.Add(m2) + final.Add(m3) + final.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "m1", + tags, + map[string]interface{}{ + "a_final": 3, + }, + time.Unix(1530939938, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestTwoTags(t *testing.T) { + acc := testutil.Accumulator{} + final := NewFinal() + + tags1 := map[string]string{"foo": "bar"} + tags2 := map[string]string{"foo": "baz"} + + m1, _ := metric.New("m1", + tags1, + map[string]interface{}{"a": int64(1)}, + time.Unix(1530939936, 0)) + m2, _ := metric.New("m1", + tags2, + map[string]interface{}{"a": int64(2)}, + time.Unix(1530939937, 0)) + m3, _ := metric.New("m1", + tags1, + map[string]interface{}{"a": int64(3)}, + time.Unix(1530939938, 0)) + final.Add(m1) + final.Add(m2) + final.Add(m3) + final.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "m1", + tags2, + map[string]interface{}{ + "a_final": 2, + }, + time.Unix(1530939937, 0), + ), + testutil.MustMetric( + "m1", + tags1, + map[string]interface{}{ + "a_final": 3, + }, + time.Unix(1530939938, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, 
acc.GetTelegrafMetrics(), testutil.SortMetrics()) +} + +func TestLongDifference(t *testing.T) { + acc := testutil.Accumulator{} + final := NewFinal() + final.SeriesTimeout = internal.Duration{Duration: 30 * time.Second} + tags := map[string]string{"foo": "bar"} + + now := time.Now() + + m1, _ := metric.New("m", + tags, + map[string]interface{}{"a": int64(1)}, + now.Add(time.Second*-290)) + m2, _ := metric.New("m", + tags, + map[string]interface{}{"a": int64(2)}, + now.Add(time.Second*-275)) + m3, _ := metric.New("m", + tags, + map[string]interface{}{"a": int64(3)}, + now.Add(time.Second*-100)) + m4, _ := metric.New("m", + tags, + map[string]interface{}{"a": int64(4)}, + now.Add(time.Second*-20)) + final.Add(m1) + final.Add(m2) + final.Push(&acc) + final.Add(m3) + final.Push(&acc) + final.Add(m4) + final.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "m", + tags, + map[string]interface{}{ + "a_final": 2, + }, + now.Add(time.Second*-275), + ), + testutil.MustMetric( + "m", + tags, + map[string]interface{}{ + "a_final": 3, + }, + now.Add(time.Second*-100), + ), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics()) +} diff --git a/plugins/aggregators/histogram/README.md b/plugins/aggregators/histogram/README.md index b4525681e..f0b6c15b1 100644 --- a/plugins/aggregators/histogram/README.md +++ b/plugins/aggregators/histogram/README.md @@ -3,19 +3,21 @@ The histogram aggregator plugin creates histograms containing the counts of field values within a range. -Values added to a bucket are also added to the larger buckets in the -distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg). +If `cumulative` is set to true, values added to a bucket are also added to the +larger buckets in the distribution. This creates a [cumulative histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg). +Otherwise, values are added to only one bucket, which creates an [ordinary histogram](https://en.wikipedia.org/wiki/Histogram#/media/File:Cumulative_vs_normal_histogram.svg) Like other Telegraf aggregators, the metric is emitted every `period` seconds. -Bucket counts however are not reset between periods and will be non-strictly -increasing while Telegraf is running. +By default bucket counts are not reset between periods and will be non-strictly +increasing while Telegraf is running. This behavior can be changed by setting the +`reset` parameter to true. #### Design Each metric is passed to the aggregator and this aggregator searches histogram buckets for those fields, which have been specified in the config. If buckets are found, the aggregator will increment +1 to the appropriate -bucket otherwise it will be added to the `+Inf` bucket. Every `period` +bucket. Otherwise, it will be added to the `+Inf` bucket. Every `period` seconds this data will be forwarded to the outputs. The algorithm of hit counting to buckets was implemented on the base @@ -34,16 +36,24 @@ of the algorithm which is implemented in the Prometheus ## aggregator and will not get sent to the output plugins. drop_original = false + ## If true, the histogram will be reset on flush instead + ## of accumulating the results. + reset = false + + ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. + ## Defaults to true. + cumulative = true + ## Example config that aggregates all fields of the metric. 
# [[aggregators.histogram.config]] - # ## The set of buckets. + # ## Right borders of buckets (with +Inf implicitly added). # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] # ## The name of metric. # measurement_name = "cpu" ## Example config that aggregates only specific fields of the metric. # [[aggregators.histogram.config]] - # ## The set of buckets. + # ## Right borders of buckets (with +Inf implicitly added). # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] # ## The name of metric. # measurement_name = "diskio" @@ -59,8 +69,9 @@ option. Optionally, if `fields` is set only the fields listed will be aggregated. If `fields` is not set all fields are aggregated. The `buckets` option contains a list of floats which specify the bucket -boundaries. Each float value defines the inclusive upper bound of the bucket. +boundaries. Each float value defines the inclusive upper (right) bound of the bucket. The `+Inf` bucket is added automatically and does not need to be defined. +(For left boundaries, these specified bucket borders and `-Inf` will be used). ### Measurements & Fields: @@ -72,26 +83,43 @@ The postfix `bucket` will be added to each field key. ### Tags: -All measurements are given the tag `le`. This tag has the border value of -bucket. It means that the metric value is less than or equal to the value of -this tag. For example, let assume that we have the metric value 10 and the -following buckets: [5, 10, 30, 70, 100]. Then the tag `le` will have the value -10, because the metrics value is passed into bucket with right border value -`10`. +* `cumulative = true` (default): + * `le`: Right bucket border. It means that the metric value is less than or + equal to the value of this tag. If a metric value is sorted into a bucket, + it is also sorted into all larger buckets. As a result, the value of + `_bucket` is rising with rising `le` value. When `le` is `+Inf`, + the bucket value is the count of all metrics, because all metric values are + less than or equal to positive infinity. +* `cumulative = false`: + * `gt`: Left bucket border. It means that the metric value is greater than + (and not equal to) the value of this tag. + * `le`: Right bucket border. It means that the metric value is less than or + equal to the value of this tag. + * As both `gt` and `le` are present, each metric is sorted in only exactly + one bucket. 
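As an aside, the difference between the two tagging modes can be shown in a few lines of Go. The sketch below is a standalone illustration that mirrors the tagging loop in `groupFieldsByBuckets` further down in this diff; the bucket borders and hit counts are invented for the example, and it is not part of the plugin itself:

```go
package main

import (
	"fmt"
	"strconv"
)

// emitBuckets mirrors the tagging loop of the histogram aggregator:
// buckets holds the configured right borders and counts holds one hit count
// per bucket plus a trailing entry for the implicit +Inf bucket.
func emitBuckets(buckets []float64, counts []int64, cumulative bool) {
	sum := int64(0)
	for i, count := range counts {
		tags := map[string]string{}
		if !cumulative {
			sum = 0 // report each bucket on its own instead of a running total
			tags["gt"] = "-Inf"
			if i > 0 {
				tags["gt"] = strconv.FormatFloat(buckets[i-1], 'f', -1, 64)
			}
		}
		tags["le"] = "+Inf"
		if i < len(buckets) {
			tags["le"] = strconv.FormatFloat(buckets[i], 'f', -1, 64)
		}
		sum += count
		fmt.Println(tags, sum)
	}
}

func main() {
	borders := []float64{0, 10, 50, 100}
	hits := []int64{0, 1, 2, 1, 0} // made-up per-bucket hits; the last entry is the +Inf bucket

	emitBuckets(borders, hits, true)  // cumulative: only "le" tags, values never decrease
	emitBuckets(borders, hits, false) // non-cumulative: "gt" and "le" tags, per-bucket values
}
```

In the cumulative run the printed values form a non-decreasing sequence ending in the total count; in the non-cumulative run each bucket reports only its own hits and carries both a `gt` and an `le` tag.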
+ ### Example Output: +Let assume we have the buckets [0, 10, 50, 100] and the following field values +for `usage_idle`: [50, 7, 99, 12] + +With `cumulative = true`: + ``` -cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=0i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=20.0 usage_idle_bucket=1i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=30.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=40.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=60.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=70.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=80.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=90.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=2i 1486998330000000000 -cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=2i 1486998330000000000 +cpu,cpu=cpu1,host=localhost,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none +cpu,cpu=cpu1,host=localhost,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 +cpu,cpu=cpu1,host=localhost,le=50.0 usage_idle_bucket=2i 1486998330000000000 # 7, 12 +cpu,cpu=cpu1,host=localhost,le=100.0 usage_idle_bucket=4i 1486998330000000000 # 7, 12, 50, 99 +cpu,cpu=cpu1,host=localhost,le=+Inf usage_idle_bucket=4i 1486998330000000000 # 7, 12, 50, 99 +``` + +With `cumulative = false`: + +``` +cpu,cpu=cpu1,host=localhost,gt=-Inf,le=0.0 usage_idle_bucket=0i 1486998330000000000 # none +cpu,cpu=cpu1,host=localhost,gt=0.0,le=10.0 usage_idle_bucket=1i 1486998330000000000 # 7 +cpu,cpu=cpu1,host=localhost,gt=10.0,le=50.0 usage_idle_bucket=1i 1486998330000000000 # 12 +cpu,cpu=cpu1,host=localhost,gt=50.0,le=100.0 usage_idle_bucket=2i 1486998330000000000 # 50, 99 +cpu,cpu=cpu1,host=localhost,gt=100.0,le=+Inf usage_idle_bucket=0i 1486998330000000000 # none ``` diff --git a/plugins/aggregators/histogram/histogram.go b/plugins/aggregators/histogram/histogram.go index a60cede3d..dab524d62 100644 --- a/plugins/aggregators/histogram/histogram.go +++ b/plugins/aggregators/histogram/histogram.go @@ -8,15 +8,23 @@ import ( "github.com/influxdata/telegraf/plugins/aggregators" ) -// bucketTag is the tag, which contains right bucket border -const bucketTag = "le" +// bucketRightTag is the tag, which contains right bucket border +const bucketRightTag = "le" -// bucketInf is the right bucket border for infinite values -const bucketInf = "+Inf" +// bucketPosInf is the right bucket border for infinite values +const bucketPosInf = "+Inf" + +// bucketLeftTag is the tag, which contains left bucket border (exclusive) +const bucketLeftTag = "gt" + +// bucketNegInf is the left bucket border for infinite values +const bucketNegInf = "-Inf" // HistogramAggregator is aggregator with histogram configs and particular histograms for defined metrics type HistogramAggregator struct { - Configs []config `toml:"config"` + Configs []config `toml:"config"` + ResetBuckets bool `toml:"reset"` + Cumulative bool `toml:"cumulative"` buckets bucketsByMetrics cache map[uint64]metricHistogramCollection @@ -56,8 +64,10 @@ type groupedByCountFields struct { } // NewHistogramAggregator creates new histogram aggregator -func NewHistogramAggregator() telegraf.Aggregator { - h := &HistogramAggregator{} +func NewHistogramAggregator() *HistogramAggregator { + h := 
&HistogramAggregator{ + Cumulative: true, + } h.buckets = make(bucketsByMetrics) h.resetCache() @@ -72,16 +82,24 @@ var sampleConfig = ` ## aggregator and will not get sent to the output plugins. drop_original = false + ## If true, the histogram will be reset on flush instead + ## of accumulating the results. + reset = false + + ## Whether bucket values should be accumulated. If set to false, "gt" tag will be added. + ## Defaults to true. + cumulative = true + ## Example config that aggregates all fields of the metric. # [[aggregators.histogram.config]] - # ## The set of buckets. + # ## Right borders of buckets (with +Inf implicitly added). # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] # ## The name of metric. # measurement_name = "cpu" ## Example config that aggregates only specific fields of the metric. # [[aggregators.histogram.config]] - # ## The set of buckets. + # ## Right borders of buckets (with +Inf implicitly added). # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] # ## The name of metric. # measurement_name = "diskio" @@ -162,18 +180,27 @@ func (h *HistogramAggregator) groupFieldsByBuckets( tags map[string]string, counts []int64, ) { - count := int64(0) - for index, bucket := range h.getBuckets(name, field) { - count += counts[index] + sum := int64(0) + buckets := h.getBuckets(name, field) // note that len(buckets) + 1 == len(counts) - tags[bucketTag] = strconv.FormatFloat(bucket, 'f', -1, 64) - h.groupField(metricsWithGroupedFields, name, field, count, copyTags(tags)) + for index, count := range counts { + if !h.Cumulative { + sum = 0 // reset sum -> don't store cumulative counts + + tags[bucketLeftTag] = bucketNegInf + if index > 0 { + tags[bucketLeftTag] = strconv.FormatFloat(buckets[index-1], 'f', -1, 64) + } + } + + tags[bucketRightTag] = bucketPosInf + if index < len(buckets) { + tags[bucketRightTag] = strconv.FormatFloat(buckets[index], 'f', -1, 64) + } + + sum += count + h.groupField(metricsWithGroupedFields, name, field, sum, copyTags(tags)) } - - count += counts[len(counts)-1] - tags[bucketTag] = bucketInf - - h.groupField(metricsWithGroupedFields, name, field, count, tags) } // groupField groups field by count value @@ -201,9 +228,15 @@ func (h *HistogramAggregator) groupField( ) } -// Reset does nothing, because we need to collect counts for a long time, otherwise if config parameter 'reset' has -// small value, we will get a histogram with a small amount of the distribution. -func (h *HistogramAggregator) Reset() {} +// Reset does nothing by default, because we typically need to collect counts for a long time. +// Otherwise if config parameter 'reset' has 'true' value, we will get a histogram +// with a small amount of the distribution. However in some use cases a reset is useful. 
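// Note: with 'reset = true' this clears both the cached hit counts and the
// per-metric bucket map on every flush, so each period reports an independent distribution.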
+func (h *HistogramAggregator) Reset() { + if h.ResetBuckets { + h.resetCache() + h.buckets = make(bucketsByMetrics) + } +} // resetCache resets cached counts(hits) in the buckets func (h *HistogramAggregator) resetCache() { diff --git a/plugins/aggregators/histogram/histogram_test.go b/plugins/aggregators/histogram/histogram_test.go index 8c4a2b9d3..dfb3f5d12 100644 --- a/plugins/aggregators/histogram/histogram_test.go +++ b/plugins/aggregators/histogram/histogram_test.go @@ -11,11 +11,15 @@ import ( "github.com/stretchr/testify/assert" ) +type fields map[string]interface{} +type tags map[string]string + // NewTestHistogram creates new test histogram aggregation with specified config -func NewTestHistogram(cfg []config) telegraf.Aggregator { - htm := &HistogramAggregator{Configs: cfg} - htm.buckets = make(bucketsByMetrics) - htm.resetCache() +func NewTestHistogram(cfg []config, reset bool, cumulative bool) telegraf.Aggregator { + htm := NewHistogramAggregator() + htm.Configs = cfg + htm.ResetBuckets = reset + htm.Cumulative = cumulative return htm } @@ -23,8 +27,8 @@ func NewTestHistogram(cfg []config) telegraf.Aggregator { // firstMetric1 is the first test metric var firstMetric1, _ = metric.New( "first_metric_name", - map[string]string{"tag_name": "tag_value"}, - map[string]interface{}{ + tags{}, + fields{ "a": float64(15.3), "b": float64(40), }, @@ -34,8 +38,8 @@ var firstMetric1, _ = metric.New( // firstMetric1 is the first test metric with other value var firstMetric2, _ = metric.New( "first_metric_name", - map[string]string{"tag_name": "tag_value"}, - map[string]interface{}{ + tags{}, + fields{ "a": float64(15.9), "c": float64(40), }, @@ -45,8 +49,8 @@ var firstMetric2, _ = metric.New( // secondMetric is the second metric var secondMetric, _ = metric.New( "second_metric_name", - map[string]string{"tag_name": "tag_value"}, - map[string]interface{}{ + tags{}, + fields{ "a": float64(105), "ignoreme": "string", "andme": true, @@ -65,35 +69,84 @@ func BenchmarkApply(b *testing.B) { } } -// TestHistogramWithPeriodAndOneField tests metrics for one period and for one field -func TestHistogramWithPeriodAndOneField(t *testing.T) { +// TestHistogram tests metrics for one period and for one field +func TestHistogram(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg) + histogram := NewTestHistogram(cfg, false, true) acc := &testutil.Accumulator{} histogram.Add(firstMetric1) + histogram.Reset() histogram.Add(firstMetric2) histogram.Push(acc) if len(acc.Metrics) != 6 { assert.Fail(t, "Incorrect number of metrics") } - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "0") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0)}, "10") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "20") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "30") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, "40") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2)}, bucketInf) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": 
int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketRightTag: bucketPosInf}) } -// TestHistogramWithPeriodAndAllFields tests two metrics for one period and for all fields -func TestHistogramWithPeriodAndAllFields(t *testing.T) { +// TestHistogramNonCumulative tests metrics for one period and for one field +func TestHistogramNonCumulative(t *testing.T) { + var cfg []config + cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + histogram := NewTestHistogram(cfg, false, false) + + acc := &testutil.Accumulator{} + + histogram.Add(firstMetric1) + histogram.Reset() + histogram.Add(firstMetric2) + histogram.Push(acc) + + if len(acc.Metrics) != 6 { + assert.Fail(t, "Incorrect number of metrics") + } + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2)}, tags{bucketLeftTag: "10", bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "20", bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "30", bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketLeftTag: "40", bucketRightTag: bucketPosInf}) +} + +// TestHistogramWithReset tests metrics for one period and for one field, with reset between metrics adding +func TestHistogramWithReset(t *testing.T) { + var cfg []config + cfg = append(cfg, config{Metric: "first_metric_name", Fields: []string{"a"}, Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) + histogram := NewTestHistogram(cfg, true, true) + + acc := &testutil.Accumulator{} + + histogram.Add(firstMetric1) + histogram.Reset() + histogram.Add(firstMetric2) + histogram.Push(acc) + + if len(acc.Metrics) != 6 { + assert.Fail(t, "Incorrect number of metrics") + } + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf}) +} + +// TestHistogramWithAllFields tests two metrics for one period and for all fields +func TestHistogramWithAllFields(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 
20.0, 30.0, 40.0}}) cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}) - histogram := NewTestHistogram(cfg) + histogram := NewTestHistogram(cfg, false, true) acc := &testutil.Accumulator{} @@ -106,50 +159,83 @@ func TestHistogramWithPeriodAndAllFields(t *testing.T) { assert.Fail(t, "Incorrect number of metrics") } - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, "15.5") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "15.5"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf}) - assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "0") - assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "4") - assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "10") - assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "23") - assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, "30") - assertContainsTaggedField(t, acc, "second_metric_name", map[string]interface{}{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, bucketInf) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": 
int64(0)}, tags{bucketRightTag: "4"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "23"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketRightTag: bucketPosInf}) } -// TestHistogramDifferentPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates +// TestHistogramWithAllFieldsNonCumulative tests two metrics for one period and for all fields +func TestHistogramWithAllFieldsNonCumulative(t *testing.T) { + var cfg []config + cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 15.5, 20.0, 30.0, 40.0}}) + cfg = append(cfg, config{Metric: "second_metric_name", Buckets: []float64{0.0, 4.0, 10.0, 23.0, 30.0}}) + histogram := NewTestHistogram(cfg, false, false) + + acc := &testutil.Accumulator{} + + histogram.Add(firstMetric1) + histogram.Add(firstMetric2) + histogram.Add(secondMetric) + histogram.Push(acc) + + if len(acc.Metrics) != 12 { + assert.Fail(t, "Incorrect number of metrics") + } + + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "15.5"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "15.5", bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "20", bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketLeftTag: "30", bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketLeftTag: "40", bucketRightTag: bucketPosInf}) + + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: bucketNegInf, bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "0", bucketRightTag: "4"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "4", bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "10", bucketRightTag: "23"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(0), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, 
tags{bucketLeftTag: "23", bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "second_metric_name", fields{"a_bucket": int64(1), "ignoreme_bucket": int64(0), "andme_bucket": int64(0)}, tags{bucketLeftTag: "30", bucketRightTag: bucketPosInf}) +} + +// TestHistogramWithTwoPeriodsAndAllFields tests two metrics getting added with a push/reset in between (simulates // getting added in different periods) for all fields -func TestHistogramDifferentPeriodsAndAllFields(t *testing.T) { +func TestHistogramWithTwoPeriodsAndAllFields(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 10.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg) + histogram := NewTestHistogram(cfg, false, true) acc := &testutil.Accumulator{} histogram.Add(firstMetric1) histogram.Push(acc) - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "0") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0)}, "10") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "20") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(0)}, "30") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, "40") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(1), "b_bucket": int64(1)}, bucketInf) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0)}, tags{bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(0)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(1)}, tags{bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(1), "b_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf}) acc.ClearMetrics() histogram.Add(firstMetric2) histogram.Push(acc) - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "0") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, "10") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "20") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, "30") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, "40") - assertContainsTaggedField(t, acc, "first_metric_name", map[string]interface{}{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, bucketInf) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), 
"c_bucket": int64(0)}, tags{bucketRightTag: "0"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(0), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "10"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "20"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(0), "c_bucket": int64(0)}, tags{bucketRightTag: "30"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: "40"}) + assertContainsTaggedField(t, acc, "first_metric_name", fields{"a_bucket": int64(2), "b_bucket": int64(1), "c_bucket": int64(1)}, tags{bucketRightTag: bucketPosInf}) } // TestWrongBucketsOrder tests the calling panic with incorrect order of buckets @@ -166,35 +252,42 @@ func TestWrongBucketsOrder(t *testing.T) { var cfg []config cfg = append(cfg, config{Metric: "first_metric_name", Buckets: []float64{0.0, 90.0, 20.0, 30.0, 40.0}}) - histogram := NewTestHistogram(cfg) + histogram := NewTestHistogram(cfg, false, true) histogram.Add(firstMetric2) } // assertContainsTaggedField is help functions to test histogram data -func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, le string) { +func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricName string, fields map[string]interface{}, tags map[string]string) { acc.Lock() defer acc.Unlock() for _, checkedMetric := range acc.Metrics { - // check metric name + // filter by metric name if checkedMetric.Measurement != metricName { continue } - // check "le" tag - if checkedMetric.Tags[bucketTag] != le { - continue - } - - // check fields - isFieldsIdentical := true - for field := range fields { - if _, ok := checkedMetric.Fields[field]; !ok { - isFieldsIdentical = false + // filter by tags + isTagsIdentical := true + for tag := range tags { + if val, ok := checkedMetric.Tags[tag]; !ok || val != tags[tag] { + isTagsIdentical = false break } } - if !isFieldsIdentical { + if !isTagsIdentical { + continue + } + + // filter by field keys + isFieldKeysIdentical := true + for field := range fields { + if _, ok := checkedMetric.Fields[field]; !ok { + isFieldKeysIdentical = false + break + } + } + if !isFieldKeysIdentical { continue } @@ -203,8 +296,8 @@ func assertContainsTaggedField(t *testing.T, acc *testutil.Accumulator, metricNa return } - assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", fields, metricName)) + assert.Fail(t, fmt.Sprintf("incorrect fields %v of metric %s", checkedMetric.Fields, metricName)) } - assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, map[string]string{"le": le}, fields)) + assert.Fail(t, fmt.Sprintf("unknown measurement '%s' with tags: %v, fields: %v", metricName, tags, fields)) } diff --git a/plugins/aggregators/merge/README.md b/plugins/aggregators/merge/README.md new file mode 100644 index 000000000..89f7f0983 --- /dev/null +++ b/plugins/aggregators/merge/README.md @@ -0,0 +1,25 @@ +# Merge Aggregator + +Merge metrics together into a metric with multiple fields into the most memory +and network transfer efficient form. + +Use this plugin when fields are split over multiple metrics, with the same +measurement, tag set and timestamp. 
By merging into a single metric they can +be handled more efficiently by the output. + +### Configuration + +```toml +[[aggregators.merge]] + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = true +``` + +### Example + +```diff +- cpu,host=localhost usage_time=42 1567562620000000000 +- cpu,host=localhost idle_time=42 1567562620000000000 ++ cpu,host=localhost idle_time=42,usage_time=42 1567562620000000000 +``` diff --git a/plugins/aggregators/merge/merge.go b/plugins/aggregators/merge/merge.go new file mode 100644 index 000000000..083c8fd3e --- /dev/null +++ b/plugins/aggregators/merge/merge.go @@ -0,0 +1,66 @@ +package seriesgrouper + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/aggregators" +) + +const ( + description = "Merge metrics into multifield metrics by series key" + sampleConfig = ` + ## If true, the original metric will be dropped by the + ## aggregator and will not get sent to the output plugins. + drop_original = true +` +) + +type Merge struct { + grouper *metric.SeriesGrouper + log telegraf.Logger +} + +func (a *Merge) Init() error { + a.grouper = metric.NewSeriesGrouper() + return nil +} + +func (a *Merge) Description() string { + return description +} + +func (a *Merge) SampleConfig() string { + return sampleConfig +} + +func (a *Merge) Add(m telegraf.Metric) { + tags := m.Tags() + for _, field := range m.FieldList() { + err := a.grouper.Add(m.Name(), tags, m.Time(), field.Key, field.Value) + if err != nil { + a.log.Errorf("Error adding metric: %v", err) + } + } +} + +func (a *Merge) Push(acc telegraf.Accumulator) { + // Always use nanosecond precision to avoid rounding metrics that were + // produced at a precision higher than the agent default. 
+ acc.SetPrecision(time.Nanosecond) + + for _, m := range a.grouper.Metrics() { + acc.AddMetric(m) + } +} + +func (a *Merge) Reset() { + a.grouper = metric.NewSeriesGrouper() +} + +func init() { + aggregators.Add("merge", func() telegraf.Aggregator { + return &Merge{} + }) +} diff --git a/plugins/aggregators/merge/merge_test.go b/plugins/aggregators/merge/merge_test.go new file mode 100644 index 000000000..2f2703c8f --- /dev/null +++ b/plugins/aggregators/merge/merge_test.go @@ -0,0 +1,186 @@ +package seriesgrouper + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestSimple(t *testing.T) { + plugin := &Merge{} + + err := plugin.Init() + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestNanosecondPrecision(t *testing.T) { + plugin := &Merge{} + + err := plugin.Init() + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 1), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + acc.SetPrecision(time.Second) + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + "time_guest": 42, + }, + time.Unix(0, 1), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestReset(t *testing.T) { + plugin := &Merge{} + + err := plugin.Init() + require.NoError(t, err) + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + var acc testutil.Accumulator + plugin.Push(&acc) + + plugin.Reset() + + plugin.Add( + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + ) + require.NoError(t, err) + + plugin.Push(&acc) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 42, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} diff --git a/plugins/aggregators/valuecounter/README.md b/plugins/aggregators/valuecounter/README.md index 3d132c3bb..ef68e0f4e 100644 --- 
a/plugins/aggregators/valuecounter/README.md +++ b/plugins/aggregators/valuecounter/README.md @@ -11,8 +11,9 @@ configuration directive. When no `fields` is provided the plugin will not count any fields. The results are emitted in fields in the format: `originalfieldname_fieldvalue = count`. -Valuecounter only works on fields of the type int, bool or string. Float fields -are being dropped to prevent the creating of too many fields. +Counting fields with a high number of potential values may produce significant +amounts of new fields and memory usage, take care to only count fields with a +limited set of values. ### Configuration: diff --git a/plugins/aggregators/valuecounter/valuecounter.go b/plugins/aggregators/valuecounter/valuecounter.go index c43b7723b..a25c9dcaf 100644 --- a/plugins/aggregators/valuecounter/valuecounter.go +++ b/plugins/aggregators/valuecounter/valuecounter.go @@ -2,7 +2,6 @@ package valuecounter import ( "fmt" - "log" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/aggregators" @@ -20,7 +19,7 @@ type ValueCounter struct { Fields []string } -// NewValueCounter create a new aggregation plugin which counts the occurances +// NewValueCounter create a new aggregation plugin which counts the occurrences // of fields and emits the count. func NewValueCounter() telegraf.Aggregator { vc := &ValueCounter{} @@ -46,7 +45,7 @@ func (vc *ValueCounter) SampleConfig() string { // Description returns the description of the ValueCounter plugin func (vc *ValueCounter) Description() string { - return "Count the occurance of values in fields." + return "Count the occurrence of values in fields." } // Add is run on every metric which passes the plugin @@ -68,14 +67,6 @@ func (vc *ValueCounter) Add(in telegraf.Metric) { for fk, fv := range in.Fields() { for _, cf := range vc.Fields { if fk == cf { - // Do not process float types to prevent memory from blowing up - switch fv.(type) { - default: - log.Printf("I! Valuecounter: Unsupported field type. " + - "Must be an int, string or bool. 
Ignoring.") - continue - case uint64, int64, string, bool: - } fn := fmt.Sprintf("%v_%v", fk, fv) vc.cache[id].fieldCount[fn]++ } diff --git a/plugins/aggregators/valuecounter/valuecounter_test.go b/plugins/aggregators/valuecounter/valuecounter_test.go index 01c68c496..8cec5f366 100644 --- a/plugins/aggregators/valuecounter/valuecounter_test.go +++ b/plugins/aggregators/valuecounter/valuecounter_test.go @@ -22,9 +22,8 @@ func NewTestValueCounter(fields []string) telegraf.Aggregator { var m1, _ = metric.New("m1", map[string]string{"foo": "bar"}, map[string]interface{}{ - "status": 200, - "somefield": 20.1, - "foobar": "bar", + "status": 200, + "foobar": "bar", }, time.Now(), ) diff --git a/plugins/common/kafka/sasl.go b/plugins/common/kafka/sasl.go new file mode 100644 index 000000000..cd3358b38 --- /dev/null +++ b/plugins/common/kafka/sasl.go @@ -0,0 +1,25 @@ +package kafka + +import ( + "errors" + + "github.com/Shopify/sarama" +) + +func SASLVersion(kafkaVersion sarama.KafkaVersion, saslVersion *int) (int16, error) { + if saslVersion == nil { + if kafkaVersion.IsAtLeast(sarama.V1_0_0_0) { + return sarama.SASLHandshakeV1, nil + } + return sarama.SASLHandshakeV0, nil + } + + switch *saslVersion { + case 0: + return sarama.SASLHandshakeV0, nil + case 1: + return sarama.SASLHandshakeV1, nil + default: + return 0, errors.New("invalid SASL version") + } +} diff --git a/plugins/common/logrus/hook.go b/plugins/common/logrus/hook.go new file mode 100644 index 000000000..a7f99023b --- /dev/null +++ b/plugins/common/logrus/hook.go @@ -0,0 +1,35 @@ +package logrus + +import ( + "io/ioutil" + "log" + "strings" + "sync" + + "github.com/sirupsen/logrus" +) + +var once sync.Once + +type LogHook struct { +} + +// Install a logging hook into the logrus standard logger, diverting all logs +// through the Telegraf logger at debug level. This is useful for libraries +// that directly log to the logrus system without providing an override method. +func InstallHook() { + once.Do(func() { + logrus.SetOutput(ioutil.Discard) + logrus.AddHook(&LogHook{}) + }) +} + +func (h *LogHook) Fire(entry *logrus.Entry) error { + msg := strings.ReplaceAll(entry.Message, "\n", " ") + log.Print("D! [logrus] ", msg) + return nil +} + +func (h *LogHook) Levels() []logrus.Level { + return logrus.AllLevels +} diff --git a/plugins/inputs/EXAMPLE_README.md b/plugins/inputs/EXAMPLE_README.md index 4ebedda87..6b86615b0 100644 --- a/plugins/inputs/EXAMPLE_README.md +++ b/plugins/inputs/EXAMPLE_README.md @@ -1,21 +1,28 @@ # Example Input Plugin -The example plugin gathers metrics about example things. This description +The `example` plugin gathers metrics about example things. This description explains at a high level what the plugin does and provides links to where additional information can be found. -### Configuration: +Telegraf minimum version: Telegraf x.x +Plugin minimum tested version: x.x + +### Configuration This section contains the default TOML to configure the plugin. You can generate it using `telegraf --usage `. ```toml -# Description [[inputs.example]] example_option = "example_value" ``` -### Metrics: +#### example_option + +A more in depth description of an option can be provided here, but only do so +if the option cannot be fully described in the sample config. + +### Metrics Here you should add an optional description and links to where the user can get more information about the measurements. @@ -32,16 +39,20 @@ mapped to the output. 
- field1 (type, unit) - field2 (float, percent) -- measurement2 ++ measurement2 - tags: - tag3 - fields: - field3 (integer, bytes) + - field4 (integer, green=1 yellow=2 red=3) + - field5 (string) + - field6 (float) + - field7 (boolean) -### Sample Queries: +### Sample Queries -This section should contain some useful InfluxDB queries that can be used to -get started with the plugin or to generate dashboards. For each query listed, +This section can contain some useful InfluxDB queries that can be used to get +started with the plugin or to generate dashboards. For each query listed, describe at a high level what data is returned. Get the max, mean, and min for the measurement in the last hour: @@ -49,7 +60,12 @@ Get the max, mean, and min for the measurement in the last hour: SELECT max(field1), mean(field1), min(field1) FROM measurement1 WHERE tag1=bar AND time > now() - 1h GROUP BY tag ``` -### Example Output: +### Troubleshooting + +This optional section can provide basic troubleshooting steps that a user can +perform. + +### Example Output This section shows example output in Line Protocol format. You can often use `telegraf --input-filter --test` or use the `file` output to get diff --git a/plugins/inputs/activemq/README.md b/plugins/inputs/activemq/README.md new file mode 100644 index 000000000..aba5a7f83 --- /dev/null +++ b/plugins/inputs/activemq/README.md @@ -0,0 +1,88 @@ +# ActiveMQ Input Plugin + +This plugin gather queues, topics & subscribers metrics using ActiveMQ Console API. + +### Configuration: + +```toml +# Description +[[inputs.activemq]] + ## ActiveMQ WebConsole URL + url = "http://127.0.0.1:8161" + + ## Required ActiveMQ Endpoint + ## deprecated in 1.11; use the url option + # server = "192.168.50.10" + # port = 8161 + + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + + ## Required ActiveMQ webadmin root path + # webadmin = "admin" + + ## Maximum time to receive response. + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Metrics + +Every effort was made to preserve the names based on the XML response from the ActiveMQ Console API. 
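For reference, the console pages polled by this plugin are plain XML documents under the webadmin root (`<webadmin>/xml/queues.jsp`, `.../xml/topics.jsp`, `.../xml/subscribers.jsp`). The snippet below is a minimal standalone sketch of fetching and decoding the queues page with only the standard library; the broker address and credentials are assumptions, and the trimmed-down `queues` type is illustrative rather than the plugin's own (the real types live in `activemq.go` below).

```go
package main

import (
	"encoding/xml"
	"fmt"
	"io/ioutil"
	"net/http"
)

// queues is a trimmed-down mirror of the <queues> document served by the web console.
type queues struct {
	Items []struct {
		Name  string `xml:"name,attr"`
		Stats struct {
			Size          int `xml:"size,attr"`
			ConsumerCount int `xml:"consumerCount,attr"`
			EnqueueCount  int `xml:"enqueueCount,attr"`
			DequeueCount  int `xml:"dequeueCount,attr"`
		} `xml:"stats"`
	} `xml:"queue"`
}

func main() {
	// Assumed local broker with the default webadmin root and credentials.
	req, err := http.NewRequest("GET", "http://127.0.0.1:8161/admin/xml/queues.jsp", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("admin", "admin")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	// Decode the queue names and per-queue stats attributes.
	var q queues
	if err := xml.Unmarshal(body, &q); err != nil {
		panic(err)
	}
	for _, item := range q.Items {
		fmt.Printf("queue=%q size=%d consumers=%d\n", item.Name, item.Stats.Size, item.Stats.ConsumerCount)
	}
}
```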
+ +- activemq_queues + - tags: + - name + - source + - port + - fields: + - size + - consumer_count + - enqueue_count + - dequeue_count ++ activemq_topics + - tags: + - name + - source + - port + - fields: + - size + - consumer_count + - enqueue_count + - dequeue_count +- activemq_subscribers + - tags: + - client_id + - subscription_name + - connection_id + - destination_name + - selector + - active + - source + - port + - fields: + - pending_queue_size + - dispatched_queue_size + - dispatched_counter + - enqueue_counter + - dequeue_counter + +### Example Output + +``` +activemq_queues,name=sandra,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000 +activemq_queues,name=Test,host=88284b2fe51b,source=localhost,port=8161 dequeue_count=0i,size=0i,consumer_count=0i,enqueue_count=0i 1492610703000000000 +activemq_topics,name=ActiveMQ.Advisory.MasterBroker\ ,host=88284b2fe51b,source=localhost,port=8161 size=0i,consumer_count=0i,enqueue_count=1i,dequeue_count=0i 1492610703000000000 +activemq_topics,host=88284b2fe51b,name=AAA\,source=localhost,port=8161 size=0i,consumer_count=1i,enqueue_count=0i,dequeue_count=0i 1492610703000000000 +activemq_topics,name=ActiveMQ.Advisory.Topic\,source=localhost,port=8161 ,host=88284b2fe51b enqueue_count=1i,dequeue_count=0i,size=0i,consumer_count=0i 1492610703000000000 +activemq_topics,name=ActiveMQ.Advisory.Queue\,source=localhost,port=8161 ,host=88284b2fe51b size=0i,consumer_count=0i,enqueue_count=2i,dequeue_count=0i 1492610703000000000 +activemq_topics,name=AAAA\ ,host=88284b2fe51b,source=localhost,port=8161 consumer_count=0i,enqueue_count=0i,dequeue_count=0i,size=0i 1492610703000000000 +activemq_subscribers,connection_id=NOTSET,destination_name=AAA,,source=localhost,port=8161,selector=AA,active=no,host=88284b2fe51b,client_id=AAA,subscription_name=AAA pending_queue_size=0i,dispatched_queue_size=0i,dispatched_counter=0i,enqueue_counter=0i,dequeue_counter=0i 1492610703000000000 +``` diff --git a/plugins/inputs/activemq/activemq.go b/plugins/inputs/activemq/activemq.go new file mode 100644 index 000000000..9d08661b7 --- /dev/null +++ b/plugins/inputs/activemq/activemq.go @@ -0,0 +1,311 @@ +package activemq + +import ( + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type ActiveMQ struct { + Server string `toml:"server"` + Port int `toml:"port"` + URL string `toml:"url"` + Username string `toml:"username"` + Password string `toml:"password"` + Webadmin string `toml:"webadmin"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + tls.ClientConfig + + client *http.Client + baseURL *url.URL +} + +type Topics struct { + XMLName xml.Name `xml:"topics"` + TopicItems []Topic `xml:"topic"` +} + +type Topic struct { + XMLName xml.Name `xml:"topic"` + Name string `xml:"name,attr"` + Stats Stats `xml:"stats"` +} + +type Subscribers struct { + XMLName xml.Name `xml:"subscribers"` + SubscriberItems []Subscriber `xml:"subscriber"` +} + +type Subscriber struct { + XMLName xml.Name `xml:"subscriber"` + ClientId string `xml:"clientId,attr"` + SubscriptionName string `xml:"subscriptionName,attr"` + ConnectionId string `xml:"connectionId,attr"` + DestinationName string `xml:"destinationName,attr"` + Selector string `xml:"selector,attr"` + Active string 
`xml:"active,attr"` + Stats Stats `xml:"stats"` +} + +type Queues struct { + XMLName xml.Name `xml:"queues"` + QueueItems []Queue `xml:"queue"` +} + +type Queue struct { + XMLName xml.Name `xml:"queue"` + Name string `xml:"name,attr"` + Stats Stats `xml:"stats"` +} + +type Stats struct { + XMLName xml.Name `xml:"stats"` + Size int `xml:"size,attr"` + ConsumerCount int `xml:"consumerCount,attr"` + EnqueueCount int `xml:"enqueueCount,attr"` + DequeueCount int `xml:"dequeueCount,attr"` + PendingQueueSize int `xml:"pendingQueueSize,attr"` + DispatchedQueueSize int `xml:"dispatchedQueueSize,attr"` + DispatchedCounter int `xml:"dispatchedCounter,attr"` + EnqueueCounter int `xml:"enqueueCounter,attr"` + DequeueCounter int `xml:"dequeueCounter,attr"` +} + +var sampleConfig = ` + ## ActiveMQ WebConsole URL + url = "http://127.0.0.1:8161" + + ## Required ActiveMQ Endpoint + ## deprecated in 1.11; use the url option + # server = "127.0.0.1" + # port = 8161 + + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + + ## Required ActiveMQ webadmin root path + # webadmin = "admin" + + ## Maximum time to receive response. + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + ` + +func (a *ActiveMQ) Description() string { + return "Gather ActiveMQ metrics" +} + +func (a *ActiveMQ) SampleConfig() string { + return sampleConfig +} + +func (a *ActiveMQ) createHttpClient() (*http.Client, error) { + tlsCfg, err := a.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: a.ResponseTimeout.Duration, + } + + return client, nil +} + +func (a *ActiveMQ) Init() error { + if a.ResponseTimeout.Duration < time.Second { + a.ResponseTimeout.Duration = time.Second * 5 + } + + var err error + u := &url.URL{Scheme: "http", Host: a.Server + ":" + strconv.Itoa(a.Port)} + if a.URL != "" { + u, err = url.Parse(a.URL) + if err != nil { + return err + } + } + + if !strings.HasPrefix(u.Scheme, "http") { + return fmt.Errorf("invalid scheme %q", u.Scheme) + } + + if u.Hostname() == "" { + return fmt.Errorf("invalid hostname %q", u.Hostname()) + } + + a.baseURL = u + + a.client, err = a.createHttpClient() + if err != nil { + return err + } + return nil +} + +func (a *ActiveMQ) GetMetrics(u string) ([]byte, error) { + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + if a.Username != "" || a.Password != "" { + req.SetBasicAuth(a.Username, a.Password) + } + + resp, err := a.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("GET %s returned status %q", u, resp.Status) + } + + return ioutil.ReadAll(resp.Body) +} + +func (a *ActiveMQ) GatherQueuesMetrics(acc telegraf.Accumulator, queues Queues) { + for _, queue := range queues.QueueItems { + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = strings.TrimSpace(queue.Name) + tags["source"] = a.baseURL.Hostname() + tags["port"] = a.baseURL.Port() + + records["size"] = queue.Stats.Size + records["consumer_count"] = queue.Stats.ConsumerCount + records["enqueue_count"] = queue.Stats.EnqueueCount + records["dequeue_count"] = queue.Stats.DequeueCount + + 
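		// each queue becomes one activemq_queues point, tagged with the queue name and the console host and port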
acc.AddFields("activemq_queues", records, tags) + } +} + +func (a *ActiveMQ) GatherTopicsMetrics(acc telegraf.Accumulator, topics Topics) { + for _, topic := range topics.TopicItems { + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = topic.Name + tags["source"] = a.baseURL.Hostname() + tags["port"] = a.baseURL.Port() + + records["size"] = topic.Stats.Size + records["consumer_count"] = topic.Stats.ConsumerCount + records["enqueue_count"] = topic.Stats.EnqueueCount + records["dequeue_count"] = topic.Stats.DequeueCount + + acc.AddFields("activemq_topics", records, tags) + } +} + +func (a *ActiveMQ) GatherSubscribersMetrics(acc telegraf.Accumulator, subscribers Subscribers) { + for _, subscriber := range subscribers.SubscriberItems { + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["client_id"] = subscriber.ClientId + tags["subscription_name"] = subscriber.SubscriptionName + tags["connection_id"] = subscriber.ConnectionId + tags["destination_name"] = subscriber.DestinationName + tags["selector"] = subscriber.Selector + tags["active"] = subscriber.Active + tags["source"] = a.baseURL.Hostname() + tags["port"] = a.baseURL.Port() + + records["pending_queue_size"] = subscriber.Stats.PendingQueueSize + records["dispatched_queue_size"] = subscriber.Stats.DispatchedQueueSize + records["dispatched_counter"] = subscriber.Stats.DispatchedCounter + records["enqueue_counter"] = subscriber.Stats.EnqueueCounter + records["dequeue_counter"] = subscriber.Stats.DequeueCounter + + acc.AddFields("activemq_subscribers", records, tags) + } +} + +func (a *ActiveMQ) Gather(acc telegraf.Accumulator) error { + dataQueues, err := a.GetMetrics(a.QueuesURL()) + if err != nil { + return err + } + queues := Queues{} + err = xml.Unmarshal(dataQueues, &queues) + if err != nil { + return fmt.Errorf("queues XML unmarshal error: %v", err) + } + + dataTopics, err := a.GetMetrics(a.TopicsURL()) + if err != nil { + return err + } + topics := Topics{} + err = xml.Unmarshal(dataTopics, &topics) + if err != nil { + return fmt.Errorf("topics XML unmarshal error: %v", err) + } + + dataSubscribers, err := a.GetMetrics(a.SubscribersURL()) + if err != nil { + return err + } + subscribers := Subscribers{} + err = xml.Unmarshal(dataSubscribers, &subscribers) + if err != nil { + return fmt.Errorf("subscribers XML unmarshal error: %v", err) + } + + a.GatherQueuesMetrics(acc, queues) + a.GatherTopicsMetrics(acc, topics) + a.GatherSubscribersMetrics(acc, subscribers) + + return nil +} + +func (a *ActiveMQ) QueuesURL() string { + ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/queues.jsp")} + return a.baseURL.ResolveReference(&ref).String() +} + +func (a *ActiveMQ) TopicsURL() string { + ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/topics.jsp")} + return a.baseURL.ResolveReference(&ref).String() +} + +func (a *ActiveMQ) SubscribersURL() string { + ref := url.URL{Path: path.Join("/", a.Webadmin, "/xml/subscribers.jsp")} + return a.baseURL.ResolveReference(&ref).String() +} + +func init() { + inputs.Add("activemq", func() telegraf.Input { + return &ActiveMQ{ + Server: "localhost", + Port: 8161, + Webadmin: "admin", + } + }) +} diff --git a/plugins/inputs/activemq/activemq_test.go b/plugins/inputs/activemq/activemq_test.go new file mode 100644 index 000000000..407a38177 --- /dev/null +++ b/plugins/inputs/activemq/activemq_test.go @@ -0,0 +1,180 @@ +package activemq + +import ( + "encoding/xml" + "net/http" + "net/http/httptest" + "testing" + + 
"github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGatherQueuesMetrics(t *testing.T) { + + s := ` + + + +queueBrowse/sandra?view=rss&feedType=atom_1.0 +queueBrowse/sandra?view=rss&feedType=rss_2.0 + + + + + +queueBrowse/Test?view=rss&feedType=atom_1.0 +queueBrowse/Test?view=rss&feedType=rss_2.0 + + +` + + queues := Queues{} + + xml.Unmarshal([]byte(s), &queues) + + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = "Test" + tags["source"] = "localhost" + tags["port"] = "8161" + + records["size"] = 0 + records["consumer_count"] = 0 + records["enqueue_count"] = 0 + records["dequeue_count"] = 0 + + var acc testutil.Accumulator + + activeMQ := new(ActiveMQ) + activeMQ.Server = "localhost" + activeMQ.Port = 8161 + activeMQ.Init() + + activeMQ.GatherQueuesMetrics(&acc, queues) + acc.AssertContainsTaggedFields(t, "activemq_queues", records, tags) +} + +func TestGatherTopicsMetrics(t *testing.T) { + + s := ` + + + + + + + + + + + + + + + +` + + topics := Topics{} + + xml.Unmarshal([]byte(s), &topics) + + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = "ActiveMQ.Advisory.MasterBroker " + tags["source"] = "localhost" + tags["port"] = "8161" + + records["size"] = 0 + records["consumer_count"] = 0 + records["enqueue_count"] = 1 + records["dequeue_count"] = 0 + + var acc testutil.Accumulator + + activeMQ := new(ActiveMQ) + activeMQ.Server = "localhost" + activeMQ.Port = 8161 + activeMQ.Init() + + activeMQ.GatherTopicsMetrics(&acc, topics) + acc.AssertContainsTaggedFields(t, "activemq_topics", records, tags) +} + +func TestGatherSubscribersMetrics(t *testing.T) { + + s := ` + + + +` + + subscribers := Subscribers{} + + xml.Unmarshal([]byte(s), &subscribers) + + records := make(map[string]interface{}) + tags := make(map[string]string) + + tags["client_id"] = "AAA" + tags["subscription_name"] = "AAA" + tags["connection_id"] = "NOTSET" + tags["destination_name"] = "AAA" + tags["selector"] = "AA" + tags["active"] = "no" + tags["source"] = "localhost" + tags["port"] = "8161" + + records["pending_queue_size"] = 0 + records["dispatched_queue_size"] = 0 + records["dispatched_counter"] = 0 + records["enqueue_counter"] = 0 + records["dequeue_counter"] = 0 + + var acc testutil.Accumulator + + activeMQ := new(ActiveMQ) + activeMQ.Server = "localhost" + activeMQ.Port = 8161 + activeMQ.Init() + + activeMQ.GatherSubscribersMetrics(&acc, subscribers) + acc.AssertContainsTaggedFields(t, "activemq_subscribers", records, tags) +} + +func TestURLs(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/admin/xml/queues.jsp": + w.WriteHeader(http.StatusOK) + w.Write([]byte("")) + case "/admin/xml/topics.jsp": + w.WriteHeader(http.StatusOK) + w.Write([]byte("")) + case "/admin/xml/subscribers.jsp": + w.WriteHeader(http.StatusOK) + w.Write([]byte("")) + default: + w.WriteHeader(http.StatusNotFound) + t.Fatalf("unexpected path: " + r.URL.Path) + } + }) + + plugin := ActiveMQ{ + URL: "http://" + ts.Listener.Addr().String(), + Webadmin: "admin", + } + err := plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = plugin.Gather(&acc) + require.NoError(t, err) + + require.Len(t, acc.GetTelegrafMetrics(), 0) +} diff --git a/plugins/inputs/aerospike/aerospike.go b/plugins/inputs/aerospike/aerospike.go index 3caee7e7d..d4c4fce85 
100644 --- a/plugins/inputs/aerospike/aerospike.go +++ b/plugins/inputs/aerospike/aerospike.go @@ -2,8 +2,6 @@ package aerospike import ( "crypto/tls" - "errors" - "log" "net" "strconv" "strings" @@ -120,12 +118,8 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro return err } for k, v := range stats { - val, err := parseValue(v) - if err == nil { - fields[strings.Replace(k, "-", "_", -1)] = val - } else { - log.Printf("I! skipping aerospike field %v with int64 overflow: %q", k, v) - } + val := parseValue(v) + fields[strings.Replace(k, "-", "_", -1)] = val } acc.AddFields("aerospike_node", fields, tags, time.Now()) @@ -152,12 +146,8 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro if len(parts) < 2 { continue } - val, err := parseValue(parts[1]) - if err == nil { - nFields[strings.Replace(parts[0], "-", "_", -1)] = val - } else { - log.Printf("I! skipping aerospike field %v with int64 overflow: %q", parts[0], parts[1]) - } + val := parseValue(parts[1]) + nFields[strings.Replace(parts[0], "-", "_", -1)] = val } acc.AddFields("aerospike_namespace", nFields, nTags, time.Now()) } @@ -165,16 +155,16 @@ func (a *Aerospike) gatherServer(hostport string, acc telegraf.Accumulator) erro return nil } -func parseValue(v string) (interface{}, error) { +func parseValue(v string) interface{} { if parsed, err := strconv.ParseInt(v, 10, 64); err == nil { - return parsed, nil - } else if _, err := strconv.ParseUint(v, 10, 64); err == nil { - // int64 overflow, yet valid uint64 - return nil, errors.New("Number is too large") + return parsed + } else if parsed, err := strconv.ParseUint(v, 10, 64); err == nil { + return parsed } else if parsed, err := strconv.ParseBool(v); err == nil { - return parsed, nil + return parsed } else { - return v, nil + // leave as string + return v } } diff --git a/plugins/inputs/aerospike/aerospike_test.go b/plugins/inputs/aerospike/aerospike_test.go index 078e148f5..724102195 100644 --- a/plugins/inputs/aerospike/aerospike_test.go +++ b/plugins/inputs/aerospike/aerospike_test.go @@ -52,17 +52,14 @@ func TestAerospikeStatisticsPartialErr(t *testing.T) { func TestAerospikeParseValue(t *testing.T) { // uint64 with value bigger than int64 max - val, err := parseValue("18446744041841121751") - assert.Nil(t, val) - assert.Error(t, err) + val := parseValue("18446744041841121751") + require.Equal(t, uint64(18446744041841121751), val) // int values - val, err = parseValue("42") - assert.NoError(t, err) - assert.Equal(t, val, int64(42), "must be parsed as int") + val = parseValue("42") + require.Equal(t, val, int64(42), "must be parsed as int") // string values - val, err = parseValue("BB977942A2CA502") - assert.NoError(t, err) - assert.Equal(t, val, `BB977942A2CA502`, "must be left as string") + val = parseValue("BB977942A2CA502") + require.Equal(t, val, `BB977942A2CA502`, "must be left as string") } diff --git a/plugins/inputs/all/all.go b/plugins/inputs/all/all.go index 4b52aa1f9..a328f4bf0 100644 --- a/plugins/inputs/all/all.go +++ b/plugins/inputs/all/all.go @@ -3,85 +3,135 @@ package all import ( _ "gitea.statsd.de/dom/telegraf/plugins/inputs/rss" _ "gitea.statsd.de/dom/telegraf/plugins/inputs/twitter" + _ "github.com/influxdata/telegraf/plugins/inputs/activemq" _ "github.com/influxdata/telegraf/plugins/inputs/aerospike" _ "github.com/influxdata/telegraf/plugins/inputs/amqp_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/apache" + _ "github.com/influxdata/telegraf/plugins/inputs/apcupsd" _ 
"github.com/influxdata/telegraf/plugins/inputs/aurora" + _ "github.com/influxdata/telegraf/plugins/inputs/azure_storage_queue" _ "github.com/influxdata/telegraf/plugins/inputs/bcache" + _ "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" + _ "github.com/influxdata/telegraf/plugins/inputs/bind" _ "github.com/influxdata/telegraf/plugins/inputs/bond" _ "github.com/influxdata/telegraf/plugins/inputs/burrow" _ "github.com/influxdata/telegraf/plugins/inputs/cassandra" _ "github.com/influxdata/telegraf/plugins/inputs/ceph" _ "github.com/influxdata/telegraf/plugins/inputs/cgroup" _ "github.com/influxdata/telegraf/plugins/inputs/chrony" + _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_gnmi" + _ "github.com/influxdata/telegraf/plugins/inputs/cisco_telemetry_mdt" + _ "github.com/influxdata/telegraf/plugins/inputs/clickhouse" + _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub" + _ "github.com/influxdata/telegraf/plugins/inputs/cloud_pubsub_push" _ "github.com/influxdata/telegraf/plugins/inputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/inputs/conntrack" _ "github.com/influxdata/telegraf/plugins/inputs/consul" _ "github.com/influxdata/telegraf/plugins/inputs/couchbase" _ "github.com/influxdata/telegraf/plugins/inputs/couchdb" + _ "github.com/influxdata/telegraf/plugins/inputs/cpu" _ "github.com/influxdata/telegraf/plugins/inputs/dcos" + _ "github.com/influxdata/telegraf/plugins/inputs/disk" + _ "github.com/influxdata/telegraf/plugins/inputs/diskio" _ "github.com/influxdata/telegraf/plugins/inputs/disque" _ "github.com/influxdata/telegraf/plugins/inputs/dmcache" _ "github.com/influxdata/telegraf/plugins/inputs/dns_query" _ "github.com/influxdata/telegraf/plugins/inputs/docker" + _ "github.com/influxdata/telegraf/plugins/inputs/docker_log" _ "github.com/influxdata/telegraf/plugins/inputs/dovecot" + _ "github.com/influxdata/telegraf/plugins/inputs/ecs" _ "github.com/influxdata/telegraf/plugins/inputs/elasticsearch" + _ "github.com/influxdata/telegraf/plugins/inputs/ethtool" + _ "github.com/influxdata/telegraf/plugins/inputs/eventhub_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/exec" + _ "github.com/influxdata/telegraf/plugins/inputs/execd" _ "github.com/influxdata/telegraf/plugins/inputs/fail2ban" _ "github.com/influxdata/telegraf/plugins/inputs/fibaro" + _ "github.com/influxdata/telegraf/plugins/inputs/file" + _ "github.com/influxdata/telegraf/plugins/inputs/filecount" _ "github.com/influxdata/telegraf/plugins/inputs/filestat" + _ "github.com/influxdata/telegraf/plugins/inputs/fireboard" _ "github.com/influxdata/telegraf/plugins/inputs/fluentd" + _ "github.com/influxdata/telegraf/plugins/inputs/github" _ "github.com/influxdata/telegraf/plugins/inputs/graylog" _ "github.com/influxdata/telegraf/plugins/inputs/haproxy" _ "github.com/influxdata/telegraf/plugins/inputs/hddtemp" _ "github.com/influxdata/telegraf/plugins/inputs/http" - _ "github.com/influxdata/telegraf/plugins/inputs/http_listener" + _ "github.com/influxdata/telegraf/plugins/inputs/http_listener_v2" _ "github.com/influxdata/telegraf/plugins/inputs/http_response" _ "github.com/influxdata/telegraf/plugins/inputs/httpjson" + _ "github.com/influxdata/telegraf/plugins/inputs/icinga2" + _ "github.com/influxdata/telegraf/plugins/inputs/infiniband" _ "github.com/influxdata/telegraf/plugins/inputs/influxdb" + _ "github.com/influxdata/telegraf/plugins/inputs/influxdb_listener" _ "github.com/influxdata/telegraf/plugins/inputs/internal" _ 
"github.com/influxdata/telegraf/plugins/inputs/interrupts" _ "github.com/influxdata/telegraf/plugins/inputs/ipmi_sensor" _ "github.com/influxdata/telegraf/plugins/inputs/ipset" _ "github.com/influxdata/telegraf/plugins/inputs/iptables" + _ "github.com/influxdata/telegraf/plugins/inputs/ipvs" + _ "github.com/influxdata/telegraf/plugins/inputs/jenkins" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia" _ "github.com/influxdata/telegraf/plugins/inputs/jolokia2" _ "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry" _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/kafka_consumer_legacy" _ "github.com/influxdata/telegraf/plugins/inputs/kapacitor" + _ "github.com/influxdata/telegraf/plugins/inputs/kernel" + _ "github.com/influxdata/telegraf/plugins/inputs/kernel_vmstat" + _ "github.com/influxdata/telegraf/plugins/inputs/kibana" + _ "github.com/influxdata/telegraf/plugins/inputs/kinesis_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/kube_inventory" _ "github.com/influxdata/telegraf/plugins/inputs/kubernetes" + _ "github.com/influxdata/telegraf/plugins/inputs/lanz" _ "github.com/influxdata/telegraf/plugins/inputs/leofs" + _ "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" _ "github.com/influxdata/telegraf/plugins/inputs/logparser" + _ "github.com/influxdata/telegraf/plugins/inputs/logstash" _ "github.com/influxdata/telegraf/plugins/inputs/lustre2" _ "github.com/influxdata/telegraf/plugins/inputs/mailchimp" + _ "github.com/influxdata/telegraf/plugins/inputs/marklogic" _ "github.com/influxdata/telegraf/plugins/inputs/mcrouter" + _ "github.com/influxdata/telegraf/plugins/inputs/mem" _ "github.com/influxdata/telegraf/plugins/inputs/memcached" _ "github.com/influxdata/telegraf/plugins/inputs/mesos" _ "github.com/influxdata/telegraf/plugins/inputs/minecraft" + _ "github.com/influxdata/telegraf/plugins/inputs/modbus" _ "github.com/influxdata/telegraf/plugins/inputs/mongodb" + _ "github.com/influxdata/telegraf/plugins/inputs/monit" _ "github.com/influxdata/telegraf/plugins/inputs/mqtt_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/multifile" _ "github.com/influxdata/telegraf/plugins/inputs/mysql" _ "github.com/influxdata/telegraf/plugins/inputs/nats" _ "github.com/influxdata/telegraf/plugins/inputs/nats_consumer" + _ "github.com/influxdata/telegraf/plugins/inputs/neptune_apex" + _ "github.com/influxdata/telegraf/plugins/inputs/net" _ "github.com/influxdata/telegraf/plugins/inputs/net_response" _ "github.com/influxdata/telegraf/plugins/inputs/nginx" _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus" + _ "github.com/influxdata/telegraf/plugins/inputs/nginx_plus_api" + _ "github.com/influxdata/telegraf/plugins/inputs/nginx_upstream_check" + _ "github.com/influxdata/telegraf/plugins/inputs/nginx_vts" _ "github.com/influxdata/telegraf/plugins/inputs/nsq" _ "github.com/influxdata/telegraf/plugins/inputs/nsq_consumer" _ "github.com/influxdata/telegraf/plugins/inputs/nstat" _ "github.com/influxdata/telegraf/plugins/inputs/ntpq" _ "github.com/influxdata/telegraf/plugins/inputs/nvidia_smi" _ "github.com/influxdata/telegraf/plugins/inputs/openldap" + _ "github.com/influxdata/telegraf/plugins/inputs/openntpd" _ "github.com/influxdata/telegraf/plugins/inputs/opensmtpd" + _ "github.com/influxdata/telegraf/plugins/inputs/openweathermap" _ "github.com/influxdata/telegraf/plugins/inputs/passenger" _ "github.com/influxdata/telegraf/plugins/inputs/pf" + _ 
"github.com/influxdata/telegraf/plugins/inputs/pgbouncer" _ "github.com/influxdata/telegraf/plugins/inputs/phpfpm" _ "github.com/influxdata/telegraf/plugins/inputs/ping" _ "github.com/influxdata/telegraf/plugins/inputs/postfix" _ "github.com/influxdata/telegraf/plugins/inputs/postgresql" _ "github.com/influxdata/telegraf/plugins/inputs/postgresql_extensible" _ "github.com/influxdata/telegraf/plugins/inputs/powerdns" + _ "github.com/influxdata/telegraf/plugins/inputs/powerdns_recursor" + _ "github.com/influxdata/telegraf/plugins/inputs/processes" _ "github.com/influxdata/telegraf/plugins/inputs/procstat" _ "github.com/influxdata/telegraf/plugins/inputs/prometheus" _ "github.com/influxdata/telegraf/plugins/inputs/puppetagent" @@ -92,29 +142,42 @@ import ( _ "github.com/influxdata/telegraf/plugins/inputs/riak" _ "github.com/influxdata/telegraf/plugins/inputs/salesforce" _ "github.com/influxdata/telegraf/plugins/inputs/sensors" + _ "github.com/influxdata/telegraf/plugins/inputs/sflow" _ "github.com/influxdata/telegraf/plugins/inputs/smart" _ "github.com/influxdata/telegraf/plugins/inputs/snmp" _ "github.com/influxdata/telegraf/plugins/inputs/snmp_legacy" + _ "github.com/influxdata/telegraf/plugins/inputs/snmp_trap" _ "github.com/influxdata/telegraf/plugins/inputs/socket_listener" _ "github.com/influxdata/telegraf/plugins/inputs/solr" _ "github.com/influxdata/telegraf/plugins/inputs/sqlserver" + _ "github.com/influxdata/telegraf/plugins/inputs/stackdriver" _ "github.com/influxdata/telegraf/plugins/inputs/statsd" + _ "github.com/influxdata/telegraf/plugins/inputs/suricata" + _ "github.com/influxdata/telegraf/plugins/inputs/swap" + _ "github.com/influxdata/telegraf/plugins/inputs/synproxy" _ "github.com/influxdata/telegraf/plugins/inputs/syslog" _ "github.com/influxdata/telegraf/plugins/inputs/sysstat" _ "github.com/influxdata/telegraf/plugins/inputs/system" + _ "github.com/influxdata/telegraf/plugins/inputs/systemd_units" _ "github.com/influxdata/telegraf/plugins/inputs/tail" _ "github.com/influxdata/telegraf/plugins/inputs/tcp_listener" _ "github.com/influxdata/telegraf/plugins/inputs/teamspeak" + _ "github.com/influxdata/telegraf/plugins/inputs/temp" _ "github.com/influxdata/telegraf/plugins/inputs/tengine" _ "github.com/influxdata/telegraf/plugins/inputs/tomcat" _ "github.com/influxdata/telegraf/plugins/inputs/trig" _ "github.com/influxdata/telegraf/plugins/inputs/twemproxy" _ "github.com/influxdata/telegraf/plugins/inputs/udp_listener" _ "github.com/influxdata/telegraf/plugins/inputs/unbound" + _ "github.com/influxdata/telegraf/plugins/inputs/uwsgi" _ "github.com/influxdata/telegraf/plugins/inputs/varnish" + _ "github.com/influxdata/telegraf/plugins/inputs/vsphere" _ "github.com/influxdata/telegraf/plugins/inputs/webhooks" _ "github.com/influxdata/telegraf/plugins/inputs/win_perf_counters" _ "github.com/influxdata/telegraf/plugins/inputs/win_services" + _ "github.com/influxdata/telegraf/plugins/inputs/wireguard" + _ "github.com/influxdata/telegraf/plugins/inputs/wireless" + _ "github.com/influxdata/telegraf/plugins/inputs/x509_cert" _ "github.com/influxdata/telegraf/plugins/inputs/zfs" _ "github.com/influxdata/telegraf/plugins/inputs/zipkin" _ "github.com/influxdata/telegraf/plugins/inputs/zookeeper" diff --git a/plugins/inputs/amqp_consumer/README.md b/plugins/inputs/amqp_consumer/README.md index bc42f9107..8ef6d6fe2 100644 --- a/plugins/inputs/amqp_consumer/README.md +++ b/plugins/inputs/amqp_consumer/README.md @@ -1,6 +1,6 @@ # AMQP Consumer Input Plugin -This plugin provides a consumer 
for use with AMQP 0-9-1, a promenent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). +This plugin provides a consumer for use with AMQP 0-9-1, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). Metrics are read from a topic exchange using the configured queue and binding_key. @@ -13,7 +13,6 @@ For an introduction to AMQP see: The following defaults are known to work with RabbitMQ: ```toml -# AMQP consumer plugin [[inputs.amqp_consumer]] ## Broker to consume from. ## deprecated in 1.7; use the brokers option @@ -28,7 +27,7 @@ The following defaults are known to work with RabbitMQ: # username = "" # password = "" - ## Exchange to declare and consume from. + ## Name of the exchange to declare. If unset, no exchange will be declared. exchange = "telegraf" ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". @@ -42,16 +41,34 @@ The following defaults are known to work with RabbitMQ: ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## AMQP queue name queue = "telegraf" - ## Binding Key + + ## AMQP queue durability can be "transient" or "durable". + queue_durability = "durable" + + ## If true, queue will be passively declared. + # queue_passive = false + + ## A binding between the exchange and queue using this binding key is + ## created. If unset, no binding is created. binding_key = "#" ## Maximum number of messages server should give to the worker. # prefetch_count = 50 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Auth method. PLAIN and EXTERNAL are supported ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as ## described here: https://www.rabbitmq.com/plugins.html @@ -64,6 +81,10 @@ The following defaults are known to work with RabbitMQ: ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Content encoding for message payloads, can be set to "gzip" to or + ## "identity" to apply no encoding. + # content_encoding = "identity" + ## Data format to consume. 
## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/inputs/amqp_consumer/amqp_consumer.go b/plugins/inputs/amqp_consumer/amqp_consumer.go index 739ed76e4..f3ee235e7 100644 --- a/plugins/inputs/amqp_consumer/amqp_consumer.go +++ b/plugins/inputs/amqp_consumer/amqp_consumer.go @@ -1,36 +1,47 @@ package amqp_consumer import ( + "context" "errors" "fmt" - "log" "math/rand" "strings" "sync" "time" - "github.com/streadway/amqp" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/streadway/amqp" ) +const ( + defaultMaxUndeliveredMessages = 1000 +) + +type empty struct{} +type semaphore chan empty + // AMQPConsumer is the top level struct for this plugin type AMQPConsumer struct { - URL string `toml:"url"` // deprecated in 1.7; use brokers - Brokers []string `toml:"brokers"` - Username string `toml:"username"` - Password string `toml:"password"` - Exchange string `toml:"exchange"` - ExchangeType string `toml:"exchange_type"` - ExchangeDurability string `toml:"exchange_durability"` - ExchangePassive bool `toml:"exchange_passive"` - ExchangeArguments map[string]string `toml:"exchange_arguments"` + URL string `toml:"url"` // deprecated in 1.7; use brokers + Brokers []string `toml:"brokers"` + Username string `toml:"username"` + Password string `toml:"password"` + Exchange string `toml:"exchange"` + ExchangeType string `toml:"exchange_type"` + ExchangeDurability string `toml:"exchange_durability"` + ExchangePassive bool `toml:"exchange_passive"` + ExchangeArguments map[string]string `toml:"exchange_arguments"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` // Queue Name - Queue string + Queue string `toml:"queue"` + QueueDurability string `toml:"queue_durability"` + QueuePassive bool `toml:"queue_passive"` + // Binding Key BindingKey string `toml:"binding_key"` @@ -42,9 +53,16 @@ type AMQPConsumer struct { AuthMethod string tls.ClientConfig - parser parsers.Parser - conn *amqp.Connection - wg *sync.WaitGroup + ContentEncoding string `toml:"content_encoding"` + Log telegraf.Logger + + deliveries map[telegraf.TrackingID]amqp.Delivery + + parser parsers.Parser + conn *amqp.Connection + wg *sync.WaitGroup + cancel context.CancelFunc + decoder internal.ContentDecoder } type externalAuth struct{} @@ -64,6 +82,8 @@ const ( DefaultExchangeType = "topic" DefaultExchangeDurability = "durable" + DefaultQueueDurability = "durable" + DefaultPrefetchCount = 50 ) @@ -82,7 +102,7 @@ func (a *AMQPConsumer) SampleConfig() string { # username = "" # password = "" - ## Exchange to declare and consume from. + ## Name of the exchange to declare. If unset, no exchange will be declared. exchange = "telegraf" ## Exchange type; common types are "direct", "fanout", "topic", "header", "x-consistent-hash". @@ -96,17 +116,34 @@ func (a *AMQPConsumer) SampleConfig() string { ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} - ## AMQP queue name + ## AMQP queue name. queue = "telegraf" - ## Binding Key + ## AMQP queue durability can be "transient" or "durable". + queue_durability = "durable" + + ## If true, queue will be passively declared. 
+ # queue_passive = false + + ## A binding between the exchange and queue using this binding key is + ## created. If unset, no binding is created. binding_key = "#" ## Maximum number of messages server should give to the worker. # prefetch_count = 50 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Auth method. PLAIN and EXTERNAL are supported ## Using EXTERNAL requires enabling the rabbitmq_auth_mechanism_ssl plugin as ## described here: https://www.rabbitmq.com/plugins.html @@ -119,6 +156,10 @@ func (a *AMQPConsumer) SampleConfig() string { ## Use TLS but skip chain & host verification # insecure_skip_verify = false + ## Content encoding for message payloads, can be set to "gzip" to or + ## "identity" to apply no encoding. + # content_encoding = "identity" + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -173,14 +214,25 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error { return err } + a.decoder, err = internal.NewContentDecoder(a.ContentEncoding) + if err != nil { + return err + } + msgs, err := a.connect(amqpConf) if err != nil { return err } + ctx, cancel := context.WithCancel(context.Background()) + a.cancel = cancel + a.wg = &sync.WaitGroup{} a.wg.Add(1) - go a.process(msgs, acc) + go func() { + defer a.wg.Done() + a.process(ctx, msgs, acc) + }() go func() { for { @@ -189,17 +241,20 @@ func (a *AMQPConsumer) Start(acc telegraf.Accumulator) error { break } - log.Printf("I! AMQP consumer connection closed: %s; trying to reconnect", err) + a.Log.Infof("Connection closed: %s; trying to reconnect", err) for { msgs, err := a.connect(amqpConf) if err != nil { - log.Printf("E! AMQP connection failed: %s", err) + a.Log.Errorf("AMQP connection failed: %s", err) time.Sleep(10 * time.Second) continue } a.wg.Add(1) - go a.process(msgs, acc) + go func() { + defer a.wg.Done() + a.process(ctx, msgs, acc) + }() break } } @@ -217,14 +272,14 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err p := rand.Perm(len(brokers)) for _, n := range p { broker := brokers[n] - log.Printf("D! [amqp_consumer] connecting to %q", broker) + a.Log.Debugf("Connecting to %q", broker) conn, err := amqp.DialConfig(broker, *amqpConf) if err == nil { a.conn = conn - log.Printf("D! [amqp_consumer] connected to %q", broker) + a.Log.Debugf("Connected to %q", broker) break } - log.Printf("D! 
[amqp_consumer] error connecting to %q", broker) + a.Log.Debugf("Error connecting to %q", broker) } if a.conn == nil { @@ -233,54 +288,55 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err ch, err := a.conn.Channel() if err != nil { - return nil, fmt.Errorf("Failed to open a channel: %s", err) + return nil, fmt.Errorf("Failed to open a channel: %s", err.Error()) } - var exchangeDurable = true - switch a.ExchangeDurability { - case "transient": - exchangeDurable = false - default: - exchangeDurable = true + if a.Exchange != "" { + var exchangeDurable = true + switch a.ExchangeDurability { + case "transient": + exchangeDurable = false + default: + exchangeDurable = true + } + + exchangeArgs := make(amqp.Table, len(a.ExchangeArguments)) + for k, v := range a.ExchangeArguments { + exchangeArgs[k] = v + } + + err = declareExchange( + ch, + a.Exchange, + a.ExchangeType, + a.ExchangePassive, + exchangeDurable, + exchangeArgs) + if err != nil { + return nil, err + } } - exchangeArgs := make(amqp.Table, len(a.ExchangeArguments)) - for k, v := range a.ExchangeArguments { - exchangeArgs[k] = v - } - - err = declareExchange( + q, err := declareQueue( ch, - a.Exchange, - a.ExchangeType, - a.ExchangePassive, - exchangeDurable, - exchangeArgs) + a.Queue, + a.QueueDurability, + a.QueuePassive) if err != nil { return nil, err } - q, err := ch.QueueDeclare( - a.Queue, // queue - true, // durable - false, // delete when unused - false, // exclusive - false, // no-wait - nil, // arguments - ) - if err != nil { - return nil, fmt.Errorf("Failed to declare a queue: %s", err) - } - - err = ch.QueueBind( - q.Name, // queue - a.BindingKey, // binding-key - a.Exchange, // exchange - false, - nil, - ) - if err != nil { - return nil, fmt.Errorf("Failed to bind a queue: %s", err) + if a.BindingKey != "" { + err = ch.QueueBind( + q.Name, // queue + a.BindingKey, // binding-key + a.Exchange, // exchange + false, + nil, + ) + if err != nil { + return nil, fmt.Errorf("Failed to bind a queue: %s", err) + } } err = ch.Qos( @@ -305,7 +361,6 @@ func (a *AMQPConsumer) connect(amqpConf *amqp.Config) (<-chan amqp.Delivery, err return nil, fmt.Errorf("Failed establishing connection to queue: %s", err) } - log.Println("I! 
Started AMQP consumer") return msgs, err } @@ -340,47 +395,164 @@ func declareExchange( ) } if err != nil { - return fmt.Errorf("error declaring exchange: %v", err) + return fmt.Errorf("Error declaring exchange: %v", err) } return nil } +func declareQueue( + channel *amqp.Channel, + queueName string, + queueDurability string, + queuePassive bool, +) (*amqp.Queue, error) { + var queue amqp.Queue + var err error + + var queueDurable = true + switch queueDurability { + case "transient": + queueDurable = false + default: + queueDurable = true + } + + if queuePassive { + queue, err = channel.QueueDeclarePassive( + queueName, // queue + queueDurable, // durable + false, // delete when unused + false, // exclusive + false, // no-wait + nil, // arguments + ) + } else { + queue, err = channel.QueueDeclare( + queueName, // queue + queueDurable, // durable + false, // delete when unused + false, // exclusive + false, // no-wait + nil, // arguments + ) + } + if err != nil { + return nil, fmt.Errorf("Error declaring queue: %v", err) + } + return &queue, nil +} + // Read messages from queue and add them to the Accumulator -func (a *AMQPConsumer) process(msgs <-chan amqp.Delivery, acc telegraf.Accumulator) { - defer a.wg.Done() - for d := range msgs { - metrics, err := a.parser.Parse(d.Body) - if err != nil { - log.Printf("E! %v: error parsing metric - %v", err, string(d.Body)) - } else { - for _, m := range metrics { - acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) +func (a *AMQPConsumer) process(ctx context.Context, msgs <-chan amqp.Delivery, ac telegraf.Accumulator) { + a.deliveries = make(map[telegraf.TrackingID]amqp.Delivery) + + acc := ac.WithTracking(a.MaxUndeliveredMessages) + sem := make(semaphore, a.MaxUndeliveredMessages) + + for { + select { + case <-ctx.Done(): + return + case track := <-acc.Delivered(): + if a.onDelivery(track) { + <-sem + } + case sem <- empty{}: + select { + case <-ctx.Done(): + return + case track := <-acc.Delivered(): + if a.onDelivery(track) { + <-sem + <-sem + } + case d, ok := <-msgs: + if !ok { + return + } + err := a.onMessage(acc, d) + if err != nil { + acc.AddError(err) + <-sem + } } } - - d.Ack(false) } - log.Printf("I! AMQP consumer queue closed") +} + +func (a *AMQPConsumer) onMessage(acc telegraf.TrackingAccumulator, d amqp.Delivery) error { + onError := func() { + // Discard the message from the queue; will never be able to process + // this message. 
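+		// Acking here (rather than rejecting) removes the message permanently, so the broker does not redeliver a payload that cannot be decoded or parsed.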
+ rejErr := d.Ack(false) + if rejErr != nil { + a.Log.Errorf("Unable to reject message: %d: %v", d.DeliveryTag, rejErr) + a.conn.Close() + } + } + + body, err := a.decoder.Decode(d.Body) + if err != nil { + onError() + return err + } + + metrics, err := a.parser.Parse(body) + if err != nil { + onError() + return err + } + + id := acc.AddTrackingMetricGroup(metrics) + a.deliveries[id] = d + return nil +} + +func (a *AMQPConsumer) onDelivery(track telegraf.DeliveryInfo) bool { + delivery, ok := a.deliveries[track.ID()] + if !ok { + // Added by a previous connection + return false + } + + if track.Delivered() { + err := delivery.Ack(false) + if err != nil { + a.Log.Errorf("Unable to ack written delivery: %d: %v", delivery.DeliveryTag, err) + a.conn.Close() + } + } else { + err := delivery.Reject(false) + if err != nil { + a.Log.Errorf("Unable to reject failed delivery: %d: %v", delivery.DeliveryTag, err) + a.conn.Close() + } + } + + delete(a.deliveries, track.ID()) + return true } func (a *AMQPConsumer) Stop() { + a.cancel() + a.wg.Wait() err := a.conn.Close() if err != nil && err != amqp.ErrClosed { - log.Printf("E! Error closing AMQP connection: %s", err) + a.Log.Errorf("Error closing AMQP connection: %s", err) return } - a.wg.Wait() - log.Println("I! Stopped AMQP service") } func init() { inputs.Add("amqp_consumer", func() telegraf.Input { return &AMQPConsumer{ - URL: DefaultBroker, - AuthMethod: DefaultAuthMethod, - ExchangeType: DefaultExchangeType, - ExchangeDurability: DefaultExchangeDurability, - PrefetchCount: DefaultPrefetchCount, + URL: DefaultBroker, + AuthMethod: DefaultAuthMethod, + ExchangeType: DefaultExchangeType, + ExchangeDurability: DefaultExchangeDurability, + QueueDurability: DefaultQueueDurability, + PrefetchCount: DefaultPrefetchCount, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } }) } diff --git a/plugins/inputs/apcupsd/README.md b/plugins/inputs/apcupsd/README.md new file mode 100644 index 000000000..97526d7ec --- /dev/null +++ b/plugins/inputs/apcupsd/README.md @@ -0,0 +1,54 @@ +# APCUPSD Input Plugin + +This plugin reads data from an apcupsd daemon over its NIS network protocol. + +### Requirements + +apcupsd should be installed and it's daemon should be running. + +### Configuration + +```toml +[[inputs.apcupsd]] + # A list of running apcupsd server to connect to. + # If not provided will default to tcp://127.0.0.1:3551 + servers = ["tcp://127.0.0.1:3551"] + + ## Timeout for dialing server. 
+ timeout = "5s" +``` + +### Metrics + +- apcupsd + - tags: + - serial + - status (string representing the set status_flags) + - ups_name + - model + - fields: + - status_flags ([status-bits][]) + - input_voltage + - load_percent + - battery_charge_percent + - time_left_ns + - output_voltage + - internal_temp + - battery_voltage + - input_frequency + - time_on_battery_ns + - battery_date + - nominal_input_voltage + - nominal_battery_voltage + - nominal_power + - firmware + + + +### Example output + +``` +apcupsd,serial=AS1231515,status=ONLINE,ups_name=name1 time_on_battery=0,load_percent=9.7,time_left_minutes=98,output_voltage=230.4,internal_temp=32.4,battery_voltage=27.4,input_frequency=50.2,input_voltage=230.4,battery_charge_percent=100,status_flags=8i 1490035922000000000 +``` + +[status-bits]: http://www.apcupsd.org/manual/manual.html#status-bits diff --git a/plugins/inputs/apcupsd/apcupsd.go b/plugins/inputs/apcupsd/apcupsd.go new file mode 100644 index 000000000..a862bbfc8 --- /dev/null +++ b/plugins/inputs/apcupsd/apcupsd.go @@ -0,0 +1,114 @@ +package apcupsd + +import ( + "context" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/mdlayher/apcupsd" +) + +const defaultAddress = "tcp://127.0.0.1:3551" + +var defaultTimeout = internal.Duration{Duration: time.Duration(time.Second * 5)} + +type ApcUpsd struct { + Servers []string + Timeout internal.Duration +} + +func (*ApcUpsd) Description() string { + return "Monitor APC UPSes connected to apcupsd" +} + +var sampleConfig = ` + # A list of running apcupsd server to connect to. + # If not provided will default to tcp://127.0.0.1:3551 + servers = ["tcp://127.0.0.1:3551"] + + ## Timeout for dialing server. 
+ timeout = "5s" +` + +func (*ApcUpsd) SampleConfig() string { + return sampleConfig +} + +func (h *ApcUpsd) Gather(acc telegraf.Accumulator) error { + ctx := context.Background() + + for _, addr := range h.Servers { + addrBits, err := url.Parse(addr) + if err != nil { + return err + } + if addrBits.Scheme == "" { + addrBits.Scheme = "tcp" + } + + ctx, cancel := context.WithTimeout(ctx, h.Timeout.Duration) + defer cancel() + + status, err := fetchStatus(ctx, addrBits) + if err != nil { + return err + } + + tags := map[string]string{ + "serial": status.SerialNumber, + "ups_name": status.UPSName, + "status": status.Status, + "model": status.Model, + } + + flags, err := strconv.ParseUint(strings.Fields(status.StatusFlags)[0], 0, 64) + if err != nil { + return err + } + + fields := map[string]interface{}{ + "status_flags": flags, + "input_voltage": status.LineVoltage, + "load_percent": status.LoadPercent, + "battery_charge_percent": status.BatteryChargePercent, + "time_left_ns": status.TimeLeft.Nanoseconds(), + "output_voltage": status.OutputVoltage, + "internal_temp": status.InternalTemp, + "battery_voltage": status.BatteryVoltage, + "input_frequency": status.LineFrequency, + "time_on_battery_ns": status.TimeOnBattery.Nanoseconds(), + "nominal_input_voltage": status.NominalInputVoltage, + "nominal_battery_voltage": status.NominalBatteryVoltage, + "nominal_power": status.NominalPower, + "firmware": status.Firmware, + "battery_date": status.BatteryDate, + } + + acc.AddFields("apcupsd", fields, tags) + } + return nil +} + +func fetchStatus(ctx context.Context, addr *url.URL) (*apcupsd.Status, error) { + client, err := apcupsd.DialContext(ctx, addr.Scheme, addr.Host) + if err != nil { + return nil, err + } + defer client.Close() + + return client.Status() +} + +func init() { + inputs.Add("apcupsd", func() telegraf.Input { + return &ApcUpsd{ + Servers: []string{defaultAddress}, + Timeout: defaultTimeout, + } + }) +} diff --git a/plugins/inputs/apcupsd/apcupsd_test.go b/plugins/inputs/apcupsd/apcupsd_test.go new file mode 100644 index 000000000..e749d5137 --- /dev/null +++ b/plugins/inputs/apcupsd/apcupsd_test.go @@ -0,0 +1,235 @@ +package apcupsd + +import ( + "context" + "encoding/binary" + "net" + "testing" + "time" + + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestApcupsdDocs(t *testing.T) { + apc := &ApcUpsd{} + apc.Description() + apc.SampleConfig() +} + +func TestApcupsdInit(t *testing.T) { + input, ok := inputs.Inputs["apcupsd"] + if !ok { + t.Fatal("Input not defined") + } + + _ = input().(*ApcUpsd) +} + +func listen(ctx context.Context, t *testing.T, out [][]byte) (string, error) { + lc := net.ListenConfig{} + ln, err := lc.Listen(ctx, "tcp4", "127.0.0.1:0") + if err != nil { + return "", err + } + + go func() { + for ctx.Err() == nil { + defer ln.Close() + + conn, err := ln.Accept() + if err != nil { + continue + } + defer conn.Close() + conn.SetReadDeadline(time.Now().Add(time.Second)) + + in := make([]byte, 128) + n, err := conn.Read(in) + require.NoError(t, err, "failed to read from connection") + + status := []byte{0, 6, 's', 't', 'a', 't', 'u', 's'} + want, got := status, in[:n] + require.Equal(t, want, got) + + // Run against test function and append EOF to end of output bytes + out = append(out, []byte{0, 0}) + + for _, o := range out { + _, err := conn.Write(o) + require.NoError(t, err, "failed to write to connection") + } + } + }() + + return ln.Addr().String(), nil +} + +func 
TestConfig(t *testing.T) { + apc := &ApcUpsd{Timeout: defaultTimeout} + + var ( + tests = []struct { + name string + servers []string + err bool + }{ + { + name: "test listen address no scheme", + servers: []string{"127.0.0.1:1234"}, + err: true, + }, + { + name: "test no port", + servers: []string{"127.0.0.3"}, + err: true, + }, + } + + acc testutil.Accumulator + ) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + apc.Servers = tt.servers + + err := apc.Gather(&acc) + if tt.err { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } + +} + +func TestApcupsdGather(t *testing.T) { + apc := &ApcUpsd{Timeout: defaultTimeout} + + var ( + tests = []struct { + name string + err bool + tags map[string]string + fields map[string]interface{} + out func() [][]byte + }{ + { + name: "test listening server with output", + err: false, + tags: map[string]string{ + "serial": "ABC123", + "status": "ONLINE", + "ups_name": "BERTHA", + "model": "Model 12345", + }, + fields: map[string]interface{}{ + "status_flags": uint64(8), + "battery_charge_percent": float64(0), + "battery_voltage": float64(0), + "input_frequency": float64(0), + "input_voltage": float64(0), + "internal_temp": float64(0), + "load_percent": float64(13), + "output_voltage": float64(0), + "time_left_ns": int64(2790000000000), + "time_on_battery_ns": int64(0), + "nominal_input_voltage": float64(230), + "nominal_battery_voltage": float64(12), + "nominal_power": int(865), + "firmware": string("857.L3 .I USB FW:L3"), + "battery_date": time.Date(2016, time.September, 06, 0, 0, 0, 0, time.UTC), + }, + out: genOutput, + }, + { + name: "test with bad output", + err: true, + out: genBadOutput, + }, + } + + acc testutil.Accumulator + ) + + for _, tt := range tests { + + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + lAddr, err := listen(ctx, t, tt.out()) + if err != nil { + t.Fatal(err) + } + + apc.Servers = []string{"tcp://" + lAddr} + + err = apc.Gather(&acc) + if tt.err { + require.Error(t, err) + } else { + require.NoError(t, err) + acc.AssertContainsTaggedFields(t, "apcupsd", tt.fields, tt.tags) + } + cancel() + }) + } +} + +// The following functionality is straight from apcupsd tests. + +// kvBytes is a helper to generate length and key/value byte buffers. 
+func kvBytes(kv string) ([]byte, []byte) { + lenb := make([]byte, 2) + binary.BigEndian.PutUint16(lenb, uint16(len(kv))) + + return lenb, []byte(kv) +} + +func genOutput() [][]byte { + kvs := []string{ + "SERIALNO : ABC123", + "STATUS : ONLINE", + "STATFLAG : 0x08 Status Flag", + "UPSNAME : BERTHA", + "MODEL : Model 12345", + "DATE : 2016-09-06 22:13:28 -0400", + "HOSTNAME : example", + "LOADPCT : 13.0 Percent Load Capacity", + "BATTDATE : 2016-09-06", + "TIMELEFT : 46.5 Minutes", + "TONBATT : 0 seconds", + "NUMXFERS : 0", + "SELFTEST : NO", + "NOMINV : 230 Volts", + "NOMBATTV : 12.0 Volts", + "NOMPOWER : 865 Watts", + "FIRMWARE : 857.L3 .I USB FW:L3", + } + + var out [][]byte + for _, kv := range kvs { + lenb, kvb := kvBytes(kv) + out = append(out, lenb) + out = append(out, kvb) + } + + return out +} + +func genBadOutput() [][]byte { + kvs := []string{ + "STATFLAG : 0x08Status Flag", + } + + var out [][]byte + for _, kv := range kvs { + lenb, kvb := kvBytes(kv) + out = append(out, lenb) + out = append(out, kvb) + } + + return out +} diff --git a/plugins/inputs/azure_storage_queue/README.md b/plugins/inputs/azure_storage_queue/README.md new file mode 100644 index 000000000..7985c886e --- /dev/null +++ b/plugins/inputs/azure_storage_queue/README.md @@ -0,0 +1,35 @@ +# Telegraf Input Plugin: Azure Storage Queue + +This plugin gathers sizes of Azure Storage Queues. + +### Configuration: + +```toml +# Description +[[inputs.azure_storage_queue]] + ## Required Azure Storage Account name + account_name = "mystorageaccount" + + ## Required Azure Storage Account access key + account_key = "storageaccountaccesskey" + + ## Set to false to disable peeking age of oldest message (executes faster) + # peek_oldest_message_age = true +``` + +### Metrics +- azure_storage_queues + - tags: + - queue + - account + - fields: + - size (integer, count) + - oldest_message_age_ns (integer, nanoseconds) Age of message at the head of the queue. + Requires `peek_oldest_message_age` to be configured to `true`. 
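+
+The age is derived from the insertion time of the single message peeked at the head of the queue (see `GatherQueueMetrics` in the plugin source). A minimal, self-contained sketch of the calculation, using a made-up insertion time for illustration:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+func main() {
+	// Pretend the message at the head of the queue was inserted ~800ms ago.
+	insertionTime := time.Now().Add(-800 * time.Millisecond)
+	oldestMessageAgeNs := time.Now().UnixNano() - insertionTime.UnixNano()
+	fmt.Println(oldestMessageAgeNs) // roughly 800000000, i.e. 0.8s in nanoseconds
+}
+```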
+ +### Example Output + +``` +azure_storage_queues,queue=myqueue,account=mystorageaccount oldest_message_age=799714900i,size=7i 1565970503000000000 +azure_storage_queues,queue=myemptyqueue,account=mystorageaccount size=0i 1565970502000000000 +``` \ No newline at end of file diff --git a/plugins/inputs/azure_storage_queue/azure_storage_queue.go b/plugins/inputs/azure_storage_queue/azure_storage_queue.go new file mode 100644 index 000000000..6d132a5ef --- /dev/null +++ b/plugins/inputs/azure_storage_queue/azure_storage_queue.go @@ -0,0 +1,134 @@ +package azure_storage_queue + +import ( + "context" + "errors" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-storage-queue-go/azqueue" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type AzureStorageQueue struct { + StorageAccountName string `toml:"account_name"` + StorageAccountKey string `toml:"account_key"` + PeekOldestMessageAge bool `toml:"peek_oldest_message_age"` + Log telegraf.Logger + + serviceURL *azqueue.ServiceURL +} + +var sampleConfig = ` + ## Required Azure Storage Account name + account_name = "mystorageaccount" + + ## Required Azure Storage Account access key + account_key = "storageaccountaccesskey" + + ## Set to false to disable peeking age of oldest message (executes faster) + # peek_oldest_message_age = true + ` + +func (a *AzureStorageQueue) Description() string { + return "Gather Azure Storage Queue metrics" +} + +func (a *AzureStorageQueue) SampleConfig() string { + return sampleConfig +} + +func (a *AzureStorageQueue) Init() error { + if a.StorageAccountName == "" { + return errors.New("account_name must be configured") + } + + if a.StorageAccountKey == "" { + return errors.New("account_key must be configured") + } + return nil +} + +func (a *AzureStorageQueue) GetServiceURL() (azqueue.ServiceURL, error) { + if a.serviceURL == nil { + _url, err := url.Parse("https://" + a.StorageAccountName + ".queue.core.windows.net") + if err != nil { + return azqueue.ServiceURL{}, err + } + + credential, err := azqueue.NewSharedKeyCredential(a.StorageAccountName, a.StorageAccountKey) + if err != nil { + return azqueue.ServiceURL{}, err + } + + pipeline := azqueue.NewPipeline(credential, azqueue.PipelineOptions{}) + + serviceURL := azqueue.NewServiceURL(*_url, pipeline) + a.serviceURL = &serviceURL + } + return *a.serviceURL, nil +} + +func (a *AzureStorageQueue) GatherQueueMetrics(acc telegraf.Accumulator, queueItem azqueue.QueueItem, properties *azqueue.QueueGetPropertiesResponse, peekedMessage *azqueue.PeekedMessage) { + fields := make(map[string]interface{}) + tags := make(map[string]string) + tags["queue"] = strings.TrimSpace(queueItem.Name) + tags["account"] = a.StorageAccountName + fields["size"] = properties.ApproximateMessagesCount() + if peekedMessage != nil { + fields["oldest_message_age_ns"] = time.Now().UnixNano() - peekedMessage.InsertionTime.UnixNano() + } + acc.AddFields("azure_storage_queues", fields, tags) +} + +func (a *AzureStorageQueue) Gather(acc telegraf.Accumulator) error { + serviceURL, err := a.GetServiceURL() + if err != nil { + return err + } + + ctx := context.TODO() + + for marker := (azqueue.Marker{}); marker.NotDone(); { + a.Log.Debugf("Listing queues of storage account '%s'", a.StorageAccountName) + queuesSegment, err := serviceURL.ListQueuesSegment(ctx, marker, + azqueue.ListQueuesSegmentOptions{ + Detail: azqueue.ListQueuesSegmentDetails{Metadata: false}, + }) + if err != nil { + return err + } + marker = queuesSegment.NextMarker + + for _, 
queueItem := range queuesSegment.QueueItems { + a.Log.Debugf("Processing queue '%s' of storage account '%s'", queueItem.Name, a.StorageAccountName) + queueURL := serviceURL.NewQueueURL(queueItem.Name) + properties, err := queueURL.GetProperties(ctx) + if err != nil { + a.Log.Errorf("Error getting properties for queue %s: %s", queueItem.Name, err.Error()) + continue + } + var peekedMessage *azqueue.PeekedMessage + if a.PeekOldestMessageAge { + messagesURL := queueURL.NewMessagesURL() + messagesResponse, err := messagesURL.Peek(ctx, 1) + if err != nil { + a.Log.Errorf("Error peeking queue %s: %s", queueItem.Name, err.Error()) + } else if messagesResponse.NumMessages() > 0 { + peekedMessage = messagesResponse.Message(0) + } + } + + a.GatherQueueMetrics(acc, queueItem, properties, peekedMessage) + } + } + return nil +} + +func init() { + inputs.Add("azure_storage_queue", func() telegraf.Input { + return &AzureStorageQueue{PeekOldestMessageAge: true} + }) +} diff --git a/plugins/inputs/bcache/bcache.go b/plugins/inputs/bcache/bcache.go index 1171dbd92..8d20e3623 100644 --- a/plugins/inputs/bcache/bcache.go +++ b/plugins/inputs/bcache/bcache.go @@ -59,7 +59,7 @@ func prettyToBytes(v string) uint64 { } var factor uint64 factor = 1 - prefix := v[len(v)-1 : len(v)] + prefix := v[len(v)-1:] if factors[prefix] != 0 { v = v[:len(v)-1] factor = factors[prefix] diff --git a/plugins/inputs/beanstalkd/README.md b/plugins/inputs/beanstalkd/README.md new file mode 100644 index 000000000..e4fe2203d --- /dev/null +++ b/plugins/inputs/beanstalkd/README.md @@ -0,0 +1,98 @@ +# Beanstalkd Input Plugin + +The `beanstalkd` plugin collects server stats as well as tube stats (reported by `stats` and `stats-tube` commands respectively). + +### Configuration: + +```toml +[[inputs.beanstalkd]] + ## Server to collect data from + server = "localhost:11300" + + ## List of tubes to gather stats about. + ## If no tubes specified then data gathered for each tube on server reported by list-tubes command + tubes = ["notifications"] +``` + +### Metrics: + +Please see the [Beanstalk Protocol doc](https://raw.githubusercontent.com/kr/beanstalkd/master/doc/protocol.txt) for detailed explanation of `stats` and `stats-tube` commands output. 
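+
+Both commands reply with an `OK <bytes>` status line followed by a YAML body; the plugin reads this over a plain TCP text connection and unmarshals it into the fields listed below. An abridged `stats-tube` exchange looks roughly like the following (the byte count and values are illustrative):
+
+```
+stats-tube notifications
+OK 166
+---
+name: notifications
+current-jobs-urgent: 3
+current-jobs-ready: 3
+current-jobs-reserved: 0
+current-jobs-delayed: 0
+current-jobs-buried: 0
+total-jobs: 3
+current-using: 0
+current-waiting: 0
+current-watching: 0
+cmd-delete: 0
+cmd-pause-tube: 0
+pause: 0
+pause-time-left: 0
+```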
+ +`beanstalkd_overview` – statistical information about the system as a whole +- fields + - cmd_delete + - cmd_pause_tube + - current_jobs_buried + - current_jobs_delayed + - current_jobs_ready + - current_jobs_reserved + - current_jobs_urgent + - current_using + - current_waiting + - current_watching + - pause + - pause_time_left + - total_jobs +- tags + - name + - server (address taken from config) + +`beanstalkd_tube` – statistical information about the specified tube +- fields + - binlog_current_index + - binlog_max_size + - binlog_oldest_index + - binlog_records_migrated + - binlog_records_written + - cmd_bury + - cmd_delete + - cmd_ignore + - cmd_kick + - cmd_list_tube_used + - cmd_list_tubes + - cmd_list_tubes_watched + - cmd_pause_tube + - cmd_peek + - cmd_peek_buried + - cmd_peek_delayed + - cmd_peek_ready + - cmd_put + - cmd_release + - cmd_reserve + - cmd_reserve_with_timeout + - cmd_stats + - cmd_stats_job + - cmd_stats_tube + - cmd_touch + - cmd_use + - cmd_watch + - current_connections + - current_jobs_buried + - current_jobs_delayed + - current_jobs_ready + - current_jobs_reserved + - current_jobs_urgent + - current_producers + - current_tubes + - current_waiting + - current_workers + - job_timeouts + - max_job_size + - pid + - rusage_stime + - rusage_utime + - total_connections + - total_jobs + - uptime +- tags + - hostname + - id + - server (address taken from config) + - version + +### Example Output: +``` +beanstalkd_overview,host=server.local,hostname=a2ab22ed12e0,id=232485800aa11b24,server=localhost:11300,version=1.10 cmd_stats_tube=29482i,current_jobs_delayed=0i,current_jobs_urgent=6i,cmd_kick=0i,cmd_stats=7378i,cmd_stats_job=0i,current_waiting=0i,max_job_size=65535i,pid=6i,cmd_bury=0i,cmd_reserve_with_timeout=0i,cmd_touch=0i,current_connections=1i,current_jobs_ready=6i,current_producers=0i,cmd_delete=0i,cmd_list_tubes=7369i,cmd_peek_ready=0i,cmd_put=6i,cmd_use=3i,cmd_watch=0i,current_jobs_reserved=0i,rusage_stime=6.07,cmd_list_tubes_watched=0i,cmd_pause_tube=0i,total_jobs=6i,binlog_records_migrated=0i,cmd_list_tube_used=0i,cmd_peek_delayed=0i,cmd_release=0i,current_jobs_buried=0i,job_timeouts=0i,binlog_current_index=0i,binlog_max_size=10485760i,total_connections=7378i,cmd_peek_buried=0i,cmd_reserve=0i,current_tubes=4i,binlog_records_written=0i,cmd_peek=0i,rusage_utime=1.13,uptime=7099i,binlog_oldest_index=0i,current_workers=0i,cmd_ignore=0i 1528801650000000000 + +beanstalkd_tube,host=server.local,name=notifications,server=localhost:11300 pause_time_left=0i,current_jobs_buried=0i,current_jobs_delayed=0i,current_jobs_reserved=0i,current_using=0i,current_waiting=0i,pause=0i,total_jobs=3i,cmd_delete=0i,cmd_pause_tube=0i,current_jobs_ready=3i,current_jobs_urgent=3i,current_watching=0i 1528801650000000000 +``` diff --git a/plugins/inputs/beanstalkd/beanstalkd.go b/plugins/inputs/beanstalkd/beanstalkd.go new file mode 100644 index 000000000..932edd301 --- /dev/null +++ b/plugins/inputs/beanstalkd/beanstalkd.go @@ -0,0 +1,270 @@ +package beanstalkd + +import ( + "fmt" + "io" + "net/textproto" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "gopkg.in/yaml.v2" +) + +const sampleConfig = ` + ## Server to collect data from + server = "localhost:11300" + + ## List of tubes to gather stats about. 
+ ## If no tubes specified then data gathered for each tube on server reported by list-tubes command + tubes = ["notifications"] +` + +type Beanstalkd struct { + Server string `toml:"server"` + Tubes []string `toml:"tubes"` +} + +func (b *Beanstalkd) Description() string { + return "Collects Beanstalkd server and tubes stats" +} + +func (b *Beanstalkd) SampleConfig() string { + return sampleConfig +} + +func (b *Beanstalkd) Gather(acc telegraf.Accumulator) error { + connection, err := textproto.Dial("tcp", b.Server) + if err != nil { + return err + } + defer connection.Close() + + tubes := b.Tubes + if len(tubes) == 0 { + err = runQuery(connection, "list-tubes", &tubes) + if err != nil { + acc.AddError(err) + } + } + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + err := b.gatherServerStats(connection, acc) + if err != nil { + acc.AddError(err) + } + wg.Done() + }() + + for _, tube := range tubes { + wg.Add(1) + go func(tube string) { + b.gatherTubeStats(connection, tube, acc) + wg.Done() + }(tube) + } + + wg.Wait() + + return nil +} + +func (b *Beanstalkd) gatherServerStats(connection *textproto.Conn, acc telegraf.Accumulator) error { + stats := new(statsResponse) + if err := runQuery(connection, "stats", stats); err != nil { + return err + } + + acc.AddFields("beanstalkd_overview", + map[string]interface{}{ + "binlog_current_index": stats.BinlogCurrentIndex, + "binlog_max_size": stats.BinlogMaxSize, + "binlog_oldest_index": stats.BinlogOldestIndex, + "binlog_records_migrated": stats.BinlogRecordsMigrated, + "binlog_records_written": stats.BinlogRecordsWritten, + "cmd_bury": stats.CmdBury, + "cmd_delete": stats.CmdDelete, + "cmd_ignore": stats.CmdIgnore, + "cmd_kick": stats.CmdKick, + "cmd_list_tube_used": stats.CmdListTubeUsed, + "cmd_list_tubes": stats.CmdListTubes, + "cmd_list_tubes_watched": stats.CmdListTubesWatched, + "cmd_pause_tube": stats.CmdPauseTube, + "cmd_peek": stats.CmdPeek, + "cmd_peek_buried": stats.CmdPeekBuried, + "cmd_peek_delayed": stats.CmdPeekDelayed, + "cmd_peek_ready": stats.CmdPeekReady, + "cmd_put": stats.CmdPut, + "cmd_release": stats.CmdRelease, + "cmd_reserve": stats.CmdReserve, + "cmd_reserve_with_timeout": stats.CmdReserveWithTimeout, + "cmd_stats": stats.CmdStats, + "cmd_stats_job": stats.CmdStatsJob, + "cmd_stats_tube": stats.CmdStatsTube, + "cmd_touch": stats.CmdTouch, + "cmd_use": stats.CmdUse, + "cmd_watch": stats.CmdWatch, + "current_connections": stats.CurrentConnections, + "current_jobs_buried": stats.CurrentJobsBuried, + "current_jobs_delayed": stats.CurrentJobsDelayed, + "current_jobs_ready": stats.CurrentJobsReady, + "current_jobs_reserved": stats.CurrentJobsReserved, + "current_jobs_urgent": stats.CurrentJobsUrgent, + "current_producers": stats.CurrentProducers, + "current_tubes": stats.CurrentTubes, + "current_waiting": stats.CurrentWaiting, + "current_workers": stats.CurrentWorkers, + "job_timeouts": stats.JobTimeouts, + "max_job_size": stats.MaxJobSize, + "pid": stats.Pid, + "rusage_stime": stats.RusageStime, + "rusage_utime": stats.RusageUtime, + "total_connections": stats.TotalConnections, + "total_jobs": stats.TotalJobs, + "uptime": stats.Uptime, + }, + map[string]string{ + "hostname": stats.Hostname, + "id": stats.Id, + "server": b.Server, + "version": stats.Version, + }, + ) + + return nil +} + +func (b *Beanstalkd) gatherTubeStats(connection *textproto.Conn, tube string, acc telegraf.Accumulator) error { + stats := new(statsTubeResponse) + if err := runQuery(connection, "stats-tube "+tube, stats); err != nil { + return err + } + + 
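+	// One measurement per tube; the tube name and the configured server address become tags.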
acc.AddFields("beanstalkd_tube", + map[string]interface{}{ + "cmd_delete": stats.CmdDelete, + "cmd_pause_tube": stats.CmdPauseTube, + "current_jobs_buried": stats.CurrentJobsBuried, + "current_jobs_delayed": stats.CurrentJobsDelayed, + "current_jobs_ready": stats.CurrentJobsReady, + "current_jobs_reserved": stats.CurrentJobsReserved, + "current_jobs_urgent": stats.CurrentJobsUrgent, + "current_using": stats.CurrentUsing, + "current_waiting": stats.CurrentWaiting, + "current_watching": stats.CurrentWatching, + "pause": stats.Pause, + "pause_time_left": stats.PauseTimeLeft, + "total_jobs": stats.TotalJobs, + }, + map[string]string{ + "name": stats.Name, + "server": b.Server, + }, + ) + + return nil +} + +func runQuery(connection *textproto.Conn, cmd string, result interface{}) error { + requestId, err := connection.Cmd(cmd) + if err != nil { + return err + } + + connection.StartResponse(requestId) + defer connection.EndResponse(requestId) + + status, err := connection.ReadLine() + if err != nil { + return err + } + + size := 0 + if _, err = fmt.Sscanf(status, "OK %d", &size); err != nil { + return err + } + + body := make([]byte, size+2) + if _, err = io.ReadFull(connection.R, body); err != nil { + return err + } + + return yaml.Unmarshal(body, result) +} + +func init() { + inputs.Add("beanstalkd", func() telegraf.Input { + return &Beanstalkd{} + }) +} + +type statsResponse struct { + BinlogCurrentIndex int `yaml:"binlog-current-index"` + BinlogMaxSize int `yaml:"binlog-max-size"` + BinlogOldestIndex int `yaml:"binlog-oldest-index"` + BinlogRecordsMigrated int `yaml:"binlog-records-migrated"` + BinlogRecordsWritten int `yaml:"binlog-records-written"` + CmdBury int `yaml:"cmd-bury"` + CmdDelete int `yaml:"cmd-delete"` + CmdIgnore int `yaml:"cmd-ignore"` + CmdKick int `yaml:"cmd-kick"` + CmdListTubeUsed int `yaml:"cmd-list-tube-used"` + CmdListTubes int `yaml:"cmd-list-tubes"` + CmdListTubesWatched int `yaml:"cmd-list-tubes-watched"` + CmdPauseTube int `yaml:"cmd-pause-tube"` + CmdPeek int `yaml:"cmd-peek"` + CmdPeekBuried int `yaml:"cmd-peek-buried"` + CmdPeekDelayed int `yaml:"cmd-peek-delayed"` + CmdPeekReady int `yaml:"cmd-peek-ready"` + CmdPut int `yaml:"cmd-put"` + CmdRelease int `yaml:"cmd-release"` + CmdReserve int `yaml:"cmd-reserve"` + CmdReserveWithTimeout int `yaml:"cmd-reserve-with-timeout"` + CmdStats int `yaml:"cmd-stats"` + CmdStatsJob int `yaml:"cmd-stats-job"` + CmdStatsTube int `yaml:"cmd-stats-tube"` + CmdTouch int `yaml:"cmd-touch"` + CmdUse int `yaml:"cmd-use"` + CmdWatch int `yaml:"cmd-watch"` + CurrentConnections int `yaml:"current-connections"` + CurrentJobsBuried int `yaml:"current-jobs-buried"` + CurrentJobsDelayed int `yaml:"current-jobs-delayed"` + CurrentJobsReady int `yaml:"current-jobs-ready"` + CurrentJobsReserved int `yaml:"current-jobs-reserved"` + CurrentJobsUrgent int `yaml:"current-jobs-urgent"` + CurrentProducers int `yaml:"current-producers"` + CurrentTubes int `yaml:"current-tubes"` + CurrentWaiting int `yaml:"current-waiting"` + CurrentWorkers int `yaml:"current-workers"` + Hostname string `yaml:"hostname"` + Id string `yaml:"id"` + JobTimeouts int `yaml:"job-timeouts"` + MaxJobSize int `yaml:"max-job-size"` + Pid int `yaml:"pid"` + RusageStime float64 `yaml:"rusage-stime"` + RusageUtime float64 `yaml:"rusage-utime"` + TotalConnections int `yaml:"total-connections"` + TotalJobs int `yaml:"total-jobs"` + Uptime int `yaml:"uptime"` + Version string `yaml:"version"` +} + +type statsTubeResponse struct { + CmdDelete int `yaml:"cmd-delete"` + CmdPauseTube 
int `yaml:"cmd-pause-tube"` + CurrentJobsBuried int `yaml:"current-jobs-buried"` + CurrentJobsDelayed int `yaml:"current-jobs-delayed"` + CurrentJobsReady int `yaml:"current-jobs-ready"` + CurrentJobsReserved int `yaml:"current-jobs-reserved"` + CurrentJobsUrgent int `yaml:"current-jobs-urgent"` + CurrentUsing int `yaml:"current-using"` + CurrentWaiting int `yaml:"current-waiting"` + CurrentWatching int `yaml:"current-watching"` + Name string `yaml:"name"` + Pause int `yaml:"pause"` + PauseTimeLeft int `yaml:"pause-time-left"` + TotalJobs int `yaml:"total-jobs"` +} diff --git a/plugins/inputs/beanstalkd/beanstalkd_test.go b/plugins/inputs/beanstalkd/beanstalkd_test.go new file mode 100644 index 000000000..92c108e06 --- /dev/null +++ b/plugins/inputs/beanstalkd/beanstalkd_test.go @@ -0,0 +1,332 @@ +package beanstalkd_test + +import ( + "io" + "net" + "net/textproto" + "testing" + + "github.com/influxdata/telegraf/plugins/inputs/beanstalkd" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestBeanstalkd(t *testing.T) { + type tubeStats struct { + name string + fields map[string]interface{} + } + + tests := []struct { + name string + tubesConfig []string + expectedTubes []tubeStats + notExpectedTubes []tubeStats + }{ + { + name: "All tubes stats", + tubesConfig: []string{}, + expectedTubes: []tubeStats{ + {name: "default", fields: defaultTubeFields}, + {name: "test", fields: testTubeFields}, + }, + notExpectedTubes: []tubeStats{}, + }, + { + name: "Specified tubes stats", + tubesConfig: []string{"test"}, + expectedTubes: []tubeStats{ + {name: "test", fields: testTubeFields}, + }, + notExpectedTubes: []tubeStats{ + {name: "default", fields: defaultTubeFields}, + }, + }, + { + name: "Unknown tube stats", + tubesConfig: []string{"unknown"}, + expectedTubes: []tubeStats{}, + notExpectedTubes: []tubeStats{ + {name: "default", fields: defaultTubeFields}, + {name: "test", fields: testTubeFields}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + server, err := startTestServer(t) + if err != nil { + t.Fatalf("Unable to create test server") + } + defer server.Close() + + serverAddress := server.Addr().String() + plugin := beanstalkd.Beanstalkd{ + Server: serverAddress, + Tubes: test.tubesConfig, + } + + var acc testutil.Accumulator + require.NoError(t, acc.GatherError(plugin.Gather)) + + acc.AssertContainsTaggedFields(t, "beanstalkd_overview", + overviewFields, + getOverviewTags(serverAddress), + ) + + for _, expectedTube := range test.expectedTubes { + acc.AssertContainsTaggedFields(t, "beanstalkd_tube", + expectedTube.fields, + getTubeTags(serverAddress, expectedTube.name), + ) + } + + for _, notExpectedTube := range test.notExpectedTubes { + acc.AssertDoesNotContainsTaggedFields(t, "beanstalkd_tube", + notExpectedTube.fields, + getTubeTags(serverAddress, notExpectedTube.name), + ) + } + }) + } +} + +func startTestServer(t *testing.T) (net.Listener, error) { + server, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, err + } + + go func() { + defer server.Close() + + connection, err := server.Accept() + if err != nil { + t.Log("Test server: failed to accept connection. Error: ", err) + return + } + + tp := textproto.NewConn(connection) + defer tp.Close() + + sendSuccessResponse := func(body string) { + tp.PrintfLine("OK %d\r\n%s", len(body), body) + } + + for { + cmd, err := tp.ReadLine() + if err == io.EOF { + return + } else if err != nil { + t.Log("Test server: failed read command. 
Error: ", err) + return + } + + switch cmd { + case "list-tubes": + sendSuccessResponse(listTubesResponse) + case "stats": + sendSuccessResponse(statsResponse) + case "stats-tube default": + sendSuccessResponse(statsTubeDefaultResponse) + case "stats-tube test": + sendSuccessResponse(statsTubeTestResponse) + case "stats-tube unknown": + tp.PrintfLine("NOT_FOUND") + default: + t.Log("Test server: unknown command") + } + } + }() + + return server, nil +} + +const ( + listTubesResponse = `--- +- default +- test +` + statsResponse = `--- +current-jobs-urgent: 5 +current-jobs-ready: 5 +current-jobs-reserved: 0 +current-jobs-delayed: 1 +current-jobs-buried: 0 +cmd-put: 6 +cmd-peek: 0 +cmd-peek-ready: 1 +cmd-peek-delayed: 0 +cmd-peek-buried: 0 +cmd-reserve: 0 +cmd-reserve-with-timeout: 1 +cmd-delete: 1 +cmd-release: 0 +cmd-use: 2 +cmd-watch: 0 +cmd-ignore: 0 +cmd-bury: 1 +cmd-kick: 1 +cmd-touch: 0 +cmd-stats: 1 +cmd-stats-job: 0 +cmd-stats-tube: 2 +cmd-list-tubes: 1 +cmd-list-tube-used: 0 +cmd-list-tubes-watched: 0 +cmd-pause-tube: 0 +job-timeouts: 0 +total-jobs: 6 +max-job-size: 65535 +current-tubes: 2 +current-connections: 2 +current-producers: 1 +current-workers: 1 +current-waiting: 0 +total-connections: 2 +pid: 6 +version: 1.10 +rusage-utime: 0.000000 +rusage-stime: 0.000000 +uptime: 20 +binlog-oldest-index: 0 +binlog-current-index: 0 +binlog-records-migrated: 0 +binlog-records-written: 0 +binlog-max-size: 10485760 +id: bba7546657efdd4c +hostname: 2873efd3e88c +` + statsTubeDefaultResponse = `--- +name: default +current-jobs-urgent: 0 +current-jobs-ready: 0 +current-jobs-reserved: 0 +current-jobs-delayed: 0 +current-jobs-buried: 0 +total-jobs: 0 +current-using: 2 +current-watching: 2 +current-waiting: 0 +cmd-delete: 0 +cmd-pause-tube: 0 +pause: 0 +pause-time-left: 0 +` + statsTubeTestResponse = `--- +name: test +current-jobs-urgent: 5 +current-jobs-ready: 5 +current-jobs-reserved: 0 +current-jobs-delayed: 1 +current-jobs-buried: 0 +total-jobs: 6 +current-using: 0 +current-watching: 0 +current-waiting: 0 +cmd-delete: 0 +cmd-pause-tube: 0 +pause: 0 +pause-time-left: 0 +` +) + +var ( + // Default tube without stats + defaultTubeFields = map[string]interface{}{ + "cmd_delete": 0, + "cmd_pause_tube": 0, + "current_jobs_buried": 0, + "current_jobs_delayed": 0, + "current_jobs_ready": 0, + "current_jobs_reserved": 0, + "current_jobs_urgent": 0, + "current_using": 2, + "current_waiting": 0, + "current_watching": 2, + "pause": 0, + "pause_time_left": 0, + "total_jobs": 0, + } + // Test tube with stats + testTubeFields = map[string]interface{}{ + "cmd_delete": 0, + "cmd_pause_tube": 0, + "current_jobs_buried": 0, + "current_jobs_delayed": 1, + "current_jobs_ready": 5, + "current_jobs_reserved": 0, + "current_jobs_urgent": 5, + "current_using": 0, + "current_waiting": 0, + "current_watching": 0, + "pause": 0, + "pause_time_left": 0, + "total_jobs": 6, + } + // Server stats + overviewFields = map[string]interface{}{ + "binlog_current_index": 0, + "binlog_max_size": 10485760, + "binlog_oldest_index": 0, + "binlog_records_migrated": 0, + "binlog_records_written": 0, + "cmd_bury": 1, + "cmd_delete": 1, + "cmd_ignore": 0, + "cmd_kick": 1, + "cmd_list_tube_used": 0, + "cmd_list_tubes": 1, + "cmd_list_tubes_watched": 0, + "cmd_pause_tube": 0, + "cmd_peek": 0, + "cmd_peek_buried": 0, + "cmd_peek_delayed": 0, + "cmd_peek_ready": 1, + "cmd_put": 6, + "cmd_release": 0, + "cmd_reserve": 0, + "cmd_reserve_with_timeout": 1, + "cmd_stats": 1, + "cmd_stats_job": 0, + "cmd_stats_tube": 2, + "cmd_touch": 0, + "cmd_use": 
2, + "cmd_watch": 0, + "current_connections": 2, + "current_jobs_buried": 0, + "current_jobs_delayed": 1, + "current_jobs_ready": 5, + "current_jobs_reserved": 0, + "current_jobs_urgent": 5, + "current_producers": 1, + "current_tubes": 2, + "current_waiting": 0, + "current_workers": 1, + "job_timeouts": 0, + "max_job_size": 65535, + "pid": 6, + "rusage_stime": 0.0, + "rusage_utime": 0.0, + "total_connections": 2, + "total_jobs": 6, + "uptime": 20, + } +) + +func getOverviewTags(server string) map[string]string { + return map[string]string{ + "hostname": "2873efd3e88c", + "id": "bba7546657efdd4c", + "server": server, + "version": "1.10", + } +} + +func getTubeTags(server string, tube string) map[string]string { + return map[string]string{ + "name": tube, + "server": server, + } +} diff --git a/plugins/inputs/bind/README.md b/plugins/inputs/bind/README.md new file mode 100644 index 000000000..34d419d3a --- /dev/null +++ b/plugins/inputs/bind/README.md @@ -0,0 +1,118 @@ +# BIND 9 Nameserver Statistics Input Plugin + +This plugin decodes the JSON or XML statistics provided by BIND 9 nameservers. + +### XML Statistics Channel + +Version 2 statistics (BIND 9.6 - 9.9) and version 3 statistics (BIND 9.9+) are supported. Note that +for BIND 9.9 to support version 3 statistics, it must be built with the `--enable-newstats` compile +flag, and it must be specifically requested via the correct URL. Version 3 statistics are the +default (and only) XML format in BIND 9.10+. + +### JSON Statistics Channel + +JSON statistics schema version 1 (BIND 9.10+) is supported. As of writing, some distros still do +not enable support for JSON statistics in their BIND packages. + +### Configuration: + +- **urls** []string: List of BIND statistics channel URLs to collect from. Do not include a + trailing slash in the URL. Default is "http://localhost:8053/xml/v3". +- **gather_memory_contexts** bool: Report per-context memory statistics. +- **gather_views** bool: Report per-view query statistics. + +The following table summarizes the URL formats which should be used, depending on your BIND +version and configured statistics channel. + +| BIND Version | Statistics Format | Example URL | +| ------------ | ----------------- | ----------------------------- | +| 9.6 - 9.8 | XML v2 | http://localhost:8053 | +| 9.9 | XML v2 | http://localhost:8053/xml/v2 | +| 9.9+ | XML v3 | http://localhost:8053/xml/v3 | +| 9.10+ | JSON v1 | http://localhost:8053/json/v1 | + +#### Configuration of BIND Daemon + +Add the following to your named.conf if running Telegraf on the same host as the BIND daemon: +``` +statistics-channels { + inet 127.0.0.1 port 8053; +}; +``` + +Alternatively, specify a wildcard address (e.g., 0.0.0.0) or specific IP address of an interface to +configure the BIND daemon to listen on that address. Note that you should secure the statistics +channel with an ACL if it is publicly reachable. Consult the BIND Administrator Reference Manual +for more information. 
+ +### Measurements & Fields: + +- bind_counter + - name=value (multiple) +- bind_memory + - total_use + - in_use + - block_size + - context_size + - lost +- bind_memory_context + - total + - in_use + +### Tags: + +- All measurements + - url + - source + - port +- bind_counter + - type + - view (optional) +- bind_memory_context + - id + - name + +### Sample Queries: + +These are some useful queries (to generate dashboards or other) to run against data from this +plugin: + +``` +SELECT non_negative_derivative(mean(/^A$|^PTR$/), 5m) FROM bind_counter \ +WHERE "url" = 'localhost:8053' AND "type" = 'qtype' AND time > now() - 1h \ +GROUP BY time(5m), "type" +``` + +``` +name: bind_counter +tags: type=qtype +time non_negative_derivative_A non_negative_derivative_PTR +---- ------------------------- --------------------------- +1553862000000000000 254.99444444430992 1388.311111111194 +1553862300000000000 354 2135.716666666791 +1553862600000000000 316.8666666666977 2130.133333333768 +1553862900000000000 309.05000000004657 2126.75 +1553863200000000000 315.64999999990687 2128.483333332464 +1553863500000000000 308.9166666667443 2132.350000000559 +1553863800000000000 302.64999999990687 2131.1833333335817 +1553864100000000000 310.85000000009313 2132.449999999255 +1553864400000000000 314.3666666666977 2136.216666666791 +1553864700000000000 303.2333333331626 2133.8166666673496 +1553865000000000000 304.93333333334886 2127.333333333023 +1553865300000000000 317.93333333334886 2130.3166666664183 +1553865600000000000 280.6666666667443 1807.9071428570896 +``` + +### Example Output + +Here is example output of this plugin: + +``` +bind_memory,host=LAP,port=8053,source=localhost,url=localhost:8053 block_size=12058624i,context_size=4575056i,in_use=4113717i,lost=0i,total_use=16663252i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=opcode,url=localhost:8053 IQUERY=0i,NOTIFY=0i,QUERY=9i,STATUS=0i,UPDATE=0i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=rcode,url=localhost:8053 17=0i,18=0i,19=0i,20=0i,21=0i,22=0i,BADCOOKIE=0i,BADVERS=0i,FORMERR=0i,NOERROR=7i,NOTAUTH=0i,NOTIMP=0i,NOTZONE=0i,NXDOMAIN=0i,NXRRSET=0i,REFUSED=0i,RESERVED11=0i,RESERVED12=0i,RESERVED13=0i,RESERVED14=0i,RESERVED15=0i,SERVFAIL=2i,YXDOMAIN=0i,YXRRSET=0i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=qtype,url=localhost:8053 A=1i,ANY=1i,NS=1i,PTR=5i,SOA=1i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=nsstat,url=localhost:8053 AuthQryRej=0i,CookieBadSize=0i,CookieBadTime=0i,CookieIn=9i,CookieMatch=0i,CookieNew=9i,CookieNoMatch=0i,DNS64=0i,ECSOpt=0i,ExpireOpt=0i,KeyTagOpt=0i,NSIDOpt=0i,OtherOpt=0i,QryAuthAns=7i,QryBADCOOKIE=0i,QryDropped=0i,QryDuplicate=0i,QryFORMERR=0i,QryFailure=0i,QryNXDOMAIN=0i,QryNXRedir=0i,QryNXRedirRLookup=0i,QryNoauthAns=0i,QryNxrrset=1i,QryRecursion=2i,QryReferral=0i,QrySERVFAIL=2i,QrySuccess=6i,QryTCP=1i,QryUDP=8i,RPZRewrites=0i,RateDropped=0i,RateSlipped=0i,RecQryRej=0i,RecursClients=0i,ReqBadEDNSVer=0i,ReqBadSIG=0i,ReqEdns0=9i,ReqSIG0=0i,ReqTCP=1i,ReqTSIG=0i,Requestv4=9i,Requestv6=0i,RespEDNS0=9i,RespSIG0=0i,RespTSIG=0i,Response=9i,TruncatedResp=0i,UpdateBadPrereq=0i,UpdateDone=0i,UpdateFail=0i,UpdateFwdFail=0i,UpdateRej=0i,UpdateReqFwd=0i,UpdateRespFwd=0i,XfrRej=0i,XfrReqDone=0i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=zonestat,url=localhost:8053 
AXFRReqv4=0i,AXFRReqv6=0i,IXFRReqv4=0i,IXFRReqv6=0i,NotifyInv4=0i,NotifyInv6=0i,NotifyOutv4=0i,NotifyOutv6=0i,NotifyRej=0i,SOAOutv4=0i,SOAOutv6=0i,XfrFail=0i,XfrSuccess=0i 1554276619000000000 +bind_counter,host=LAP,port=8053,source=localhost,type=sockstat,url=localhost:8053 FDWatchClose=0i,FDwatchConn=0i,FDwatchConnFail=0i,FDwatchRecvErr=0i,FDwatchSendErr=0i,FdwatchBindFail=0i,RawActive=1i,RawClose=0i,RawOpen=1i,RawOpenFail=0i,RawRecvErr=0i,TCP4Accept=6i,TCP4AcceptFail=0i,TCP4Active=9i,TCP4BindFail=0i,TCP4Close=5i,TCP4Conn=0i,TCP4ConnFail=0i,TCP4Open=8i,TCP4OpenFail=0i,TCP4RecvErr=0i,TCP4SendErr=0i,TCP6Accept=0i,TCP6AcceptFail=0i,TCP6Active=2i,TCP6BindFail=0i,TCP6Close=0i,TCP6Conn=0i,TCP6ConnFail=0i,TCP6Open=2i,TCP6OpenFail=0i,TCP6RecvErr=0i,TCP6SendErr=0i,UDP4Active=18i,UDP4BindFail=14i,UDP4Close=14i,UDP4Conn=0i,UDP4ConnFail=0i,UDP4Open=32i,UDP4OpenFail=0i,UDP4RecvErr=0i,UDP4SendErr=0i,UDP6Active=3i,UDP6BindFail=0i,UDP6Close=6i,UDP6Conn=0i,UDP6ConnFail=6i,UDP6Open=9i,UDP6OpenFail=0i,UDP6RecvErr=0i,UDP6SendErr=0i,UnixAccept=0i,UnixAcceptFail=0i,UnixActive=0i,UnixBindFail=0i,UnixClose=0i,UnixConn=0i,UnixConnFail=0i,UnixOpen=0i,UnixOpenFail=0i,UnixRecvErr=0i,UnixSendErr=0i 1554276619000000000 +``` diff --git a/plugins/inputs/bind/bind.go b/plugins/inputs/bind/bind.go new file mode 100644 index 000000000..967c9031a --- /dev/null +++ b/plugins/inputs/bind/bind.go @@ -0,0 +1,87 @@ +package bind + +import ( + "fmt" + "net/http" + "net/url" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Bind struct { + Urls []string + GatherMemoryContexts bool + GatherViews bool +} + +var sampleConfig = ` + ## An array of BIND XML statistics URI to gather stats. + ## Default is "http://localhost:8053/xml/v3". + # urls = ["http://localhost:8053/xml/v3"] + # gather_memory_contexts = false + # gather_views = false +` + +var client = &http.Client{ + Timeout: time.Duration(4 * time.Second), +} + +func (b *Bind) Description() string { + return "Read BIND nameserver XML statistics" +} + +func (b *Bind) SampleConfig() string { + return sampleConfig +} + +func (b *Bind) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + if len(b.Urls) == 0 { + b.Urls = []string{"http://localhost:8053/xml/v3"} + } + + for _, u := range b.Urls { + addr, err := url.Parse(u) + if err != nil { + acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue + } + + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + acc.AddError(b.gatherUrl(addr, acc)) + }(addr) + } + + wg.Wait() + return nil +} + +func (b *Bind) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { + switch addr.Path { + case "": + // BIND 9.6 - 9.8 + return b.readStatsXMLv2(addr, acc) + case "/json/v1": + // BIND 9.10+ + return b.readStatsJSON(addr, acc) + case "/xml/v2": + // BIND 9.9 + return b.readStatsXMLv2(addr, acc) + case "/xml/v3": + // BIND 9.9+ + return b.readStatsXMLv3(addr, acc) + default: + return fmt.Errorf("URL %s is ambiguous. 
Please check plugin documentation for supported URL formats.", + addr) + } +} + +func init() { + inputs.Add("bind", func() telegraf.Input { return &Bind{} }) +} diff --git a/plugins/inputs/bind/bind_test.go b/plugins/inputs/bind/bind_test.go new file mode 100644 index 000000000..f2bfbbf66 --- /dev/null +++ b/plugins/inputs/bind/bind_test.go @@ -0,0 +1,617 @@ +package bind + +import ( + "net" + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestBindJsonStats(t *testing.T) { + ts := httptest.NewServer(http.FileServer(http.Dir("testdata"))) + url := ts.Listener.Addr().String() + host, port, _ := net.SplitHostPort(url) + defer ts.Close() + + b := Bind{ + Urls: []string{ts.URL + "/json/v1"}, + GatherMemoryContexts: true, + GatherViews: true, + } + + var acc testutil.Accumulator + err := acc.GatherError(b.Gather) + + assert.Nil(t, err) + + // Use subtests for counters, since they are similar structure + type fieldSet struct { + fieldKey string + fieldValue int64 + } + + testCases := []struct { + counterType string + values []fieldSet + }{ + { + "opcode", + []fieldSet{ + {"NOTIFY", 0}, + {"UPDATE", 0}, + {"IQUERY", 0}, + {"QUERY", 13}, + {"STATUS", 0}, + }, + }, + { + "rcode", + []fieldSet{ + {"NOERROR", 1732}, + {"FORMERR", 0}, + {"SERVFAIL", 6}, + {"NXDOMAIN", 200}, + {"NOTIMP", 0}, + {"REFUSED", 6}, + {"REFUSED", 0}, + {"YXDOMAIN", 0}, + {"YXRRSET", 0}, + {"NXRRSET", 0}, + {"NOTAUTH", 0}, + {"NOTZONE", 0}, + {"RESERVED11", 0}, + {"RESERVED12", 0}, + {"RESERVED13", 0}, + {"RESERVED14", 0}, + {"RESERVED15", 0}, + {"BADVERS", 0}, + {"17", 0}, + {"18", 0}, + {"19", 0}, + {"20", 0}, + {"21", 0}, + {"22", 0}, + {"BADCOOKIE", 0}, + }, + }, + { + "qtype", + []fieldSet{ + {"A", 2}, + {"AAAA", 2}, + {"PTR", 7}, + {"SRV", 2}, + }, + }, + { + "nsstat", + []fieldSet{ + {"QrySuccess", 6}, + {"QryRecursion", 12}, + {"Requestv4", 13}, + {"QryNXDOMAIN", 4}, + {"QryAuthAns", 1}, + {"QryNxrrset", 1}, + {"QryNoauthAns", 10}, + {"QryUDP", 13}, + {"QryDuplicate", 1}, + {"QrySERVFAIL", 1}, + {"Response", 12}, + }, + }, + { + "sockstat", + []fieldSet{ + {"TCP4Open", 118}, + {"UDP6Close", 112}, + {"UDP4Close", 333}, + {"TCP4Close", 119}, + {"TCP6Active", 2}, + {"UDP4Active", 2}, + {"UDP4RecvErr", 1}, + {"UDP4Open", 335}, + {"TCP4Active", 10}, + {"RawActive", 1}, + {"UDP6ConnFail", 112}, + {"TCP4Conn", 114}, + {"UDP6Active", 1}, + {"UDP6Open", 113}, + {"UDP4Conn", 333}, + {"UDP6SendErr", 112}, + {"RawOpen", 1}, + {"TCP4Accept", 6}, + {"TCP6Open", 2}, + }, + }, + { + "zonestat", + []fieldSet{ + {"NotifyOutv4", 8}, + {"NotifyInv4", 5}, + {"SOAOutv4", 5}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.counterType, func(t *testing.T) { + tags := map[string]string{ + "url": url, + "type": tc.counterType, + "source": host, + "port": port, + } + + fields := map[string]interface{}{} + + for _, val := range tc.values { + fields[val.fieldKey] = val.fieldValue + } + + acc.AssertContainsTaggedFields(t, "bind_counter", fields, tags) + }) + } + + // Subtest for memory stats + t.Run("memory", func(t *testing.T) { + tags := map[string]string{ + "url": url, + "source": host, + "port": port, + } + + fields := map[string]interface{}{ + "block_size": int64(13893632), + "context_size": int64(3685480), + "in_use": int64(3064368), + "lost": int64(0), + "total_use": int64(18206566), + } + acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags) + }) + + // Subtest for per-context memory stats + t.Run("memory_context", func(t 
*testing.T) { + assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) + assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + }) +} + +func TestBindXmlStatsV2(t *testing.T) { + ts := httptest.NewServer(http.FileServer(http.Dir("testdata"))) + url := ts.Listener.Addr().String() + host, port, _ := net.SplitHostPort(url) + defer ts.Close() + + b := Bind{ + Urls: []string{ts.URL + "/xml/v2"}, + GatherMemoryContexts: true, + GatherViews: true, + } + + var acc testutil.Accumulator + err := acc.GatherError(b.Gather) + + assert.Nil(t, err) + + // Use subtests for counters, since they are similar structure + type fieldSet struct { + fieldKey string + fieldValue int64 + } + + testCases := []struct { + counterType string + values []fieldSet + }{ + { + "opcode", + []fieldSet{ + {"UPDATE", 238}, + {"QUERY", 102312374}, + }, + }, + { + "qtype", + []fieldSet{ + {"ANY", 7}, + {"DNSKEY", 452}, + {"SSHFP", 2987}, + {"SOA", 100415}, + {"AAAA", 37786321}, + {"MX", 441155}, + {"IXFR", 157}, + {"CNAME", 531}, + {"NS", 1999}, + {"TXT", 34628}, + {"A", 58951432}, + {"SRV", 741082}, + {"PTR", 4211487}, + {"NAPTR", 39137}, + {"DS", 584}, + }, + }, + { + "nsstat", + []fieldSet{ + {"XfrReqDone", 157}, + {"ReqEdns0", 441758}, + {"ReqTSIG", 0}, + {"UpdateRespFwd", 0}, + {"RespEDNS0", 441748}, + {"QryDropped", 16}, + {"RPZRewrites", 0}, + {"XfrRej", 0}, + {"RecQryRej", 0}, + {"QryNxrrset", 24423133}, + {"QryFORMERR", 0}, + {"ReqTCP", 1548156}, + {"UpdateDone", 0}, + {"QrySERVFAIL", 14422}, + {"QryRecursion", 2104239}, + {"Requestv4", 102312611}, + {"UpdateFwdFail", 0}, + {"QryReferral", 3}, + {"Response", 102301560}, + {"RespTSIG", 0}, + {"QrySuccess", 63811668}, + {"QryFailure", 0}, + {"RespSIG0", 0}, + {"ReqSIG0", 0}, + {"UpdateRej", 238}, + {"QryAuthAns", 72180718}, + {"UpdateFail", 0}, + {"QryDuplicate", 10879}, + {"RateDropped", 0}, + {"QryNoauthAns", 30106182}, + {"QryNXDOMAIN", 14052096}, + {"ReqBadSIG", 0}, + {"UpdateReqFwd", 0}, + {"RateSlipped", 0}, + {"TruncatedResp", 3787}, + {"Requestv6", 1}, + {"UpdateBadPrereq", 0}, + {"AuthQryRej", 0}, + {"ReqBadEDNSVer", 0}, + }, + }, + { + "sockstat", + []fieldSet{ + {"FdwatchBindFail", 0}, + {"UDP6Open", 238269}, + {"UDP6SendErr", 238250}, + {"TCP4ConnFail", 0}, + {"TCP4Conn", 590}, + {"TCP6AcceptFail", 0}, + {"UDP4SendErr", 0}, + {"FDwatchConn", 0}, + {"TCP4RecvErr", 1}, + {"TCP4OpenFail", 0}, + {"UDP4OpenFail", 0}, + {"UDP6OpenFail", 0}, + {"TCP4Close", 1548268}, + {"TCP6BindFail", 0}, + {"TCP4AcceptFail", 0}, + {"UnixConn", 0}, + {"UDP4Open", 3765532}, + {"TCP6Close", 0}, + {"FDwatchRecvErr", 0}, + {"UDP4Conn", 3764828}, + {"UnixConnFail", 0}, + {"TCP6Conn", 0}, + {"TCP6OpenFail", 0}, + {"TCP6SendErr", 0}, + {"TCP6RecvErr", 0}, + {"FDwatchSendErr", 0}, + {"UDP4RecvErr", 1650}, + {"UDP4ConnFail", 0}, + {"UDP6Close", 238267}, + {"FDWatchClose", 0}, + {"TCP4Accept", 1547672}, + {"UnixAccept", 0}, + {"TCP4Open", 602}, + {"UDP4BindFail", 219}, + {"UDP6ConnFail", 238250}, + {"UnixClose", 0}, + {"TCP4BindFail", 0}, + {"UnixOpenFail", 0}, + {"UDP6BindFail", 16}, + {"UnixOpen", 0}, + {"UnixAcceptFail", 0}, + {"UnixRecvErr", 0}, + {"UDP6RecvErr", 0}, + {"TCP6ConnFail", 0}, + {"FDwatchConnFail", 0}, + {"TCP4SendErr", 0}, + {"UDP4Close", 3765528}, + {"UnixSendErr", 0}, + {"TCP6Open", 2}, + {"UDP6Conn", 1}, + {"TCP6Accept", 0}, + {"UnixBindFail", 0}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.counterType, func(t *testing.T) { + tags := map[string]string{ + "url": url, + "type": tc.counterType, + "source": host, + "port": port, + } + + 
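+			// Flatten the expected fieldSet slice into a map so the whole counter
+			// group can be checked with a single AssertContainsTaggedFields call.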
fields := map[string]interface{}{} + + for _, val := range tc.values { + fields[val.fieldKey] = val.fieldValue + } + + acc.AssertContainsTaggedFields(t, "bind_counter", fields, tags) + }) + } + + // Subtest for memory stats + t.Run("memory", func(t *testing.T) { + tags := map[string]string{ + "url": url, + "source": host, + "port": port, + } + + fields := map[string]interface{}{ + "block_size": int64(77070336), + "context_size": int64(6663840), + "in_use": int64(20772579), + "lost": int64(0), + "total_use": int64(81804609), + } + + acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags) + }) + + // Subtest for per-context memory stats + t.Run("memory_context", func(t *testing.T) { + assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) + assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + }) +} + +func TestBindXmlStatsV3(t *testing.T) { + ts := httptest.NewServer(http.FileServer(http.Dir("testdata"))) + url := ts.Listener.Addr().String() + host, port, _ := net.SplitHostPort(url) + defer ts.Close() + + b := Bind{ + Urls: []string{ts.URL + "/xml/v3"}, + GatherMemoryContexts: true, + GatherViews: true, + } + + var acc testutil.Accumulator + err := acc.GatherError(b.Gather) + + assert.Nil(t, err) + + // Use subtests for counters, since they are similar structure + type fieldSet struct { + fieldKey string + fieldValue int64 + } + + testCases := []struct { + counterType string + values []fieldSet + }{ + { + "opcode", + []fieldSet{ + {"NOTIFY", 0}, + {"UPDATE", 0}, + {"IQUERY", 0}, + {"QUERY", 74941}, + {"STATUS", 0}, + }, + }, + { + "qtype", + []fieldSet{ + {"ANY", 22}, + {"SOA", 18}, + {"AAAA", 5735}, + {"MX", 618}, + {"NS", 373}, + {"TXT", 970}, + {"A", 63672}, + {"SRV", 139}, + {"PTR", 3393}, + {"RRSIG", 1}, + }, + }, + { + "nsstat", + []fieldSet{ + {"DNS64", 0}, + {"ExpireOpt", 0}, + {"NSIDOpt", 0}, + {"OtherOpt", 59}, + {"XfrReqDone", 0}, + {"ReqEdns0", 9250}, + {"ReqTSIG", 0}, + {"UpdateRespFwd", 0}, + {"RespEDNS0", 9250}, + {"QryDropped", 11}, + {"RPZRewrites", 0}, + {"XfrRej", 0}, + {"RecQryRej", 35}, + {"QryNxrrset", 2452}, + {"QryFORMERR", 0}, + {"ReqTCP", 260}, + {"QryTCP", 258}, + {"QryUDP", 74648}, + {"UpdateDone", 0}, + {"QrySERVFAIL", 122}, + {"QryRecursion", 53750}, + {"RecursClients", 0}, + {"Requestv4", 74942}, + {"UpdateFwdFail", 0}, + {"QryReferral", 0}, + {"Response", 63264}, + {"RespTSIG", 0}, + {"QrySuccess", 49044}, + {"QryFailure", 35}, + {"RespSIG0", 0}, + {"ReqSIG0", 0}, + {"UpdateRej", 0}, + {"QryAuthAns", 2752}, + {"UpdateFail", 0}, + {"QryDuplicate", 11667}, + {"RateDropped", 0}, + {"QryNoauthAns", 60354}, + {"QryNXDOMAIN", 11610}, + {"ReqBadSIG", 0}, + {"UpdateReqFwd", 0}, + {"RateSlipped", 0}, + {"TruncatedResp", 365}, + {"Requestv6", 0}, + {"UpdateBadPrereq", 0}, + {"AuthQryRej", 0}, + {"ReqBadEDNSVer", 0}, + {"SitBadSize", 0}, + {"SitBadTime", 0}, + {"SitMatch", 0}, + {"SitNew", 0}, + {"SitNoMatch", 0}, + {"SitOpt", 0}, + {"TruncatedResp", 365}, + }, + }, + { + "sockstat", + []fieldSet{ + {"FDwatchConnFail", 0}, + {"UnixClose", 0}, + {"TCP6OpenFail", 0}, + {"TCP6Active", 0}, + {"UDP4RecvErr", 14}, + {"TCP6Conn", 0}, + {"FDWatchClose", 0}, + {"TCP4ConnFail", 0}, + {"UnixConn", 0}, + {"UnixSendErr", 0}, + {"UDP6Close", 0}, + {"UnixOpen", 0}, + {"UDP4Conn", 92535}, + {"TCP4Close", 336}, + {"UnixAcceptFail", 0}, + {"UnixAccept", 0}, + {"TCP6AcceptFail", 0}, + {"UDP6Open", 0}, + {"UDP6BindFail", 0}, + {"UDP6RecvErr", 0}, + {"RawOpenFail", 0}, + {"TCP4Accept", 293}, + {"UDP6SendErr", 0}, + {"UDP6Conn", 0}, + {"TCP4SendErr", 0}, + 
{"UDP4BindFail", 1}, + {"UDP4Active", 4}, + {"TCP4Active", 297}, + {"UnixConnFail", 0}, + {"UnixOpenFail", 0}, + {"UDP6ConnFail", 0}, + {"TCP6Accept", 0}, + {"UnixRecvErr", 0}, + {"RawActive", 1}, + {"UDP6OpenFail", 0}, + {"RawClose", 0}, + {"UnixBindFail", 0}, + {"UnixActive", 0}, + {"FdwatchBindFail", 0}, + {"UDP4SendErr", 0}, + {"RawRecvErr", 0}, + {"TCP6Close", 0}, + {"FDwatchRecvErr", 0}, + {"TCP4BindFail", 0}, + {"TCP4AcceptFail", 0}, + {"TCP4OpenFail", 0}, + {"UDP4Open", 92542}, + {"UDP4ConnFail", 0}, + {"TCP4Conn", 44}, + {"TCP6ConnFail", 0}, + {"FDwatchConn", 0}, + {"UDP6Active", 0}, + {"RawOpen", 1}, + {"TCP6BindFail", 0}, + {"UDP4Close", 92538}, + {"TCP6Open", 0}, + {"TCP6SendErr", 0}, + {"TCP4Open", 48}, + {"FDwatchSendErr", 0}, + {"TCP6RecvErr", 0}, + {"UDP4OpenFail", 0}, + {"TCP4RecvErr", 0}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.counterType, func(t *testing.T) { + tags := map[string]string{ + "url": url, + "type": tc.counterType, + "source": host, + "port": port, + } + + fields := map[string]interface{}{} + + for _, val := range tc.values { + fields[val.fieldKey] = val.fieldValue + } + + acc.AssertContainsTaggedFields(t, "bind_counter", fields, tags) + }) + } + + // Subtest for memory stats + t.Run("memory", func(t *testing.T) { + tags := map[string]string{ + "url": url, + "source": host, + "port": port, + } + + fields := map[string]interface{}{ + "block_size": int64(45875200), + "context_size": int64(10037400), + "in_use": int64(6000232), + "lost": int64(0), + "total_use": int64(777821909), + } + + acc.AssertContainsTaggedFields(t, "bind_memory", fields, tags) + }) + + // Subtest for per-context memory stats + t.Run("memory_context", func(t *testing.T) { + assert.True(t, acc.HasInt64Field("bind_memory_context", "total")) + assert.True(t, acc.HasInt64Field("bind_memory_context", "in_use")) + }) +} + +func TestBindUnparseableURL(t *testing.T) { + b := Bind{ + Urls: []string{"://example.com"}, + } + + var acc testutil.Accumulator + err := acc.GatherError(b.Gather) + assert.Contains(t, err.Error(), "Unable to parse address") +} diff --git a/plugins/inputs/bind/json_stats.go b/plugins/inputs/bind/json_stats.go new file mode 100644 index 000000000..87b6065e2 --- /dev/null +++ b/plugins/inputs/bind/json_stats.go @@ -0,0 +1,176 @@ +package bind + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +type jsonStats struct { + OpCodes map[string]int + QTypes map[string]int + RCodes map[string]int + ZoneStats map[string]int + NSStats map[string]int + SockStats map[string]int + Views map[string]jsonView + Memory jsonMemory +} + +type jsonMemory struct { + TotalUse int64 + InUse int64 + BlockSize int64 + ContextSize int64 + Lost int64 + Contexts []struct { + Id string + Name string + Total int64 + InUse int64 + } +} + +type jsonView struct { + Resolver map[string]map[string]int +} + +// addJSONCounter adds a counter array to a Telegraf Accumulator, with the specified tags. 
+func addJSONCounter(acc telegraf.Accumulator, commonTags map[string]string, stats map[string]int) { + grouper := metric.NewSeriesGrouper() + ts := time.Now() + for name, value := range stats { + if commonTags["type"] == "opcode" && strings.HasPrefix(name, "RESERVED") { + continue + } + + tags := make(map[string]string) + + // Create local copy of tags since maps are reference types + for k, v := range commonTags { + tags[k] = v + } + + grouper.Add("bind_counter", tags, ts, name, value) + } + + //Add grouped metrics + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } +} + +// addStatsJson walks a jsonStats struct and adds the values to the telegraf.Accumulator. +func (b *Bind) addStatsJSON(stats jsonStats, acc telegraf.Accumulator, urlTag string) { + grouper := metric.NewSeriesGrouper() + ts := time.Now() + tags := map[string]string{"url": urlTag} + host, port, _ := net.SplitHostPort(urlTag) + tags["source"] = host + tags["port"] = port + + // Opcodes + tags["type"] = "opcode" + addJSONCounter(acc, tags, stats.OpCodes) + + // RCodes stats + tags["type"] = "rcode" + addJSONCounter(acc, tags, stats.RCodes) + + // Query RDATA types + tags["type"] = "qtype" + addJSONCounter(acc, tags, stats.QTypes) + + // Nameserver stats + tags["type"] = "nsstat" + addJSONCounter(acc, tags, stats.NSStats) + + // Socket statistics + tags["type"] = "sockstat" + addJSONCounter(acc, tags, stats.SockStats) + + // Zonestats + tags["type"] = "zonestat" + addJSONCounter(acc, tags, stats.ZoneStats) + + // Memory stats + fields := map[string]interface{}{ + "total_use": stats.Memory.TotalUse, + "in_use": stats.Memory.InUse, + "block_size": stats.Memory.BlockSize, + "context_size": stats.Memory.ContextSize, + "lost": stats.Memory.Lost, + } + acc.AddGauge("bind_memory", fields, map[string]string{"url": urlTag, "source": host, "port": port}) + + // Detailed, per-context memory stats + if b.GatherMemoryContexts { + for _, c := range stats.Memory.Contexts { + tags := map[string]string{"url": urlTag, "id": c.Id, "name": c.Name, "source": host, "port": port} + fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} + + acc.AddGauge("bind_memory_context", fields, tags) + } + } + + // Detailed, per-view stats + if b.GatherViews { + for vName, view := range stats.Views { + for cntrType, counters := range view.Resolver { + for cntrName, value := range counters { + tags := map[string]string{ + "url": urlTag, + "source": host, + "port": port, + "view": vName, + "type": cntrType, + } + + grouper.Add("bind_counter", tags, ts, cntrName, value) + } + } + } + } + + //Add grouped metrics + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } +} + +// readStatsJSON takes a base URL to probe, and requests the individual statistics blobs that we +// are interested in. These individual blobs have a combined size which is significantly smaller +// than if we requested everything at once (e.g. taskmgr and socketmgr can be omitted). 
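+// The blobs requested are /server, /net and /mem, appended to the configured base URL.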
+func (b *Bind) readStatsJSON(addr *url.URL, acc telegraf.Accumulator) error { + var stats jsonStats + + // Progressively build up full jsonStats struct by parsing the individual HTTP responses + for _, suffix := range [...]string{"/server", "/net", "/mem"} { + scrapeUrl := addr.String() + suffix + + resp, err := client.Get(scrapeUrl) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) + } + + if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("Unable to decode JSON blob: %s", err) + } + } + + b.addStatsJSON(stats, acc, addr.Host) + return nil +} diff --git a/plugins/inputs/bind/testdata/json/v1/mem b/plugins/inputs/bind/testdata/json/v1/mem new file mode 100644 index 000000000..8872344e1 --- /dev/null +++ b/plugins/inputs/bind/testdata/json/v1/mem @@ -0,0 +1,133 @@ +{ + "json-stats-version":"1.2", + "boot-time":"2017-07-28T13:24:53Z", + "config-time":"2017-07-28T13:24:53Z", + "current-time":"2017-07-28T15:33:07Z", + "memory":{ + "TotalUse":18206566, + "InUse":3064368, + "BlockSize":13893632, + "ContextSize":3685480, + "Lost":0, + "contexts":[ + { + "id":"0x55fb2e042de0", + "name":"main", + "references":202, + "total":2693003, + "inuse":1454904, + "maxinuse":1508072, + "blocksize":786432, + "pools":40, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x55fb2e0507e0", + "name":"dst", + "references":1, + "total":387478, + "inuse":91776, + "maxinuse":97208, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x55fb2e0938e0", + "name":"zonemgr-pool", + "references":113, + "total":742986, + "inuse":143776, + "maxinuse":313961, + "blocksize":262144, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d00017d0", + "name":"threadkey", + "references":1, + "total":0, + "inuse":0, + "maxinuse":0, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d00475f0", + "name":"client", + "references":3, + "total":267800, + "inuse":8760, + "maxinuse":8760, + "blocksize":262144, + "pools":2, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d00dfca0", + "name":"cache", + "references":8, + "total":288938, + "inuse":83650, + "maxinuse":83842, + "blocksize":262144, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d00eaa30", + "name":"cache_heap", + "references":18, + "total":393216, + "inuse":132096, + "maxinuse":132096, + "blocksize":262144, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d01094e0", + "name":"res0", + "references":1, + "total":262144, + "inuse":0, + "maxinuse":22048, + "blocksize":262144, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d0114270", + "name":"res1", + "references":1, + "total":0, + "inuse":0, + "maxinuse":0, + "blocksize":0, + "pools":0, + "hiwater":0, + "lowater":0 + }, + { + "id":"0x7f19d011f000", + "name":"res2", + "references":1, + "total":0, + "inuse":0, + "maxinuse":0, + "blocksize":0, + "pools":0, + "hiwater":0, + "lowater":0 + } + ] + } +} \ No newline at end of file diff --git a/plugins/inputs/bind/testdata/json/v1/net b/plugins/inputs/bind/testdata/json/v1/net new file mode 100644 index 000000000..0bbd41429 --- /dev/null +++ b/plugins/inputs/bind/testdata/json/v1/net @@ -0,0 +1,241 @@ +{ + "json-stats-version":"1.2", + "boot-time":"2017-07-28T13:24:53Z", + "config-time":"2017-07-28T13:24:53Z", + "current-time":"2017-07-28T15:33:07Z", + "sockstats":{ + "UDP4Open":335, + "UDP6Open":113, + "TCP4Open":118, + "TCP6Open":2, + "RawOpen":1, 
+ "UDP4Close":333, + "UDP6Close":112, + "TCP4Close":119, + "UDP6ConnFail":112, + "UDP4Conn":333, + "TCP4Conn":114, + "TCP4Accept":6, + "UDP6SendErr":112, + "UDP4RecvErr":1, + "UDP4Active":2, + "UDP6Active":1, + "TCP4Active":10, + "TCP6Active":2, + "RawActive":1 + }, + "socketmgr":{ + "sockets":[ + { + "id":"0x7f19dd849010", + "references":1, + "type":"not-initialized", + "local-address":"", + "states":[ + "bound" + ] + }, + { + "id":"0x7f19dd849268", + "references":1, + "type":"tcp", + "local-address":"0.0.0.0#8053", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19dd849718", + "references":2, + "type":"udp", + "local-address":"::#53", + "states":[ + "bound" + ] + }, + { + "id":"0x7f19dd849970", + "references":2, + "type":"tcp", + "local-address":"::#53", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19dd849bc8", + "references":2, + "type":"udp", + "local-address":"127.0.0.1#53", + "states":[ + "bound" + ] + }, + { + "id":"0x7f19dd6f4010", + "references":2, + "type":"tcp", + "local-address":"127.0.0.1#53", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19dd6f4718", + "references":1, + "type":"tcp", + "local-address":"127.0.0.1#953", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19dd6f4bc8", + "references":1, + "type":"tcp", + "local-address":"::1#953", + "states":[ + "listener", + "bound" + ] + }, + { + "id":"0x7f19d4fb7970", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fb7bc8", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fc7010", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fc74c0", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fc7718", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fc7bc8", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1010", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1268", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd14c0", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1718", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1970", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd1bc8", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd9010", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fda4c0", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd9bc8", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fda268", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd9970", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fda010", + "references":1, + "type":"udp", + "states":[ + ] + }, + { + "id":"0x7f19d4fd9718", + "references":1, + "type":"udp", + "states":[ + ] + } + ] + } +} \ No newline at end of file diff --git a/plugins/inputs/bind/testdata/json/v1/server b/plugins/inputs/bind/testdata/json/v1/server new file mode 100644 index 000000000..060fab6b1 --- /dev/null +++ b/plugins/inputs/bind/testdata/json/v1/server @@ -0,0 +1,172 @@ +{ + "json-stats-version":"1.2", + "boot-time":"2017-07-28T13:24:53Z", + "config-time":"2017-07-28T13:24:53Z", + "current-time":"2017-07-28T15:33:07Z", + "opcodes":{ + "QUERY":13, + "IQUERY":0, + "STATUS":0, + "RESERVED3":0, + "NOTIFY":0, + "UPDATE":0, + "RESERVED6":0, + "RESERVED7":0, 
+ "RESERVED8":0, + "RESERVED9":0, + "RESERVED10":0, + "RESERVED11":0, + "RESERVED12":0, + "RESERVED13":0, + "RESERVED14":0, + "RESERVED15":0 + }, + "rcodes":{ + "NOERROR":1732, + "FORMERR":0, + "SERVFAIL":6, + "NXDOMAIN":200, + "NOTIMP":0, + "REFUSED":0, + "YXDOMAIN":0, + "YXRRSET":0, + "NXRRSET":0, + "NOTAUTH":0, + "NOTZONE":0, + "RESERVED11":0, + "RESERVED12":0, + "RESERVED13":0, + "RESERVED14":0, + "RESERVED15":0, + "BADVERS":0, + "17":0, + "18":0, + "19":0, + "20":0, + "21":0, + "22":0, + "BADCOOKIE":0 + }, + "qtypes":{ + "A":2, + "PTR":7, + "AAAA":2, + "SRV":2 + }, + "nsstats":{ + "Requestv4":13, + "Response":12, + "QrySuccess":6, + "QryAuthAns":1, + "QryNoauthAns":10, + "QryNxrrset":1, + "QrySERVFAIL":1, + "QryNXDOMAIN":4, + "QryRecursion":12, + "QryDuplicate":1, + "QryUDP":13 + }, + "zonestats":{ + "NotifyOutv4":8, + "NotifyInv4":5, + "SOAOutv4":5 + }, + "views":{ + "_default":{ + "resolver":{ + "stats":{ + "Queryv4":447, + "Queryv6":112, + "Responsev4":444, + "NXDOMAIN":3, + "Truncated":114, + "Retry":242, + "QueryTimeout":3, + "GlueFetchv4":61, + "GlueFetchv6":68, + "GlueFetchv6Fail":24, + "ValAttempt":36, + "ValOk":27, + "ValNegOk":9, + "QryRTT100":287, + "QryRTT500":152, + "QryRTT800":4, + "BucketSize":31 + }, + "qtypes":{ + "A":220, + "NS":19, + "PTR":22, + "AAAA":233, + "SRV":14, + "DS":27, + "DNSKEY":24 + }, + "cache":{ + "A":150, + "NS":44, + "PTR":3, + "AAAA":104, + "DS":23, + "RRSIG":94, + "NSEC":8, + "DNSKEY":7, + "!AAAA":23, + "!DS":5, + "NXDOMAIN":1 + }, + "cachestats":{ + "CacheHits":1675, + "CacheMisses":44, + "QueryHits":17, + "QueryMisses":12, + "DeleteLRU":0, + "DeleteTTL":16, + "CacheNodes":219, + "CacheBuckets":129, + "TreeMemTotal":551082, + "TreeMemInUse":150704, + "HeapMemMax":132096, + "HeapMemTotal":393216, + "HeapMemInUse":132096 + }, + "adb":{ + "nentries":1021, + "entriescnt":254, + "nnames":1021, + "namescnt":195 + } + } + }, + "_bind":{ + "resolver":{ + "stats":{ + "BucketSize":31 + }, + "qtypes":{ + }, + "cache":{ + }, + "cachestats":{ + "CacheHits":0, + "CacheMisses":0, + "QueryHits":0, + "QueryMisses":0, + "DeleteLRU":0, + "DeleteTTL":0, + "CacheNodes":0, + "CacheBuckets":64, + "TreeMemTotal":287392, + "TreeMemInUse":29608, + "HeapMemMax":1024, + "HeapMemTotal":262144, + "HeapMemInUse":1024 + }, + "adb":{ + "nentries":1021, + "nnames":1021 + } + } + } + } +} diff --git a/plugins/inputs/bind/testdata/xml/v2 b/plugins/inputs/bind/testdata/xml/v2 new file mode 100644 index 000000000..e16c53dbc --- /dev/null +++ b/plugins/inputs/bind/testdata/xml/v2 @@ -0,0 +1,926 @@ + + + + + + + + _default + + A + 2936881 + + + NS + 28994 + + + CNAME + 26 + + + SOA + 15131 + + + PTR + 47924 + + + MX + 1884 + + + TXT + 6486 + + + AAAA + 949781 + + + SRV + 14740 + + + NAPTR + 1606 + + + DS + 25 + + + SSHFP + 185 + + + DNSKEY + 13 + + + ANY + 1 + + + Queryv4 + 3765426 + + + Queryv6 + 238251 + + + Responsev4 + 3716142 + + + Responsev6 + 1 + + + NXDOMAIN + 100052 + + + SERVFAIL + 5894 + + + FORMERR + 2041 + + + OtherError + 14801 + + + EDNS0Fail + 2615 + + + Mismatch + 0 + + + Truncated + 598 + + + Lame + 117 + + + Retry + 383343 + + + QueryAbort + 0 + + + QuerySockFail + 0 + + + QueryTimeout + 50874 + + + GlueFetchv4 + 260749 + + + GlueFetchv6 + 225310 + + + GlueFetchv4Fail + 5756 + + + GlueFetchv6Fail + 141500 + + + ValAttempt + 0 + + + ValOk + 0 + + + ValNegOk + 0 + + + ValFail + 0 + + + QryRTT10 + 458176 + + + QryRTT100 + 3010133 + + + QryRTT500 + 244312 + + + QryRTT800 + 1275 + + + QryRTT1600 + 361 + + + QryRTT1600+ + 236 + + + + A + 2700 + + + NS + 759 + + + CNAME + 
486 + + + SOA + 2 + + + PTR + 6 + + + TXT + 2 + + + AAAA + 629 + + + SRV + 1 + + + DS + 48 + + + RRSIG + 203 + + + NSEC + 22 + + + DNSKEY + 1 + + + !A + 6 + + + !SOA + 26 + + + !AAAA + 84 + + + !NAPTR + 3 + + + NXDOMAIN + 143 + + + + + _bind + + Queryv4 + 0 + + + Queryv6 + 0 + + + Responsev4 + 0 + + + Responsev6 + 0 + + + NXDOMAIN + 0 + + + SERVFAIL + 0 + + + FORMERR + 0 + + + OtherError + 0 + + + EDNS0Fail + 0 + + + Mismatch + 0 + + + Truncated + 0 + + + Lame + 0 + + + Retry + 0 + + + QueryAbort + 0 + + + QuerySockFail + 0 + + + QueryTimeout + 0 + + + GlueFetchv4 + 0 + + + GlueFetchv6 + 0 + + + GlueFetchv4Fail + 0 + + + GlueFetchv6Fail + 0 + + + ValAttempt + 0 + + + ValOk + 0 + + + ValNegOk + 0 + + + ValFail + 0 + + + QryRTT10 + 0 + + + QryRTT100 + 0 + + + QryRTT500 + 0 + + + QryRTT800 + 0 + + + QryRTT1600 + 0 + + + QryRTT1600+ + 0 + + + + + + 2016-10-02T18:45:00Z + 2016-10-23T19:27:48Z + + + QUERY + 102312374 + + + UPDATE + 238 + + + + + A + 58951432 + + + NS + 1999 + + + CNAME + 531 + + + SOA + 100415 + + + PTR + 4211487 + + + MX + 441155 + + + TXT + 34628 + + + AAAA + 37786321 + + + SRV + 741082 + + + NAPTR + 39137 + + + DS + 584 + + + SSHFP + 2987 + + + DNSKEY + 452 + + + IXFR + 157 + + + ANY + 7 + + + + Requestv4 + 102312611 + + + Requestv6 + 1 + + + ReqEdns0 + 441758 + + + ReqBadEDNSVer + 0 + + + ReqTSIG + 0 + + + ReqSIG0 + 0 + + + ReqBadSIG + 0 + + + ReqTCP + 1548156 + + + AuthQryRej + 0 + + + RecQryRej + 0 + + + XfrRej + 0 + + + UpdateRej + 238 + + + Response + 102301560 + + + TruncatedResp + 3787 + + + RespEDNS0 + 441748 + + + RespTSIG + 0 + + + RespSIG0 + 0 + + + QrySuccess + 63811668 + + + QryAuthAns + 72180718 + + + QryNoauthAns + 30106182 + + + QryReferral + 3 + + + QryNxrrset + 24423133 + + + QrySERVFAIL + 14422 + + + QryFORMERR + 0 + + + QryNXDOMAIN + 14052096 + + + QryRecursion + 2104239 + + + QryDuplicate + 10879 + + + QryDropped + 16 + + + QryFailure + 0 + + + XfrReqDone + 157 + + + UpdateReqFwd + 0 + + + UpdateRespFwd + 0 + + + UpdateFwdFail + 0 + + + UpdateDone + 0 + + + UpdateFail + 0 + + + UpdateBadPrereq + 0 + + + RPZRewrites + 0 + + + RateDropped + 0 + + + RateSlipped + 0 + + + NotifyOutv4 + 663 + + + NotifyOutv6 + 0 + + + NotifyInv4 + 0 + + + NotifyInv6 + 0 + + + NotifyRej + 0 + + + SOAOutv4 + 386 + + + SOAOutv6 + 0 + + + AXFRReqv4 + 0 + + + AXFRReqv6 + 0 + + + IXFRReqv4 + 0 + + + IXFRReqv6 + 0 + + + XfrSuccess + 0 + + + XfrFail + 0 + + + Mismatch + 2 + + + UDP4Open + 3765532 + + + UDP6Open + 238269 + + + TCP4Open + 602 + + + TCP6Open + 2 + + + UnixOpen + 0 + + + UDP4OpenFail + 0 + + + UDP6OpenFail + 0 + + + TCP4OpenFail + 0 + + + TCP6OpenFail + 0 + + + UnixOpenFail + 0 + + + UDP4Close + 3765528 + + + UDP6Close + 238267 + + + TCP4Close + 1548268 + + + TCP6Close + 0 + + + UnixClose + 0 + + + FDWatchClose + 0 + + + UDP4BindFail + 219 + + + UDP6BindFail + 16 + + + TCP4BindFail + 0 + + + TCP6BindFail + 0 + + + UnixBindFail + 0 + + + FdwatchBindFail + 0 + + + UDP4ConnFail + 0 + + + UDP6ConnFail + 238250 + + + TCP4ConnFail + 0 + + + TCP6ConnFail + 0 + + + UnixConnFail + 0 + + + FDwatchConnFail + 0 + + + UDP4Conn + 3764828 + + + UDP6Conn + 1 + + + TCP4Conn + 590 + + + TCP6Conn + 0 + + + UnixConn + 0 + + + FDwatchConn + 0 + + + TCP4AcceptFail + 0 + + + TCP6AcceptFail + 0 + + + UnixAcceptFail + 0 + + + TCP4Accept + 1547672 + + + TCP6Accept + 0 + + + UnixAccept + 0 + + + UDP4SendErr + 0 + + + UDP6SendErr + 238250 + + + TCP4SendErr + 0 + + + TCP6SendErr + 0 + + + UnixSendErr + 0 + + + FDwatchSendErr + 0 + + + UDP4RecvErr + 1650 + + + UDP6RecvErr + 0 + + + TCP4RecvErr + 1 + + 
+ TCP6RecvErr + 0 + + + UnixRecvErr + 0 + + + FDwatchRecvErr + 0 + + + + + + 0x7f8a94e061d0 + main + 229 + 5002528 + 3662792 + 4848264 + 2359296 + 75 + 0 + 0 + + + 0x7f8a94e13830 + dst + 1 + 133486 + 96456 + 102346 + - + 0 + 0 + 0 + + + 0x7f8a94e401c0 + zonemgr-pool + 501 + 6339848 + 4384240 + 5734049 + 6029312 + 0 + 0 + 0 + + + + 81804609 + 20772579 + 77070336 + 6663840 + 0 + + + + + diff --git a/plugins/inputs/bind/testdata/xml/v3/mem b/plugins/inputs/bind/testdata/xml/v3/mem new file mode 100644 index 000000000..493708d7d --- /dev/null +++ b/plugins/inputs/bind/testdata/xml/v3/mem @@ -0,0 +1,142 @@ + + + + + 2017-07-21T11:53:28Z + 2017-07-21T11:53:28Z + 2017-07-25T23:47:08Z + + + + + + + 0x55fb2e042de0 + main + 202 + 2706043 + 1454904 + 1508072 + 786432 + 40 + 0 + 0 + + + 0x55fb2e0507e0 + dst + 1 + 387478 + 91776 + 97208 + - + 0 + 0 + 0 + + + 0x55fb2e0938e0 + zonemgr-pool + 113 + 742986 + 143776 + 313961 + 262144 + 0 + 0 + 0 + + + 0x7f19d00017d0 + threadkey + 1 + 0 + 0 + 0 + - + 0 + 0 + 0 + + + 0x7f19d00475f0 + client + 3 + 267800 + 8760 + 8760 + 262144 + 2 + 0 + 0 + + + 0x7f19d00dfca0 + cache + 8 + 288938 + 83650 + 83842 + 262144 + 0 + 0 + 0 + + + 0x7f19d00eaa30 + cache_heap + 18 + 393216 + 132096 + 132096 + 262144 + 0 + 0 + 0 + + + 0x7f19d01094e0 + res0 + 1 + 262144 + 0 + 22048 + 262144 + 0 + 0 + 0 + + + 0x7f19d0114270 + res1 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 0x7f19d011f000 + res2 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + 777821909 + 6000232 + 45875200 + 10037400 + 0 + + + diff --git a/plugins/inputs/bind/testdata/xml/v3/net b/plugins/inputs/bind/testdata/xml/v3/net new file mode 100644 index 000000000..50f713447 --- /dev/null +++ b/plugins/inputs/bind/testdata/xml/v3/net @@ -0,0 +1,156 @@ + + + + + 2017-07-21T11:53:28Z + 2017-07-21T11:53:28Z + 2017-07-25T23:47:08Z + + 92542 + 0 + 48 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 92538 + 0 + 336 + 0 + 0 + 0 + 0 + 1 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 92535 + 0 + 44 + 0 + 0 + 0 + 0 + 0 + 0 + 293 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 14 + 0 + 0 + 0 + 0 + 0 + 0 + 4 + 0 + 297 + 0 + 0 + 1 + + + + + + + + 0x7f19dd849010 + 1 + not-initialized + <unknown address, family 16> + + bound + + + + 0x7f19dd849268 + 1 + tcp + 0.0.0.0#8053 + + listener + bound + + + + 0x7f19dd849718 + 2 + udp + ::#53 + + bound + + + + 0x7f19dd849970 + 2 + tcp + ::#53 + + listener + bound + + + + 0x7f19dd849bc8 + 2 + udp + 127.0.0.1#53 + + bound + + + + 0x7f19dd6f4010 + 2 + tcp + 127.0.0.1#53 + + listener + bound + + + + 0x7f19dd6f4718 + 1 + tcp + 127.0.0.1#953 + + listener + bound + + + + 0x7f19dd6f4bc8 + 1 + tcp + ::1#953 + + listener + bound + + + + + diff --git a/plugins/inputs/bind/testdata/xml/v3/server b/plugins/inputs/bind/testdata/xml/v3/server new file mode 100644 index 000000000..0d9206c69 --- /dev/null +++ b/plugins/inputs/bind/testdata/xml/v3/server @@ -0,0 +1,328 @@ + + + + + 2017-07-21T11:53:28Z + 2017-07-21T11:53:28Z + 2017-07-25T23:47:08Z + + 74941 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + 63672 + 373 + 18 + 3393 + 618 + 970 + 5735 + 139 + 1 + 22 + + + 74942 + 0 + 9250 + 0 + 0 + 0 + 0 + 260 + 0 + 35 + 0 + 0 + 63264 + 365 + 9250 + 0 + 0 + 49044 + 2752 + 60354 + 0 + 2452 + 122 + 0 + 11610 + 53750 + 11667 + 11 + 35 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 74648 + 258 + 0 + 0 + 59 + 0 + 0 + 0 + 0 + 0 + 0 + + + 2 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + + + + 61568 + 9126 + 1249 + 286 + 942 + 3933 + 21 + 13749 + 1699 + + + 92573 + 0 + 92135 + 0 + 8182 + 318 + 0 + 0 + 0 + 0 + 42 + 12 + 800 + 0 + 0 + 0 + 0 + 490 
+ 1398 + 0 + 3 + 0 + 90256 + 67322 + 22850 + 6 + 0 + 45760 + 45543 + 743 + 75 + 0 + 0 + 31 + 34 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + A + 195 + + + NS + 42 + + + CNAME + 7 + + + PTR + 48 + + + MX + 7 + + + TXT + 6 + + + AAAA + 4 + + + DS + 97 + + + RRSIG + 258 + + + NSEC + 89 + + + DNSKEY + 60 + + + !DS + 29 + + + NXDOMAIN + 25 + + + + 1021 + 314 + 1021 + 316 + + + 1904593 + 96 + 336094 + 369336 + 0 + 47518 + 769 + 519 + 1464363 + 392128 + 828966 + 393216 + 132096 + 132096 + + + + + + 0 + + + 0 + + + 0 + + + 0 + + + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 31 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 0 + + + + 1021 + 0 + 1021 + 0 + + + 0 + 0 + 0 + 0 + 0 + 0 + 0 + 64 + 287392 + 29608 + 29608 + 262144 + 1024 + 1024 + + + + diff --git a/plugins/inputs/bind/xml_stats_v2.go b/plugins/inputs/bind/xml_stats_v2.go new file mode 100644 index 000000000..5e17851fb --- /dev/null +++ b/plugins/inputs/bind/xml_stats_v2.go @@ -0,0 +1,168 @@ +package bind + +import ( + "encoding/xml" + "fmt" + "net" + "net/http" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +type v2Root struct { + XMLName xml.Name + Version string `xml:"version,attr"` + Statistics v2Statistics `xml:"bind>statistics"` +} + +// Omitted branches: socketmgr, taskmgr +type v2Statistics struct { + Version string `xml:"version,attr"` + Views []struct { + // Omitted branches: zones + Name string `xml:"name"` + RdTypes []v2Counter `xml:"rdtype"` + ResStats []v2Counter `xml:"resstat"` + Caches []struct { + Name string `xml:"name,attr"` + RRSets []v2Counter `xml:"rrset"` + } `xml:"cache"` + } `xml:"views>view"` + Server struct { + OpCodes []v2Counter `xml:"requests>opcode"` + RdTypes []v2Counter `xml:"queries-in>rdtype"` + NSStats []v2Counter `xml:"nsstat"` + ZoneStats []v2Counter `xml:"zonestat"` + ResStats []v2Counter `xml:"resstat"` + SockStats []v2Counter `xml:"sockstat"` + } `xml:"server"` + Memory struct { + Contexts []struct { + // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater + Id string `xml:"id"` + Name string `xml:"name"` + Total int64 `xml:"total"` + InUse int64 `xml:"inuse"` + } `xml:"contexts>context"` + Summary struct { + TotalUse int64 + InUse int64 + BlockSize int64 + ContextSize int64 + Lost int64 + } `xml:"summary"` + } `xml:"memory"` +} + +// BIND statistics v2 counter struct used throughout +type v2Counter struct { + Name string `xml:"name"` + Value int `xml:"counter"` +} + +// addXMLv2Counter adds a v2Counter array to a Telegraf Accumulator, with the specified tags +func addXMLv2Counter(acc telegraf.Accumulator, commonTags map[string]string, stats []v2Counter) { + grouper := metric.NewSeriesGrouper() + ts := time.Now() + for _, c := range stats { + tags := make(map[string]string) + + // Create local copy of tags since maps are reference types + for k, v := range commonTags { + tags[k] = v + } + + grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + } + + //Add grouped metrics + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } +} + +// readStatsXMLv2 decodes a BIND9 XML statistics version 2 document. Unlike the XML v3 statistics +// format, the v2 format does not support broken-out subsets. 
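+// The complete document is therefore fetched and decoded in a single request.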
+func (b *Bind) readStatsXMLv2(addr *url.URL, acc telegraf.Accumulator) error { + var stats v2Root + + resp, err := client.Get(addr.String()) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", addr, resp.Status) + } + + if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("Unable to decode XML document: %s", err) + } + + tags := map[string]string{"url": addr.Host} + host, port, _ := net.SplitHostPort(addr.Host) + tags["source"] = host + tags["port"] = port + + // Opcodes + tags["type"] = "opcode" + addXMLv2Counter(acc, tags, stats.Statistics.Server.OpCodes) + + // Query RDATA types + tags["type"] = "qtype" + addXMLv2Counter(acc, tags, stats.Statistics.Server.RdTypes) + + // Nameserver stats + tags["type"] = "nsstat" + addXMLv2Counter(acc, tags, stats.Statistics.Server.NSStats) + + // Zone stats + tags["type"] = "zonestat" + addXMLv2Counter(acc, tags, stats.Statistics.Server.ZoneStats) + + // Socket statistics + tags["type"] = "sockstat" + addXMLv2Counter(acc, tags, stats.Statistics.Server.SockStats) + + // Memory stats + fields := map[string]interface{}{ + "total_use": stats.Statistics.Memory.Summary.TotalUse, + "in_use": stats.Statistics.Memory.Summary.InUse, + "block_size": stats.Statistics.Memory.Summary.BlockSize, + "context_size": stats.Statistics.Memory.Summary.ContextSize, + "lost": stats.Statistics.Memory.Summary.Lost, + } + acc.AddGauge("bind_memory", fields, map[string]string{"url": addr.Host, "source": host, "port": port}) + + // Detailed, per-context memory stats + if b.GatherMemoryContexts { + for _, c := range stats.Statistics.Memory.Contexts { + tags := map[string]string{"url": addr.Host, "id": c.Id, "name": c.Name, "source": host, "port": port} + fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} + + acc.AddGauge("bind_memory_context", fields, tags) + } + } + + // Detailed, per-view stats + if b.GatherViews { + for _, v := range stats.Statistics.Views { + tags := map[string]string{"url": addr.Host, "view": v.Name} + + // Query RDATA types + tags["type"] = "qtype" + addXMLv2Counter(acc, tags, v.RdTypes) + + // Resolver stats + tags["type"] = "resstats" + addXMLv2Counter(acc, tags, v.ResStats) + } + } + + return nil +} diff --git a/plugins/inputs/bind/xml_stats_v3.go b/plugins/inputs/bind/xml_stats_v3.go new file mode 100644 index 000000000..89e4ea0b8 --- /dev/null +++ b/plugins/inputs/bind/xml_stats_v3.go @@ -0,0 +1,161 @@ +package bind + +import ( + "encoding/xml" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +// XML path: //statistics +// Omitted branches: socketmgr, taskmgr +type v3Stats struct { + Server v3Server `xml:"server"` + Views []v3View `xml:"views>view"` + Memory v3Memory `xml:"memory"` +} + +// XML path: //statistics/memory +type v3Memory struct { + Contexts []struct { + // Omitted nodes: references, maxinuse, blocksize, pools, hiwater, lowater + Id string `xml:"id"` + Name string `xml:"name"` + Total int64 `xml:"total"` + InUse int64 `xml:"inuse"` + } `xml:"contexts>context"` + Summary struct { + TotalUse int64 + InUse int64 + BlockSize int64 + ContextSize int64 + Lost int64 + } `xml:"summary"` +} + +// XML path: //statistics/server +type v3Server struct { + CounterGroups []v3CounterGroup `xml:"counters"` +} + +// XML path: //statistics/views/view +type v3View struct { + // Omitted branches: zones + Name 
string `xml:"name,attr"` + CounterGroups []v3CounterGroup `xml:"counters"` + Caches []struct { + Name string `xml:"name,attr"` + RRSets []struct { + Name string `xml:"name"` + Value int64 `xml:"counter"` + } `xml:"rrset"` + } `xml:"cache"` +} + +// Generic XML v3 doc fragment used in multiple places +type v3CounterGroup struct { + Type string `xml:"type,attr"` + Counters []struct { + Name string `xml:"name,attr"` + Value int64 `xml:",chardata"` + } `xml:"counter"` +} + +// addStatsXMLv3 walks a v3Stats struct and adds the values to the telegraf.Accumulator. +func (b *Bind) addStatsXMLv3(stats v3Stats, acc telegraf.Accumulator, hostPort string) { + grouper := metric.NewSeriesGrouper() + ts := time.Now() + host, port, _ := net.SplitHostPort(hostPort) + // Counter groups + for _, cg := range stats.Server.CounterGroups { + for _, c := range cg.Counters { + if cg.Type == "opcode" && strings.HasPrefix(c.Name, "RESERVED") { + continue + } + + tags := map[string]string{"url": hostPort, "source": host, "port": port, "type": cg.Type} + + grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + } + } + + // Memory stats + fields := map[string]interface{}{ + "total_use": stats.Memory.Summary.TotalUse, + "in_use": stats.Memory.Summary.InUse, + "block_size": stats.Memory.Summary.BlockSize, + "context_size": stats.Memory.Summary.ContextSize, + "lost": stats.Memory.Summary.Lost, + } + acc.AddGauge("bind_memory", fields, map[string]string{"url": hostPort, "source": host, "port": port}) + + // Detailed, per-context memory stats + if b.GatherMemoryContexts { + for _, c := range stats.Memory.Contexts { + tags := map[string]string{"url": hostPort, "source": host, "port": port, "id": c.Id, "name": c.Name} + fields := map[string]interface{}{"total": c.Total, "in_use": c.InUse} + + acc.AddGauge("bind_memory_context", fields, tags) + } + } + + // Detailed, per-view stats + if b.GatherViews { + for _, v := range stats.Views { + for _, cg := range v.CounterGroups { + for _, c := range cg.Counters { + tags := map[string]string{ + "url": hostPort, + "source": host, + "port": port, + "view": v.Name, + "type": cg.Type, + } + + grouper.Add("bind_counter", tags, ts, c.Name, c.Value) + } + } + } + } + + //Add grouped metrics + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } +} + +// readStatsXMLv3 takes a base URL to probe, and requests the individual statistics documents that +// we are interested in. These individual documents have a combined size which is significantly +// smaller than if we requested everything at once (e.g. taskmgr and socketmgr can be omitted). 
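+//
+// For example, with a base URL such as http://localhost:8053/xml/v3 (host, port, and
+// path depend on the BIND statistics-channels configuration and are illustrative
+// here), this issues three GET requests:
+//
+//   http://localhost:8053/xml/v3/server
+//   http://localhost:8053/xml/v3/net
+//   http://localhost:8053/xml/v3/mem
+//
+// and decodes each response into the same v3Stats value before accumulating metrics.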
+func (b *Bind) readStatsXMLv3(addr *url.URL, acc telegraf.Accumulator) error { + var stats v3Stats + + // Progressively build up full v3Stats struct by parsing the individual HTTP responses + for _, suffix := range [...]string{"/server", "/net", "/mem"} { + scrapeUrl := addr.String() + suffix + + resp, err := client.Get(scrapeUrl) + if err != nil { + return err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status: %s", scrapeUrl, resp.Status) + } + + if err := xml.NewDecoder(resp.Body).Decode(&stats); err != nil { + return fmt.Errorf("Unable to decode XML document: %s", err) + } + } + + b.addStatsXMLv3(stats, acc, addr.Host) + return nil +} diff --git a/plugins/inputs/burrow/README.md b/plugins/inputs/burrow/README.md index 19073a6ef..d30a054d6 100644 --- a/plugins/inputs/burrow/README.md +++ b/plugins/inputs/burrow/README.md @@ -50,7 +50,7 @@ Supported Burrow version: `1.x` # insecure_skip_verify = false ``` -### Partition Status mappings +### Group/Partition Status mappings * `OK` = 1 * `NOT_FOUND` = 2 @@ -66,9 +66,11 @@ Supported Burrow version: `1.x` * `burrow_group` (one event per each consumer group) - status (string, see Partition Status mappings) - status_code (int, `1..6`, see Partition status mappings) - - parition_count (int, `number of partitions`) + - partition_count (int, `number of partitions`) + - offset (int64, `total offset of all partitions`) - total_lag (int64, `totallag`) - lag (int64, `maxlag.current_lag || 0`) + - timestamp (int64, `end.timestamp`) * `burrow_partition` (one event per each topic partition) - status (string, see Partition Status mappings) diff --git a/plugins/inputs/burrow/burrow.go b/plugins/inputs/burrow/burrow.go index 91e3ffe15..f08563dbd 100644 --- a/plugins/inputs/burrow/burrow.go +++ b/plugins/inputs/burrow/burrow.go @@ -397,13 +397,11 @@ func (b *burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, ac partitionCount = len(r.Status.Partitions) } - // get max timestamp and offset from partitions list + // get max timestamp and total offset from partitions list offset := int64(0) timestamp := int64(0) for _, partition := range r.Status.Partitions { - if partition.End.Offset > offset { - offset = partition.End.Offset - } + offset += partition.End.Offset if partition.End.Timestamp > timestamp { timestamp = partition.End.Timestamp } @@ -434,6 +432,9 @@ func (b *burrow) genGroupStatusMetrics(r *apiResponse, cluster, group string, ac func (b *burrow) genGroupLagMetrics(r *apiResponse, cluster, group string, acc telegraf.Accumulator) { for _, partition := range r.Status.Partitions { + if !b.filterTopics.Match(partition.Topic) { + continue + } acc.AddFields( "burrow_partition", map[string]interface{}{ diff --git a/plugins/inputs/burrow/burrow_test.go b/plugins/inputs/burrow/burrow_test.go index 5ea85798a..cafbcb940 100644 --- a/plugins/inputs/burrow/burrow_test.go +++ b/plugins/inputs/burrow/burrow_test.go @@ -160,7 +160,7 @@ func TestBurrowGroup(t *testing.T) { "partition_count": 3, "total_lag": int64(0), "lag": int64(0), - "offset": int64(431323195), + "offset": int64(431323195 + 431322962 + 428636563), "timestamp": int64(1515609490008), }, } @@ -262,7 +262,7 @@ func TestFilterGroups(t *testing.T) { acc := &testutil.Accumulator{} plugin.Gather(acc) - require.Exactly(t, 4, len(acc.Metrics)) + require.Exactly(t, 1, len(acc.Metrics)) require.Empty(t, acc.Errors) } diff --git a/plugins/inputs/cassandra/README.md b/plugins/inputs/cassandra/README.md index 86c6a65a3..881bba3e0 
100644 --- a/plugins/inputs/cassandra/README.md +++ b/plugins/inputs/cassandra/README.md @@ -39,19 +39,19 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - [How to monitor Cassandra performance metrics](https://www.datadoghq.com/blog/how-to-monitor-cassandra-performance-metrics) - [Cassandra Documentation](http://docs.datastax.com/en/cassandra/3.x/cassandra/operations/monitoringCassandraTOC.html) -####measurement = javaGarbageCollector +#### measurement = javaGarbageCollector - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionTime - /java.lang:type=GarbageCollector,name=ConcurrentMarkSweep/CollectionCount - /java.lang:type=GarbageCollector,name=ParNew/CollectionTime - /java.lang:type=GarbageCollector,name=ParNew/CollectionCount -####measurement = javaMemory +#### measurement = javaMemory - /java.lang:type=Memory/HeapMemoryUsage - /java.lang:type=Memory/NonHeapMemoryUsage -####measurement = cassandraCache +#### measurement = cassandraCache - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Hits - /org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests @@ -64,11 +64,11 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Size - /org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity -####measurement = cassandraClient +#### measurement = cassandraClient - /org.apache.cassandra.metrics:type=Client,name=connectedNativeClients -####measurement = cassandraClientRequest +#### measurement = cassandraClientRequest - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=TotalLatency - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=TotalLatency @@ -81,24 +81,24 @@ Here is a list of metrics that might be useful to monitor your cassandra cluster - /org.apache.cassandra.metrics:type=ClientRequest,scope=Read,name=Failures - /org.apache.cassandra.metrics:type=ClientRequest,scope=Write,name=Failures -####measurement = cassandraCommitLog +#### measurement = cassandraCommitLog - /org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks - /org.apache.cassandra.metrics:type=CommitLog,name=TotalCommitLogSize -####measurement = cassandraCompaction +#### measurement = cassandraCompaction - /org.apache.cassandra.metrics:type=Compaction,name=CompletedTasks - /org.apache.cassandra.metrics:type=Compaction,name=PendingTasks - /org.apache.cassandra.metrics:type=Compaction,name=TotalCompactionsCompleted - /org.apache.cassandra.metrics:type=Compaction,name=BytesCompacted -####measurement = cassandraStorage +#### measurement = cassandraStorage - /org.apache.cassandra.metrics:type=Storage,name=Load -- /org.apache.cassandra.metrics:type=Storage,name=Exceptions +- /org.apache.cassandra.metrics:type=Storage,name=Exceptions -####measurement = cassandraTable +#### measurement = cassandraTable Using wildcards for "keyspace" and "scope" can create a lot of series as metrics will be reported for every table and keyspace including internal system tables. Specify a keyspace name and/or a table name to limit them. 
- /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=LiveDiskSpaceUsed @@ -110,7 +110,7 @@ Using wildcards for "keyspace" and "scope" can create a lot of series as metrics - /org.apache.cassandra.metrics:type=Table,keyspace=\*,scope=\*,name=WriteTotalLatency -####measurement = cassandraThreadPools +#### measurement = cassandraThreadPools - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=CompactionExecutor,name=ActiveTasks - /org.apache.cassandra.metrics:type=ThreadPools,path=internal,scope=AntiEntropyStage,name=ActiveTasks diff --git a/plugins/inputs/ceph/README.md b/plugins/inputs/ceph/README.md index 8d04c54b2..f20fd18be 100644 --- a/plugins/inputs/ceph/README.md +++ b/plugins/inputs/ceph/README.md @@ -2,9 +2,11 @@ Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. +Ceph has introduced a Telegraf and Influx plugin in the 13.x Mimic release. The Telegraf module sends to a Telegraf configured with a socket_listener. [Learn more in their docs](http://docs.ceph.com/docs/mimic/mgr/telegraf/) + *Admin Socket Stats* -This gatherer works by scanning the configured SocketDir for OSD and MON socket files. When it finds +This gatherer works by scanning the configured SocketDir for OSD, MON, MDS and RGW socket files. When it finds a MON socket, it runs **ceph --admin-daemon $file perfcounters_dump**. For OSDs it runs **ceph --admin-daemon $file perf dump** The resulting JSON is parsed and grouped into collections, based on top-level key. Top-level keys are @@ -60,6 +62,8 @@ the cluster. The currently supported commands are: ## prefix of MON and OSD socket files, used to determine socket type mon_prefix = "ceph-mon" osd_prefix = "ceph-osd" + mds_prefix = "ceph-mds" + rgw_prefix = "ceph-client" ## suffix used to identify socket files socket_suffix = "asok" @@ -85,67 +89,15 @@ the cluster. The currently supported commands are: gather_cluster_stats = false ``` -### Measurements & Fields: +### Metrics: *Admin Socket Stats* All fields are collected under the **ceph** measurement and stored as float64s. For a full list of fields, see the sample perf dumps in ceph_test.go. 
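+As a rough illustration of how a nested `perf dump` JSON document ends up as flat
+float64 fields (names such as `wait.avgcount` in the example output below), the sketch
+below recursively walks a decoded document and keys every numeric leaf by its
+dot-separated path. The sample document and helper names are hypothetical and heavily
+abridged; this is not the plugin's actual implementation:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// flatten records every numeric leaf of a decoded JSON value as a float64
+// field keyed by its dot-separated path (e.g. "wait.avgcount").
+func flatten(prefix string, value interface{}, fields map[string]interface{}) {
+	switch v := value.(type) {
+	case map[string]interface{}:
+		for k, child := range v {
+			key := k
+			if prefix != "" {
+				key = prefix + "." + k
+			}
+			flatten(key, child, fields)
+		}
+	case float64: // encoding/json decodes all JSON numbers as float64
+		fields[prefix] = v
+	}
+}
+
+func main() {
+	// Hypothetical, heavily abridged admin-socket output.
+	raw := []byte(`{"throttle-mon_client_bytes": {"val": 246, "wait": {"avgcount": 0, "sum": 0}}}`)
+
+	var dump map[string]interface{}
+	if err := json.Unmarshal(raw, &dump); err != nil {
+		panic(err)
+	}
+
+	// Each top-level key becomes the "collection" tag; its subtree becomes the fields.
+	for collection, body := range dump {
+		fields := map[string]interface{}{}
+		flatten("", body, fields)
+		fmt.Println(collection, fields)
+	}
+}
+```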
-*Cluster Stats* +All admin measurements will have the following tags: -* ceph\_osdmap - * epoch (float) - * full (boolean) - * nearfull (boolean) - * num\_in\_osds (float) - * num\_osds (float) - * num\_remremapped\_pgs (float) - * num\_up\_osds (float) - -* ceph\_pgmap - * bytes\_avail (float) - * bytes\_total (float) - * bytes\_used (float) - * data\_bytes (float) - * num\_pgs (float) - * op\_per\_sec (float) - * read\_bytes\_sec (float) - * version (float) - * write\_bytes\_sec (float) - * recovering\_bytes\_per\_sec (float) - * recovering\_keys\_per\_sec (float) - * recovering\_objects\_per\_sec (float) - -* ceph\_pgmap\_state - * count (float) - -* ceph\_usage - * bytes\_used (float) - * kb\_used (float) - * max\_avail (float) - * objects (float) - -* ceph\_pool\_usage - * bytes\_used (float) - * kb\_used (float) - * max\_avail (float) - * objects (float) - -* ceph\_pool\_stats - * op\_per\_sec (float) - * read\_bytes\_sec (float) - * write\_bytes\_sec (float) - * recovering\_object\_per\_sec (float) - * recovering\_bytes\_per\_sec (float) - * recovering\_keys\_per\_sec (float) - -### Tags: - -*Admin Socket Stats* - -All measurements will have the following tags: - -- type: either 'osd' or 'mon' to indicate which type of node was queried +- type: either 'osd', 'mon', 'mds' or 'rgw' to indicate which type of node was queried - id: a unique string identifier, parsed from the socket file name for the node - collection: the top-level key under which these fields were reported. Possible values are: - for MON nodes: @@ -183,41 +135,252 @@ All measurements will have the following tags: - throttle-objecter_ops - throttle-osd_client_bytes - throttle-osd_client_messages + - for MDS nodes: + - AsyncMessenger::Worker-0 + - AsyncMessenger::Worker-1 + - AsyncMessenger::Worker-2 + - finisher-PurgeQueue + - mds + - mds_cache + - mds_log + - mds_mem + - mds_server + - mds_sessions + - objecter + - purge_queue + - throttle-msgr_dispatch_throttler-mds + - throttle-objecter_bytes + - throttle-objecter_ops + - throttle-write_buf_throttle + - for RGW nodes: + - AsyncMessenger::Worker-0 + - AsyncMessenger::Worker-1 + - AsyncMessenger::Worker-2 + - cct + - finisher-radosclient + - mempool + - objecter + - rgw + - simple-throttler + - throttle-msgr_dispatch_throttler-radosclient + - throttle-objecter_bytes + - throttle-objecter_ops + - throttle-rgw_async_rados_ops *Cluster Stats* -* ceph\_pgmap\_state has the following tags: - * state (state for which the value applies e.g. 
active+clean, active+remapped+backfill) -* ceph\_pool\_usage has the following tags: - * id - * name -* ceph\_pool\_stats has the following tags: - * id - * name ++ ceph_health + - fields: + - status + - overall_status + +- ceph_osdmap + - fields: + - epoch (float) + - num_osds (float) + - num_up_osds (float) + - num_in_osds (float) + - full (bool) + - nearfull (bool) + - num_remapped_pgs (float) + ++ ceph_pgmap + - fields: + - version (float) + - num_pgs (float) + - data_bytes (float) + - bytes_used (float) + - bytes_avail (float) + - bytes_total (float) + - read_bytes_sec (float) + - write_bytes_sec (float) + - op_per_sec (float, exists only in ceph <10) + - read_op_per_sec (float) + - write_op_per_sec (float) + +- ceph_pgmap_state + - tags: + - state + - fields: + - count (float) + ++ ceph_usage + - fields: + - total_bytes (float) + - total_used_bytes (float) + - total_avail_bytes (float) + - total_space (float, exists only in ceph <0.84) + - total_used (float, exists only in ceph <0.84) + - total_avail (float, exists only in ceph <0.84) + +- ceph_pool_usage + - tags: + - name + - fields: + - kb_used (float) + - bytes_used (float) + - objects (float) + - percent_used (float) + - max_avail (float) + ++ ceph_pool_stats + - tags: + - name + - fields: + - read_bytes_sec (float) + - write_bytes_sec (float) + - op_per_sec (float, exists only in ceph <10) + - read_op_per_sec (float) + - write_op_per_sec (float) + - recovering_objects_per_sec (float) + - recovering_bytes_per_sec (float) + - recovering_keys_per_sec (float) + ### Example Output: -*Admin Socket Stats* - -
-telegraf --config /etc/telegraf/telegraf.conf --config-directory /etc/telegraf/telegraf.d --input-filter ceph --test
-* Plugin: ceph, Collection 1
-> ceph,collection=paxos, id=node-2,role=openstack,type=mon accept_timeout=0,begin=14931264,begin_bytes.avgcount=14931264,begin_bytes.sum=180309683362,begin_keys.avgcount=0,begin_keys.sum=0,begin_latency.avgcount=14931264,begin_latency.sum=9293.29589,collect=1,collect_bytes.avgcount=1,collect_bytes.sum=24,collect_keys.avgcount=1,collect_keys.sum=1,collect_latency.avgcount=1,collect_latency.sum=0.00028,collect_timeout=0,collect_uncommitted=0,commit=14931264,commit_bytes.avgcount=0,commit_bytes.sum=0,commit_keys.avgcount=0,commit_keys.sum=0,commit_latency.avgcount=0,commit_latency.sum=0,lease_ack_timeout=0,lease_timeout=0,new_pn=0,new_pn_latency.avgcount=0,new_pn_latency.sum=0,refresh=14931264,refresh_latency.avgcount=14931264,refresh_latency.sum=8706.98498,restart=4,share_state=0,share_state_bytes.avgcount=0,share_state_bytes.sum=0,share_state_keys.avgcount=0,share_state_keys.sum=0,start_leader=0,start_peon=1,store_state=14931264,store_state_bytes.avgcount=14931264,store_state_bytes.sum=353119959211,store_state_keys.avgcount=14931264,store_state_keys.sum=289807523,store_state_latency.avgcount=14931264,store_state_latency.sum=10952.835724 1462821234814535148
-> ceph,collection=throttle-mon_client_bytes,id=node-2,type=mon get=1413017,get_or_fail_fail=0,get_or_fail_success=0,get_sum=71211705,max=104857600,put=1413013,put_sum=71211459,take=0,take_sum=0,val=246,wait.avgcount=0,wait.sum=0 1462821234814737219
-> ceph,collection=throttle-mon_daemon_bytes,id=node-2,type=mon get=4058121,get_or_fail_fail=0,get_or_fail_success=0,get_sum=6027348117,max=419430400,put=4058121,put_sum=6027348117,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814815661
-> ceph,collection=throttle-msgr_dispatch_throttler-mon,id=node-2,type=mon get=54276277,get_or_fail_fail=0,get_or_fail_success=0,get_sum=370232877040,max=104857600,put=54276277,put_sum=370232877040,take=0,take_sum=0,val=0,wait.avgcount=0,wait.sum=0 1462821234814872064
-
- *Cluster Stats* -
-> ceph_osdmap,host=ceph-mon-0 epoch=170772,full=false,nearfull=false,num_in_osds=340,num_osds=340,num_remapped_pgs=0,num_up_osds=340 1468841037000000000
-> ceph_pgmap,host=ceph-mon-0 bytes_avail=634895531270144,bytes_total=812117151809536,bytes_used=177221620539392,data_bytes=56979991615058,num_pgs=22952,op_per_sec=15869,read_bytes_sec=43956026,version=39387592,write_bytes_sec=165344818 1468841037000000000
-> ceph_pgmap_state,host=ceph-mon-0,state=active+clean count=22952 1468928660000000000
-> ceph_pgmap_state,host=ceph-mon-0,state=active+degraded count=16 1468928660000000000
-> ceph_usage,host=ceph-mon-0 total_avail_bytes=634895514791936,total_bytes=812117151809536,total_used_bytes=177221637017600 1468841037000000000
-> ceph_pool_usage,host=ceph-mon-0,id=150,name=cinder.volumes bytes_used=12648553794802,kb_used=12352103316,max_avail=154342562489244,objects=3026295 1468841037000000000
-> ceph_pool_usage,host=ceph-mon-0,id=182,name=cinder.volumes.flash bytes_used=8541308223964,kb_used=8341121313,max_avail=39388593563936,objects=2075066 1468841037000000000
-> ceph_pool_stats,host=ceph-mon-0,id=150,name=cinder.volumes op_per_sec=1706,read_bytes_sec=28671674,write_bytes_sec=29994541 1468841037000000000
-> ceph_pool_stats,host=ceph-mon-0,id=182,name=cinder.volumes.flash op_per_sec=9748,read_bytes_sec=9605524,write_bytes_sec=45593310 1468841037000000000
-
+``` +ceph_health,host=stefanmon1 overall_status="",status="HEALTH_WARN" 1587118504000000000 +ceph_osdmap,host=stefanmon1 epoch=203,full=false,nearfull=false,num_in_osds=8,num_osds=9,num_remapped_pgs=0,num_up_osds=8 1587118504000000000 +ceph_pgmap,host=stefanmon1 bytes_avail=849879302144,bytes_total=858959904768,bytes_used=9080602624,data_bytes=5055,num_pgs=504,read_bytes_sec=0,read_op_per_sec=0,version=0,write_bytes_sec=0,write_op_per_sec=0 1587118504000000000 +ceph_pgmap_state,host=stefanmon1,state=active+clean count=504 1587118504000000000 +ceph_usage,host=stefanmon1 total_avail_bytes=849879302144,total_bytes=858959904768,total_used_bytes=196018176 1587118505000000000 +ceph_pool_usage,host=stefanmon1,name=cephfs_data bytes_used=0,kb_used=0,max_avail=285804986368,objects=0,percent_used=0 1587118505000000000 +ceph_pool_stats,host=stefanmon1,name=cephfs_data read_bytes_sec=0,read_op_per_sec=0,recovering_bytes_per_sec=0,recovering_keys_per_sec=0,recovering_objects_per_sec=0,write_bytes_sec=0,write_op_per_sec=0 1587118506000000000 +``` + +*Admin Socket Stats* + +``` +> ceph,collection=cct,host=stefanmon1,id=stefanmon1,type=monitor total_workers=0,unhealthy_workers=0 1587117563000000000 +> ceph,collection=mempool,host=stefanmon1,id=stefanmon1,type=monitor bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=719152,buffer_anon_items=192,buffer_meta_bytes=352,buffer_meta_items=4,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=15872,osdmap_items=138,osdmap_mapping_bytes=63112,osdmap_mapping_items=7626,pgmap_bytes=38680,pgmap_items=477,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117563000000000 +> ceph,collection=throttle-mon_client_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=1041157,get_or_fail_fail=0,get_or_fail_success=1041157,get_started=0,get_sum=64928901,max=104857600,put=1041157,put_sum=64928901,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-mon,host=stefanmon1,id=stefanmon1,type=monitor get=12695426,get_or_fail_fail=0,get_or_fail_success=12695426,get_started=0,get_sum=42542216884,max=104857600,put=12695426,put_sum=42542216884,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000 +> ceph,collection=finisher-mon_finisher,host=stefanmon1,id=stefanmon1,type=monitor complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117563000000000 +> ceph,collection=finisher-monstore,host=stefanmon1,id=stefanmon1,type=monitor complete_latency.avgcount=1609831,complete_latency.avgtime=0.015857621,complete_latency.sum=25528.09131035,queue_len=0 1587117563000000000 +> ceph,collection=mon,host=stefanmon1,id=stefanmon1,type=monitor election_call=25,election_lose=0,election_win=22,num_elections=94,num_sessions=3,session_add=174679,session_rm=439316,session_trim=137 1587117563000000000 +> 
ceph,collection=throttle-mon_daemon_bytes,host=stefanmon1,id=stefanmon1,type=monitor get=72697,get_or_fail_fail=0,get_or_fail_success=72697,get_started=0,get_sum=32261199,max=419430400,put=72697,put_sum=32261199,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000 +> ceph,collection=rocksdb,host=stefanmon1,id=stefanmon1,type=monitor compact=1,compact_queue_len=0,compact_queue_merge=1,compact_range=19126,get=62449211,get_latency.avgcount=62449211,get_latency.avgtime=0.000022216,get_latency.sum=1387.371811726,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=0,submit_latency.avgtime=0,submit_latency.sum=0,submit_sync_latency.avgcount=3219961,submit_sync_latency.avgtime=0.007532173,submit_sync_latency.sum=24253.303584224,submit_transaction=0,submit_transaction_sync=3219961 1587117563000000000 +> ceph,collection=AsyncMessenger::Worker-0,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=148317,msgr_created_connections=162806,msgr_recv_bytes=11557888328,msgr_recv_messages=5113369,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=868.377161686,msgr_running_send_time=1626.525392721,msgr_running_total_time=4222.235694322,msgr_send_bytes=91516226816,msgr_send_messages=6973706 1587117563000000000 +> ceph,collection=AsyncMessenger::Worker-2,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=146396,msgr_created_connections=159788,msgr_recv_bytes=2162802496,msgr_recv_messages=689168,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=164.148550562,msgr_running_send_time=153.462890368,msgr_running_total_time=644.188791379,msgr_send_bytes=7422484152,msgr_send_messages=749381 1587117563000000000 +> ceph,collection=cluster,host=stefanmon1,id=stefanmon1,type=monitor num_bytes=5055,num_mon=3,num_mon_quorum=3,num_object=245,num_object_degraded=0,num_object_misplaced=0,num_object_unfound=0,num_osd=9,num_osd_in=8,num_osd_up=8,num_pg=504,num_pg_active=504,num_pg_active_clean=504,num_pg_peering=0,num_pool=17,osd_bytes=858959904768,osd_bytes_avail=849889787904,osd_bytes_used=9070116864,osd_epoch=203 1587117563000000000 +> ceph,collection=paxos,host=stefanmon1,id=stefanmon1,type=monitor 
accept_timeout=1,begin=1609847,begin_bytes.avgcount=1609847,begin_bytes.sum=41408662074,begin_keys.avgcount=1609847,begin_keys.sum=4829541,begin_latency.avgcount=1609847,begin_latency.avgtime=0.007213392,begin_latency.sum=11612.457661116,collect=0,collect_bytes.avgcount=0,collect_bytes.sum=0,collect_keys.avgcount=0,collect_keys.sum=0,collect_latency.avgcount=0,collect_latency.avgtime=0,collect_latency.sum=0,collect_timeout=1,collect_uncommitted=17,commit=1609831,commit_bytes.avgcount=1609831,commit_bytes.sum=41087428442,commit_keys.avgcount=1609831,commit_keys.sum=11637931,commit_latency.avgcount=1609831,commit_latency.avgtime=0.006236333,commit_latency.sum=10039.442388355,lease_ack_timeout=0,lease_timeout=0,new_pn=33,new_pn_latency.avgcount=33,new_pn_latency.avgtime=3.844272773,new_pn_latency.sum=126.86100151,refresh=1609856,refresh_latency.avgcount=1609856,refresh_latency.avgtime=0.005900486,refresh_latency.sum=9498.932866761,restart=109,share_state=2,share_state_bytes.avgcount=2,share_state_bytes.sum=39612,share_state_keys.avgcount=2,share_state_keys.sum=2,start_leader=22,start_peon=0,store_state=14,store_state_bytes.avgcount=14,store_state_bytes.sum=51908281,store_state_keys.avgcount=14,store_state_keys.sum=7016,store_state_latency.avgcount=14,store_state_latency.avgtime=11.668377665,store_state_latency.sum=163.357287311 1587117563000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-mon-mgrc,host=stefanmon1,id=stefanmon1,type=monitor get=13225,get_or_fail_fail=0,get_or_fail_success=13225,get_started=0,get_sum=158700,max=104857600,put=13225,put_sum=158700,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117563000000000 +> ceph,collection=AsyncMessenger::Worker-1,host=stefanmon1,id=stefanmon1,type=monitor msgr_active_connections=147680,msgr_created_connections=162374,msgr_recv_bytes=29781706740,msgr_recv_messages=7170733,msgr_running_fast_dispatch_time=0,msgr_running_recv_time=1728.559151358,msgr_running_send_time=2086.681244508,msgr_running_total_time=6084.532916585,msgr_send_bytes=94062125718,msgr_send_messages=9161564 1587117563000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=0,type=osd get=281745,get_or_fail_fail=0,get_or_fail_success=281745,get_started=0,get_sum=446024457,max=104857600,put=281745,put_sum=446024457,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=0,type=osd get=275707,get_or_fail_fail=0,get_or_fail_success=0,get_started=275707,get_sum=185073179842,max=67108864,put=268870,put_sum=185073179842,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=0,type=osd get=2606982,get_or_fail_fail=0,get_or_fail_success=2606982,get_started=0,get_sum=5224391928,max=104857600,put=2606982,put_sum=5224391928,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=rocksdb,host=stefanosd1,id=0,type=osd 
compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1570,get_latency.avgcount=1570,get_latency.avgtime=0.000051233,get_latency.sum=0.080436788,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=275707,submit_latency.avgtime=0.000174936,submit_latency.sum=48.231345334,submit_sync_latency.avgcount=268870,submit_sync_latency.avgtime=0.006097313,submit_sync_latency.sum=1639.384555624,submit_transaction=275707,submit_transaction_sync=268870 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=0,type=osd get=2606982,get_or_fail_fail=0,get_or_fail_success=2606982,get_started=0,get_sum=5224391928,max=104857600,put=2606982,put_sum=5224391928,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=0,type=osd get=2610285,get_or_fail_fail=0,get_or_fail_success=2610285,get_started=0,get_sum=5231011140,max=104857600,put=2610285,put_sum=5231011140,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=0,type=osd msgr_active_connections=2093,msgr_created_connections=29142,msgr_recv_bytes=7214238199,msgr_recv_messages=3928206,msgr_running_fast_dispatch_time=171.289615064,msgr_running_recv_time=278.531155966,msgr_running_send_time=489.482588813,msgr_running_total_time=1134.004853662,msgr_send_bytes=9814725232,msgr_send_messages=3814927 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=0,type=osd get=488206,get_or_fail_fail=0,get_or_fail_success=488206,get_started=0,get_sum=104085134,max=104857600,put=488206,put_sum=104085134,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=0,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000 +> ceph,collection=recoverystate_perf,host=stefanosd1,id=0,type=osd 
activating_latency.avgcount=87,activating_latency.avgtime=0.114348341,activating_latency.sum=9.948305683,active_latency.avgcount=25,active_latency.avgtime=1790.961574431,active_latency.sum=44774.039360795,backfilling_latency.avgcount=0,backfilling_latency.avgtime=0,backfilling_latency.sum=0,clean_latency.avgcount=25,clean_latency.avgtime=1790.830827794,clean_latency.sum=44770.770694867,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=141,getinfo_latency.avgtime=0.446233476,getinfo_latency.sum=62.918920183,getlog_latency.avgcount=87,getlog_latency.avgtime=0.007708069,getlog_latency.sum=0.670602073,getmissing_latency.avgcount=87,getmissing_latency.avgtime=0.000077594,getmissing_latency.sum=0.006750701,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=166,initial_latency.avgtime=0.001313715,initial_latency.sum=0.218076764,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=141,peering_latency.avgtime=0.948324273,peering_latency.sum=133.713722563,primary_latency.avgcount=79,primary_latency.avgtime=567.706192991,primary_latency.sum=44848.78924634,recovered_latency.avgcount=87,recovered_latency.avgtime=0.000378284,recovered_latency.sum=0.032910791,recovering_latency.avgcount=2,recovering_latency.avgtime=0.338242008,recovering_latency.sum=0.676484017,replicaactive_latency.avgcount=23,replicaactive_latency.avgtime=1790.893991295,replicaactive_latency.sum=41190.561799786,repnotrecovering_latency.avgcount=25,repnotrecovering_latency.avgtime=1647.627024984,repnotrecovering_latency.sum=41190.675624616,reprecovering_latency.avgcount=2,reprecovering_latency.avgtime=0.311884638,reprecovering_latency.sum=0.623769276,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=2,repwaitrecoveryreserved_latency.avgtime=0.000462873,repwaitrecoveryreserved_latency.sum=0.000925746,reset_latency.avgcount=372,reset_latency.avgtime=0.125056393,reset_latency.sum=46.520978537,start_latency.avgcount=372,start_latency.avgtime=0.000109397,start_latency.sum=0.040695881,started_latency.avgcount=206,started_latency.avgtime=418.299777245,started_latency.sum=86169.754112641,stray_latency.avgcount=231,stray_latency.avgtime=0.98203205,stray_latency.sum=226.849403565,waitactingchange_latency.avgcount=0,waitactingchange_latency.avgtime=0,waitactingchange_latency.sum=0,waitlocalbackfillreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.avgtime=0,waitlocalbackfillreserved_latency.sum=0,waitlocalrecoveryreserved_latency.avgcount=2,waitlocalrecoveryreserved_latency.avgtime=0.002802377,waitlocalrecoveryreserved_latency.sum=0.005604755,waitremotebackfillreserved_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,waitremotebackfillreserved_latency.sum=0,waitremoterecoveryreserved_latency.avgcount=2,waitremoterecoveryreserved_latency.avgtime=0.012855439,waitremoterecoveryreserved_latency.sum=0.025710878,waitupthru_latency.avgcount=87,waitupthru_latency.avgtime=0.805727895,waitupthru_latency.sum=70.09832695 1587117698000000000 +> ceph,collection=cct,host=stefanosd1,id=0,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=0,type=osd 
get=2610285,get_or_fail_fail=0,get_or_fail_success=2610285,get_started=0,get_sum=5231011140,max=104857600,put=2610285,put_sum=5231011140,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=bluefs,host=stefanosd1,id=0,type=osd bytes_written_slow=0,bytes_written_sst=9018781,bytes_written_wal=831081573,db_total_bytes=4294967296,db_used_bytes=434110464,files_written_sst=3,files_written_wal=2,gift_bytes=0,log_bytes=134291456,log_compactions=1,logged_bytes=1101668352,max_bytes_db=1234173952,max_bytes_slow=0,max_bytes_wal=0,num_files=11,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000 +> ceph,collection=mempool,host=stefanosd1,id=0,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=10600,bluefs_items=458,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=622592,bluestore_cache_data_items=43,bluestore_cache_onode_bytes=249280,bluestore_cache_onode_items=380,bluestore_cache_other_bytes=192678,bluestore_cache_other_items=20199,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=8272,bluestore_txc_items=11,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=670130,bluestore_writing_deferred_items=176,bluestore_writing_items=0,buffer_anon_bytes=2412465,buffer_anon_items=297,buffer_meta_bytes=5896,buffer_meta_items=67,mds_co_bytes=0,mds_co_items=0,osd_bytes=2124800,osd_items=166,osd_mapbl_bytes=155152,osd_mapbl_items=10,osd_pglog_bytes=3214704,osd_pglog_items=6288,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000 +> ceph,collection=osd,host=stefanosd1,id=0,type=osd 
agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=21,map_message_epochs=40,map_messages=31,messages_delayed_for_map=0,missed_crc=0,numpg=166,numpg_primary=62,numpg_removing=0,numpg_replica=104,numpg_stray=0,object_ctx_cache_hit=476529,object_ctx_cache_total=476536,op=476525,op_before_dequeue_op_lat.avgcount=755708,op_before_dequeue_op_lat.avgtime=0.000205759,op_before_dequeue_op_lat.sum=155.493843473,op_before_queue_op_lat.avgcount=755702,op_before_queue_op_lat.avgtime=0.000047877,op_before_queue_op_lat.sum=36.181069552,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=476525,op_latency.avgtime=0.000365956,op_latency.sum=174.387387878,op_out_bytes=10882,op_prepare_latency.avgcount=476527,op_prepare_latency.avgtime=0.000205307,op_prepare_latency.sum=97.834380034,op_process_latency.avgcount=476525,op_process_latency.avgtime=0.000139616,op_process_latency.sum=66.530847665,op_r=476521,op_r_latency.avgcount=476521,op_r_latency.avgtime=0.00036559,op_r_latency.sum=174.21148267,op_r_out_bytes=10882,op_r_prepare_latency.avgcount=476523,op_r_prepare_latency.avgtime=0.000205302,op_r_prepare_latency.sum=97.831473175,op_r_process_latency.avgcount=476521,op_r_process_latency.avgtime=0.000139396,op_r_process_latency.sum=66.425498624,op_rw=2,op_rw_in_bytes=0,op_rw_latency.avgcount=2,op_rw_latency.avgtime=0.048818975,op_rw_latency.sum=0.097637951,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=2,op_rw_prepare_latency.avgtime=0.000467887,op_rw_prepare_latency.sum=0.000935775,op_rw_process_latency.avgcount=2,op_rw_process_latency.avgtime=0.013741256,op_rw_process_latency.sum=0.027482512,op_w=2,op_w_in_bytes=0,op_w_latency.avgcount=2,op_w_latency.avgtime=0.039133628,op_w_latency.sum=0.078267257,op_w_prepare_latency.avgcount=2,op_w_prepare_latency.avgtime=0.000985542,op_w_prepare_latency.sum=0.001971084,op_w_process_latency.avgcount=2,op_w_process_latency.avgtime=0.038933264,op_w_process_latency.sum=0.077866529,op_wip=0,osd_map_bl_cache_hit=22,osd_map_bl_cache_miss=40,osd_map_cache_hit=4570,osd_map_cache_miss=15,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=2050,osd_pg_fastinfo=265780,osd_pg_info=274542,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=2,push_out_bytes=10,recovery_bytes=10,recovery_ops=2,stat_bytes=107369988096,stat_bytes_avail=106271539200,stat_bytes_used=1098448896,subop=253554,subop_in_bytes=168644225,subop_latency.avgcount=253554,subop_latency.avgtime=0.0073036,subop_latency.sum=1851.857230388,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=253554,subop_w_in_bytes=168644225,subop_w_latency.avgcount=253554,subop_w_latency.avgtime=0.0073036,subop_w_latency.sum=1851.857230388,tier_clean=0,tier_delay=0,tier_dirty=0,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=0,type=osd 
get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=0,type=osd msgr_active_connections=2055,msgr_created_connections=27411,msgr_recv_bytes=6431950009,msgr_recv_messages=3552443,msgr_running_fast_dispatch_time=162.271664213,msgr_running_recv_time=254.307853033,msgr_running_send_time=503.037285799,msgr_running_total_time=1130.21070681,msgr_send_bytes=10865436237,msgr_send_messages=3523374 1587117698000000000 +> ceph,collection=bluestore,host=stefanosd1,id=0,type=osd bluestore_allocated=24641536,bluestore_blob_split=0,bluestore_blobs=88,bluestore_buffer_bytes=622592,bluestore_buffer_hit_bytes=160578,bluestore_buffer_miss_bytes=540236,bluestore_buffers=43,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=88,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=532102,bluestore_onode_misses=388,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=380,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1987856,bluestore_txc=275707,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=60,bluestore_write_small_bytes=343843,bluestore_write_small_deferred=22,bluestore_write_small_new=38,bluestore_write_small_pre_read=22,bluestore_write_small_unused=0,commit_lat.avgcount=275707,commit_lat.avgtime=0.00699778,commit_lat.sum=1929.337103334,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=67,csum_lat.avgtime=0.000032601,csum_lat.sum=0.002184323,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=268870,kv_commit_lat.avgtime=0.006365428,kv_commit_lat.sum=1711.472749866,kv_final_lat.avgcount=268867,kv_final_lat.avgtime=0.000043227,kv_final_lat.sum=11.622427109,kv_flush_lat.avgcount=268870,kv_flush_lat.avgtime=0.000000223,kv_flush_lat.sum=0.060141588,kv_sync_lat.avgcount=268870,kv_sync_lat.avgtime=0.006365652,kv_sync_lat.sum=1711.532891454,omap_lower_bound_lat.avgcount=2,omap_lower_bound_lat.avgtime=0.000006524,omap_lower_bound_lat.sum=0.000013048,omap_next_lat.avgcount=6704,omap_next_lat.avgtime=0.000004721,omap_next_lat.sum=0.031654097,omap_seek_to_first_lat.avgcount=323,omap_seek_to_first_lat.avgtime=0.00000522,omap_seek_to_first_lat.sum=0.00168614,omap_upper_bound_lat.avgcount=4,omap_upper_bound_lat.avgtime=0.000013086,omap_upper_bound_lat.sum=0.000052344,read_lat.avgcount=227,read_lat.avgtime=0.000699457,read_lat.sum=0.158776879,read_onode_meta_lat.avgcount=311,read_onode_meta_lat.avgtime=0.000072207,read_onode_meta_lat.sum=0.022456667,read_wait_aio_lat.avgcount=84,read_wait_aio_lat.avgtime=0.001556141,read_wait_aio_lat.sum=0.130715885,state_aio_wait_lat.avgcount=275707,state_aio_wait_lat.avgtime=0.000000345,state_aio_wait_lat.sum=0.095246457,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=275696,state_done_lat.avgtime=0.00000286,
state_done_lat.sum=0.788700007,state_finishing_lat.avgcount=275696,state_finishing_lat.avgtime=0.000000302,state_finishing_lat.sum=0.083437168,state_io_done_lat.avgcount=275707,state_io_done_lat.avgtime=0.000001041,state_io_done_lat.sum=0.287025147,state_kv_commiting_lat.avgcount=275707,state_kv_commiting_lat.avgtime=0.006424459,state_kv_commiting_lat.sum=1771.268407864,state_kv_done_lat.avgcount=275707,state_kv_done_lat.avgtime=0.000001627,state_kv_done_lat.sum=0.448805853,state_kv_queued_lat.avgcount=275707,state_kv_queued_lat.avgtime=0.000488565,state_kv_queued_lat.sum=134.7009424,state_prepare_lat.avgcount=275707,state_prepare_lat.avgtime=0.000082464,state_prepare_lat.sum=22.736065534,submit_lat.avgcount=275707,submit_lat.avgtime=0.000120236,submit_lat.sum=33.149934412,throttle_lat.avgcount=275707,throttle_lat.avgtime=0.000001571,throttle_lat.sum=0.433185935,write_pad_bytes=151773,write_penalty_read_ops=0 1587117698000000000 +> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=0,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000 +> ceph,collection=objecter,host=stefanosd1,id=0,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000 +> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=0,type=osd complete_latency.avgcount=11,complete_latency.avgtime=0.003447516,complete_latency.sum=0.037922681,queue_len=0 1587117698000000000 +> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=0,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=0,type=osd msgr_active_connections=2128,msgr_created_connections=33685,msgr_recv_bytes=8679123051,msgr_recv_messages=4200356,msgr_running_fast_dispatch_time=151.889337454,msgr_running_recv_time=297.632294886,msgr_running_send_time=599.20020523,msgr_running_total_time=1321.361931202,msgr_send_bytes=11716202897,msgr_send_messages=4347418 1587117698000000000 +> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=0,type=osd get=476554,get_or_fail_fail=0,get_or_fail_success=476554,get_started=0,get_sum=103413728,max=524288000,put=476587,put_sum=103413728,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=0,type=osd get=11,get_or_fail_fail=0,get_or_fail_success=11,get_started=0,get_sum=7723117,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7723117,wait.avgcount=0,wait.avgtime=0,wait.sum=0 
1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=1,type=osd get=860895,get_or_fail_fail=0,get_or_fail_success=860895,get_started=0,get_sum=596482256,max=104857600,put=860895,put_sum=596482256,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=1,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000 +> ceph,collection=osd,host=stefanosd1,id=1,type=osd agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=29,map_message_epochs=50,map_messages=39,messages_delayed_for_map=0,missed_crc=0,numpg=188,numpg_primary=71,numpg_removing=0,numpg_replica=117,numpg_stray=0,object_ctx_cache_hit=1349777,object_ctx_cache_total=2934118,op=1319230,op_before_dequeue_op_lat.avgcount=3792053,op_before_dequeue_op_lat.avgtime=0.000405802,op_before_dequeue_op_lat.sum=1538.826381623,op_before_queue_op_lat.avgcount=3778690,op_before_queue_op_lat.avgtime=0.000033273,op_before_queue_op_lat.sum=125.731131596,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=1319230,op_latency.avgtime=0.002858138,op_latency.sum=3770.541581676,op_out_bytes=1789210,op_prepare_latency.avgcount=1336472,op_prepare_latency.avgtime=0.000279458,op_prepare_latency.sum=373.488913339,op_process_latency.avgcount=1319230,op_process_latency.avgtime=0.002666408,op_process_latency.sum=3517.606407526,op_r=1075394,op_r_latency.avgcount=1075394,op_r_latency.avgtime=0.000303779,op_r_latency.sum=326.682443032,op_r_out_bytes=1789210,op_r_prepare_latency.avgcount=1075394,op_r_prepare_latency.avgtime=0.000171228,op_r_prepare_latency.sum=184.138580631,op_r_process_latency.avgcount=1075394,op_r_process_latency.avgtime=0.00011609,op_r_process_latency.sum=124.842894319,op_rw=243832,op_rw_in_bytes=0,op_rw_latency.avgcount=243832,op_rw_latency.avgtime=0.014123636,op_rw_latency.sum=3443.79445124,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=261072,op_rw_prepare_latency.avgtime=0.000725265,op_rw_prepare_latency.sum=189.346543463,op_rw_process_latency.avgcount=243832,op_rw_process_latency.avgtime=0.013914089,op_rw_process_latency.sum=3392.700241086,op_w=4,op_w_in_bytes=0,op_w_latency.avgcount=4,op_w_latency.avgtime=0.016171851,op_w_latency.sum=0.064687404,op_w_prepare_latency.avgcount=6,op_w_prepare_latency.avgtime=0.00063154,op_w_prepare_latency.sum=0.003789245,op_w_process_latency.avgcount=4,op_w_process_latency.avgtime=0.01581803,op_w_process_latency.sum=0.063272121,op_wip=0,osd_map_bl_cache_hit=36,osd_map_bl_cache_miss=40,osd_map_cache_hit=5404,osd_map_cache_miss=14,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=2333,osd_pg_fastinfo=576157,osd_pg_info=591751,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat
.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=22,push_out_bytes=0,recovery_bytes=0,recovery_ops=21,stat_bytes=107369988096,stat_bytes_avail=106271997952,stat_bytes_used=1097990144,subop=306946,subop_in_bytes=204236742,subop_latency.avgcount=306946,subop_latency.avgtime=0.006744881,subop_latency.sum=2070.314452989,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=306946,subop_w_in_bytes=204236742,subop_w_latency.avgcount=306946,subop_w_latency.avgtime=0.006744881,subop_w_latency.sum=2070.314452989,tier_clean=0,tier_delay=0,tier_dirty=8,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000 +> ceph,collection=objecter,host=stefanosd1,id=1,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000 +> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=1,type=osd msgr_active_connections=1356,msgr_created_connections=12290,msgr_recv_bytes=8577187219,msgr_recv_messages=6387040,msgr_running_fast_dispatch_time=475.903632306,msgr_running_recv_time=425.937196699,msgr_running_send_time=783.676217521,msgr_running_total_time=1989.242459076,msgr_send_bytes=12583034449,msgr_send_messages=6074344 1587117698000000000 +> ceph,collection=bluestore,host=stefanosd1,id=1,type=osd 
bluestore_allocated=24182784,bluestore_blob_split=0,bluestore_blobs=88,bluestore_buffer_bytes=614400,bluestore_buffer_hit_bytes=142047,bluestore_buffer_miss_bytes=541480,bluestore_buffers=41,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=88,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=1403948,bluestore_onode_misses=1584732,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=459,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1985647,bluestore_txc=593150,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=58,bluestore_write_small_bytes=343091,bluestore_write_small_deferred=20,bluestore_write_small_new=38,bluestore_write_small_pre_read=20,bluestore_write_small_unused=0,commit_lat.avgcount=593150,commit_lat.avgtime=0.006514834,commit_lat.sum=3864.274280733,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=60,csum_lat.avgtime=0.000028258,csum_lat.sum=0.001695512,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=578129,kv_commit_lat.avgtime=0.00570707,kv_commit_lat.sum=3299.423186928,kv_final_lat.avgcount=578124,kv_final_lat.avgtime=0.000042752,kv_final_lat.sum=24.716171934,kv_flush_lat.avgcount=578129,kv_flush_lat.avgtime=0.000000209,kv_flush_lat.sum=0.121169044,kv_sync_lat.avgcount=578129,kv_sync_lat.avgtime=0.00570728,kv_sync_lat.sum=3299.544355972,omap_lower_bound_lat.avgcount=22,omap_lower_bound_lat.avgtime=0.000005979,omap_lower_bound_lat.sum=0.000131539,omap_next_lat.avgcount=13248,omap_next_lat.avgtime=0.000004836,omap_next_lat.sum=0.064077797,omap_seek_to_first_lat.avgcount=525,omap_seek_to_first_lat.avgtime=0.000004906,omap_seek_to_first_lat.sum=0.002575786,omap_upper_bound_lat.avgcount=0,omap_upper_bound_lat.avgtime=0,omap_upper_bound_lat.sum=0,read_lat.avgcount=406,read_lat.avgtime=0.000383254,read_lat.sum=0.155601529,read_onode_meta_lat.avgcount=483,read_onode_meta_lat.avgtime=0.000008805,read_onode_meta_lat.sum=0.004252832,read_wait_aio_lat.avgcount=77,read_wait_aio_lat.avgtime=0.001907361,read_wait_aio_lat.sum=0.146866799,state_aio_wait_lat.avgcount=593150,state_aio_wait_lat.avgtime=0.000000388,state_aio_wait_lat.sum=0.230498048,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=593140,state_done_lat.avgtime=0.000003048,state_done_lat.sum=1.80789161,state_finishing_lat.avgcount=593140,state_finishing_lat.avgtime=0.000000325,state_finishing_lat.sum=0.192952339,state_io_done_lat.avgcount=593150,state_io_done_lat.avgtime=0.000001202,state_io_done_lat.sum=0.713333116,state_kv_commiting_lat.avgcount=593150,state_kv_commiting_lat.avgtime=0.005788541,state_kv_commiting_lat.sum=3433.473378536,state_kv_done_lat.avgcount=593150,state_kv_done_lat.avgtime=0.000001472,state_kv_done_lat.sum=0.873559611,state_kv_queued_lat.avgcount=593150,state_kv_queued_lat.avgtime=0.000634215,state_kv_queued_lat.sum=376.18491577,state_prepare_lat.avgcount=593150,state_prepare_lat.avgtime=0.000089694,state_prepare_l
at.sum=53.202464675,submit_lat.avgcount=593150,submit_lat.avgtime=0.000127856,submit_lat.sum=75.83816759,throttle_lat.avgcount=593150,throttle_lat.avgtime=0.000001726,throttle_lat.sum=1.023832181,write_pad_bytes=144333,write_penalty_read_ops=0 1587117698000000000 +> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=1,type=osd get=2920772,get_or_fail_fail=0,get_or_fail_success=2920772,get_started=0,get_sum=739935873,max=524288000,put=4888498,put_sum=739935873,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=1,type=osd get=2605442,get_or_fail_fail=0,get_or_fail_success=2605442,get_started=0,get_sum=5221305768,max=104857600,put=2605442,put_sum=5221305768,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=1,type=osd msgr_active_connections=1375,msgr_created_connections=12689,msgr_recv_bytes=6393440855,msgr_recv_messages=3260458,msgr_running_fast_dispatch_time=120.622437418,msgr_running_recv_time=225.24709441,msgr_running_send_time=499.150587343,msgr_running_total_time=1043.340296846,msgr_send_bytes=11134862571,msgr_send_messages=3450760 1587117698000000000 +> ceph,collection=bluefs,host=stefanosd1,id=1,type=osd bytes_written_slow=0,bytes_written_sst=19824993,bytes_written_wal=1788507023,db_total_bytes=4294967296,db_used_bytes=522190848,files_written_sst=4,files_written_wal=2,gift_bytes=0,log_bytes=1056768,log_compactions=2,logged_bytes=1933271040,max_bytes_db=1483735040,max_bytes_slow=0,max_bytes_wal=0,num_files=12,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=1,type=osd get=2605442,get_or_fail_fail=0,get_or_fail_success=2605442,get_started=0,get_sum=5221305768,max=104857600,put=2605442,put_sum=5221305768,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=1,type=osd get=10,get_or_fail_fail=0,get_or_fail_success=10,get_started=0,get_sum=7052009,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7052009,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=rocksdb,host=stefanosd1,id=1,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1586061,get_latency.avgcount=1586061,get_latency.avgtime=0.000083009,get_latency.sum=131.658296684,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=593150,submit_latency.avgtime=0.000172072,submit_latency.sum=102.064900673,submit_sync_latency.avgcount=578129,submit_sync_latency.avgtime=0.005447017,submit_sync_latency.sum=3149.078822012,submit_transaction=593150,submit_transaction_sync=578129 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=1,type=osd 
get=2607669,get_or_fail_fail=0,get_or_fail_success=2607669,get_started=0,get_sum=5225768676,max=104857600,put=2607669,put_sum=5225768676,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=recoverystate_perf,host=stefanosd1,id=1,type=osd activating_latency.avgcount=104,activating_latency.avgtime=0.071646485,activating_latency.sum=7.451234493,active_latency.avgcount=33,active_latency.avgtime=1734.369034268,active_latency.sum=57234.178130859,backfilling_latency.avgcount=1,backfilling_latency.avgtime=2.598401698,backfilling_latency.sum=2.598401698,clean_latency.avgcount=33,clean_latency.avgtime=1734.213467342,clean_latency.sum=57229.044422292,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=167,getinfo_latency.avgtime=0.373444627,getinfo_latency.sum=62.365252849,getlog_latency.avgcount=105,getlog_latency.avgtime=0.003575062,getlog_latency.sum=0.375381569,getmissing_latency.avgcount=104,getmissing_latency.avgtime=0.000157091,getmissing_latency.sum=0.016337565,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=188,initial_latency.avgtime=0.001833512,initial_latency.sum=0.344700343,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=167,peering_latency.avgtime=1.501818082,peering_latency.sum=250.803619796,primary_latency.avgcount=97,primary_latency.avgtime=591.344286378,primary_latency.sum=57360.395778762,recovered_latency.avgcount=104,recovered_latency.avgtime=0.000291138,recovered_latency.sum=0.030278433,recovering_latency.avgcount=2,recovering_latency.avgtime=0.142378096,recovering_latency.sum=0.284756192,replicaactive_latency.avgcount=32,replicaactive_latency.avgtime=1788.474901442,replicaactive_latency.sum=57231.196846165,repnotrecovering_latency.avgcount=34,repnotrecovering_latency.avgtime=1683.273587087,repnotrecovering_latency.sum=57231.301960987,reprecovering_latency.avgcount=2,reprecovering_latency.avgtime=0.418094818,reprecovering_latency.sum=0.836189637,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=2,repwaitrecoveryreserved_latency.avgtime=0.000588413,repwaitrecoveryreserved_latency.sum=0.001176827,reset_latency.avgcount=433,reset_latency.avgtime=0.15669689,reset_latency.sum=67.849753631,start_latency.avgcount=433,start_latency.avgtime=0.000412707,start_latency.sum=0.178702508,started_latency.avgcount=245,started_latency.avgtime=468.419544137,started_latency.sum=114762.788313581,stray_latency.avgcount=266,stray_latency.avgtime=1.489291271,stray_latency.sum=396.151478238,waitactingchange_latency.avgcount=1,waitactingchange_latency.avgtime=0.982689906,waitactingchange_latency.sum=0.982689906,waitlocalbackfillreserved_latency.avgcount=1,waitlocalbackfillreserved_latency.avgtime=0.000542092,waitlocalbackfillreserved_latency.sum=0.000542092,waitlocalrecoveryreserved_latency.avgcount=2,waitlocalrecoveryreserved_latency.avgtime=0.00391669,waitlocalrecoveryreserved_latency.sum=0.007833381,waitremotebackfillreserved_latency.avgcount=1,waitremotebackfillreserved_latency.avgtime=0.003110409,waitremotebackfillreserved_latency.sum=0.003110409,waitremoterecoveryreserved_latency.avgcount=2,waitremoterecoveryreserved_latency.avgtime=0.012229338,waitremoterecoveryreserved_late
ncy.sum=0.024458677,waitupthru_latency.avgcount=104,waitupthru_latency.avgtime=1.807608905,waitupthru_latency.sum=187.991326197 1587117698000000000 +> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=1,type=osd msgr_active_connections=1289,msgr_created_connections=9469,msgr_recv_bytes=8348149800,msgr_recv_messages=5048791,msgr_running_fast_dispatch_time=313.754567889,msgr_running_recv_time=372.054833029,msgr_running_send_time=694.900405016,msgr_running_total_time=1656.294769387,msgr_send_bytes=11550148208,msgr_send_messages=5175962 1587117698000000000 +> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=1,type=osd get=593150,get_or_fail_fail=0,get_or_fail_success=0,get_started=593150,get_sum=398147414260,max=67108864,put=578129,put_sum=398147414260,take=0,take_sum=0,val=0,wait.avgcount=29,wait.avgtime=0.000972655,wait.sum=0.028207005 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=1,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=cct,host=stefanosd1,id=1,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000 +> ceph,collection=mempool,host=stefanosd1,id=1,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=13064,bluefs_items=593,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=614400,bluestore_cache_data_items=41,bluestore_cache_onode_bytes=301104,bluestore_cache_onode_items=459,bluestore_cache_other_bytes=230945,bluestore_cache_other_items=26119,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=7520,bluestore_txc_items=10,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=657768,bluestore_writing_deferred_items=172,bluestore_writing_items=0,buffer_anon_bytes=2328515,buffer_anon_items=271,buffer_meta_bytes=5808,buffer_meta_items=66,mds_co_bytes=0,mds_co_items=0,osd_bytes=2406400,osd_items=188,osd_mapbl_bytes=139623,osd_mapbl_items=9,osd_pglog_bytes=6768784,osd_pglog_items=18179,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=1,type=osd get=2932513,get_or_fail_fail=0,get_or_fail_success=2932513,get_started=0,get_sum=740620215,max=104857600,put=2932513,put_sum=740620215,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=1,type=osd get=2607669,get_or_fail_fail=0,get_or_fail_success=2607669,get_started=0,get_sum=5225768676,max=104857600,put=2607669,put_sum=5225768676,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=1,type=osd complete_latency.avgcount=10,complete_latency.avgtime=0.002884646,complete_latency.sum=0.028846469,queue_len=0 1587117698000000000 +> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=1,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000 +> ceph,collection=throttle-objecter_bytes,host=stefanosd1,id=2,type=osd 
get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=finisher-commit_finisher,host=stefanosd1,id=2,type=osd complete_latency.avgcount=11,complete_latency.avgtime=0.002714416,complete_latency.sum=0.029858583,queue_len=0 1587117698000000000 +> ceph,collection=finisher-defered_finisher,host=stefanosd1,id=2,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000 +> ceph,collection=objecter,host=stefanosd1,id=2,type=osd command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=19,omap_del=0,omap_rd=0,omap_wr=0,op=0,op_active=0,op_laggy=0,op_pg=0,op_r=0,op_reply=0,op_resend=0,op_rmw=0,op_send=0,op_send_bytes=0,op_w=0,osd_laggy=0,osd_session_close=0,osd_session_open=0,osd_sessions=0,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_client,host=stefanosd1,id=2,type=osd get=2607136,get_or_fail_fail=0,get_or_fail_success=2607136,get_started=0,get_sum=5224700544,max=104857600,put=2607136,put_sum=5224700544,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=mempool,host=stefanosd1,id=2,type=osd bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=11624,bluefs_items=522,bluestore_alloc_bytes=230288,bluestore_alloc_items=28786,bluestore_cache_data_bytes=614400,bluestore_cache_data_items=41,bluestore_cache_onode_bytes=228288,bluestore_cache_onode_items=348,bluestore_cache_other_bytes=174158,bluestore_cache_other_items=18527,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=8272,bluestore_txc_items=11,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=670130,bluestore_writing_deferred_items=176,bluestore_writing_items=0,buffer_anon_bytes=2311664,buffer_anon_items=244,buffer_meta_bytes=5456,buffer_meta_items=62,mds_co_bytes=0,mds_co_items=0,osd_bytes=1920000,osd_items=150,osd_mapbl_bytes=155152,osd_mapbl_items=10,osd_pglog_bytes=3393520,osd_pglog_items=9128,osdmap_bytes=710892,osdmap_items=4426,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117698000000000 +> ceph,collection=osd,host=stefanosd1,id=2,type=osd 
agent_evict=0,agent_flush=0,agent_skip=0,agent_wake=0,cached_crc=0,cached_crc_adjusted=0,copyfrom=0,heartbeat_to_peers=7,loadavg=11,map_message_epoch_dups=37,map_message_epochs=56,map_messages=37,messages_delayed_for_map=0,missed_crc=0,numpg=150,numpg_primary=59,numpg_removing=0,numpg_replica=91,numpg_stray=0,object_ctx_cache_hit=705923,object_ctx_cache_total=705951,op=690584,op_before_dequeue_op_lat.avgcount=1155697,op_before_dequeue_op_lat.avgtime=0.000217926,op_before_dequeue_op_lat.sum=251.856487141,op_before_queue_op_lat.avgcount=1148445,op_before_queue_op_lat.avgtime=0.000039696,op_before_queue_op_lat.sum=45.589516462,op_cache_hit=0,op_in_bytes=0,op_latency.avgcount=690584,op_latency.avgtime=0.002488685,op_latency.sum=1718.646504654,op_out_bytes=1026000,op_prepare_latency.avgcount=698700,op_prepare_latency.avgtime=0.000300375,op_prepare_latency.sum=209.872029659,op_process_latency.avgcount=690584,op_process_latency.avgtime=0.00230742,op_process_latency.sum=1593.46739165,op_r=548020,op_r_latency.avgcount=548020,op_r_latency.avgtime=0.000298287,op_r_latency.sum=163.467760649,op_r_out_bytes=1026000,op_r_prepare_latency.avgcount=548020,op_r_prepare_latency.avgtime=0.000186359,op_r_prepare_latency.sum=102.128629183,op_r_process_latency.avgcount=548020,op_r_process_latency.avgtime=0.00012716,op_r_process_latency.sum=69.686468884,op_rw=142562,op_rw_in_bytes=0,op_rw_latency.avgcount=142562,op_rw_latency.avgtime=0.010908597,op_rw_latency.sum=1555.151525732,op_rw_out_bytes=0,op_rw_prepare_latency.avgcount=150678,op_rw_prepare_latency.avgtime=0.000715043,op_rw_prepare_latency.sum=107.741399304,op_rw_process_latency.avgcount=142562,op_rw_process_latency.avgtime=0.01068836,op_rw_process_latency.sum=1523.754107887,op_w=2,op_w_in_bytes=0,op_w_latency.avgcount=2,op_w_latency.avgtime=0.013609136,op_w_latency.sum=0.027218273,op_w_prepare_latency.avgcount=2,op_w_prepare_latency.avgtime=0.001000586,op_w_prepare_latency.sum=0.002001172,op_w_process_latency.avgcount=2,op_w_process_latency.avgtime=0.013407439,op_w_process_latency.sum=0.026814879,op_wip=0,osd_map_bl_cache_hit=15,osd_map_bl_cache_miss=41,osd_map_cache_hit=4241,osd_map_cache_miss=14,osd_map_cache_miss_low=0,osd_map_cache_miss_low_avg.avgcount=0,osd_map_cache_miss_low_avg.sum=0,osd_pg_biginfo=1824,osd_pg_fastinfo=285998,osd_pg_info=294869,osd_tier_flush_lat.avgcount=0,osd_tier_flush_lat.avgtime=0,osd_tier_flush_lat.sum=0,osd_tier_promote_lat.avgcount=0,osd_tier_promote_lat.avgtime=0,osd_tier_promote_lat.sum=0,osd_tier_r_lat.avgcount=0,osd_tier_r_lat.avgtime=0,osd_tier_r_lat.sum=0,pull=0,push=1,push_out_bytes=0,recovery_bytes=0,recovery_ops=0,stat_bytes=107369988096,stat_bytes_avail=106271932416,stat_bytes_used=1098055680,subop=134165,subop_in_bytes=89501237,subop_latency.avgcount=134165,subop_latency.avgtime=0.007313523,subop_latency.sum=981.218888627,subop_pull=0,subop_pull_latency.avgcount=0,subop_pull_latency.avgtime=0,subop_pull_latency.sum=0,subop_push=0,subop_push_in_bytes=0,subop_push_latency.avgcount=0,subop_push_latency.avgtime=0,subop_push_latency.sum=0,subop_w=134165,subop_w_in_bytes=89501237,subop_w_latency.avgcount=134165,subop_w_latency.avgtime=0.007313523,subop_w_latency.sum=981.218888627,tier_clean=0,tier_delay=0,tier_dirty=4,tier_evict=0,tier_flush=0,tier_flush_fail=0,tier_promote=0,tier_proxy_read=0,tier_proxy_write=0,tier_try_flush=0,tier_try_flush_fail=0,tier_whiteout=0 1587117698000000000 +> ceph,collection=AsyncMessenger::Worker-1,host=stefanosd1,id=2,type=osd 
msgr_active_connections=746,msgr_created_connections=15212,msgr_recv_bytes=8633229006,msgr_recv_messages=4284202,msgr_running_fast_dispatch_time=153.820479102,msgr_running_recv_time=282.031655658,msgr_running_send_time=585.444749736,msgr_running_total_time=1231.431789242,msgr_send_bytes=11962769351,msgr_send_messages=4440622 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-ms_objecter,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_client,host=stefanosd1,id=2,type=osd get=2607136,get_or_fail_fail=0,get_or_fail_success=2607136,get_started=0,get_sum=5224700544,max=104857600,put=2607136,put_sum=5224700544,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=bluefs,host=stefanosd1,id=2,type=osd bytes_written_slow=0,bytes_written_sst=9065815,bytes_written_wal=901884611,db_total_bytes=4294967296,db_used_bytes=546308096,files_written_sst=3,files_written_wal=2,gift_bytes=0,log_bytes=225726464,log_compactions=1,logged_bytes=1195945984,max_bytes_db=1234173952,max_bytes_slow=0,max_bytes_wal=0,num_files=11,reclaim_bytes=0,slow_total_bytes=0,slow_used_bytes=0,wal_total_bytes=0,wal_used_bytes=0 1587117698000000000 +> ceph,collection=recoverystate_perf,host=stefanosd1,id=2,type=osd activating_latency.avgcount=88,activating_latency.avgtime=0.086149065,activating_latency.sum=7.581117751,active_latency.avgcount=29,active_latency.avgtime=1790.849396082,active_latency.sum=51934.632486379,backfilling_latency.avgcount=0,backfilling_latency.avgtime=0,backfilling_latency.sum=0,clean_latency.avgcount=29,clean_latency.avgtime=1790.754765195,clean_latency.sum=51931.888190683,down_latency.avgcount=0,down_latency.avgtime=0,down_latency.sum=0,getinfo_latency.avgcount=134,getinfo_latency.avgtime=0.427567953,getinfo_latency.sum=57.294105786,getlog_latency.avgcount=88,getlog_latency.avgtime=0.011810192,getlog_latency.sum=1.03929697,getmissing_latency.avgcount=88,getmissing_latency.avgtime=0.000104598,getmissing_latency.sum=0.009204673,incomplete_latency.avgcount=0,incomplete_latency.avgtime=0,incomplete_latency.sum=0,initial_latency.avgcount=150,initial_latency.avgtime=0.001251361,initial_latency.sum=0.187704197,notbackfilling_latency.avgcount=0,notbackfilling_latency.avgtime=0,notbackfilling_latency.sum=0,notrecovering_latency.avgcount=0,notrecovering_latency.avgtime=0,notrecovering_latency.sum=0,peering_latency.avgcount=134,peering_latency.avgtime=0.998405763,peering_latency.sum=133.786372331,primary_latency.avgcount=75,primary_latency.avgtime=693.473306562,primary_latency.sum=52010.497992212,recovered_latency.avgcount=88,recovered_latency.avgtime=0.000609715,recovered_latency.sum=0.053654964,recovering_latency.avgcount=1,recovering_latency.avgtime=0.100713031,recovering_latency.sum=0.100713031,replicaactive_latency.avgcount=21,replicaactive_latency.avgtime=1790.852354921,replicaactive_latency.sum=37607.89945336,repnotrecovering_latency.avgcount=21,repnotrecovering_latency.avgtime=1790.852315529,repnotrecovering_latency.sum=37607.898626121,reprecovering_latency.avgcount=0,reprecovering_latency.avgtime=0,reprecovering_latency.sum=0,repwaitbackfillreserved_latency.avgcount=0,repwaitbackfillreserved_latency.avgtime=0,repwaitbackfillreserved_latency.sum=0,repwaitrecoveryreserved_latency.avgcount=0,repwaitrecoveryreserved_late
ncy.avgtime=0,repwaitrecoveryreserved_latency.sum=0,reset_latency.avgcount=346,reset_latency.avgtime=0.126826803,reset_latency.sum=43.882073917,start_latency.avgcount=346,start_latency.avgtime=0.000233277,start_latency.sum=0.080713962,started_latency.avgcount=196,started_latency.avgtime=457.885378797,started_latency.sum=89745.534244237,stray_latency.avgcount=212,stray_latency.avgtime=1.013774396,stray_latency.sum=214.920172121,waitactingchange_latency.avgcount=0,waitactingchange_latency.avgtime=0,waitactingchange_latency.sum=0,waitlocalbackfillreserved_latency.avgcount=0,waitlocalbackfillreserved_latency.avgtime=0,waitlocalbackfillreserved_latency.sum=0,waitlocalrecoveryreserved_latency.avgcount=1,waitlocalrecoveryreserved_latency.avgtime=0.001572379,waitlocalrecoveryreserved_latency.sum=0.001572379,waitremotebackfillreserved_latency.avgcount=0,waitremotebackfillreserved_latency.avgtime=0,waitremotebackfillreserved_latency.sum=0,waitremoterecoveryreserved_latency.avgcount=1,waitremoterecoveryreserved_latency.avgtime=0.012729633,waitremoterecoveryreserved_latency.sum=0.012729633,waitupthru_latency.avgcount=88,waitupthru_latency.avgtime=0.857137729,waitupthru_latency.sum=75.428120205 1587117698000000000 +> ceph,collection=throttle-objecter_ops,host=stefanosd1,id=2,type=osd get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=bluestore,host=stefanosd1,id=2,type=osd bluestore_allocated=24248320,bluestore_blob_split=0,bluestore_blobs=83,bluestore_buffer_bytes=614400,bluestore_buffer_hit_bytes=161362,bluestore_buffer_miss_bytes=534799,bluestore_buffers=41,bluestore_compressed=0,bluestore_compressed_allocated=0,bluestore_compressed_original=0,bluestore_extent_compress=0,bluestore_extents=83,bluestore_fragmentation_micros=1,bluestore_gc_merged=0,bluestore_onode_hits=723852,bluestore_onode_misses=364,bluestore_onode_reshard=0,bluestore_onode_shard_hits=0,bluestore_onode_shard_misses=0,bluestore_onodes=348,bluestore_read_eio=0,bluestore_reads_with_retries=0,bluestore_stored=1984402,bluestore_txc=295997,bluestore_write_big=0,bluestore_write_big_blobs=0,bluestore_write_big_bytes=0,bluestore_write_small=60,bluestore_write_small_bytes=343843,bluestore_write_small_deferred=22,bluestore_write_small_new=38,bluestore_write_small_pre_read=22,bluestore_write_small_unused=0,commit_lat.avgcount=295997,commit_lat.avgtime=0.006994931,commit_lat.sum=2070.478673619,compress_lat.avgcount=0,compress_lat.avgtime=0,compress_lat.sum=0,compress_rejected_count=0,compress_success_count=0,csum_lat.avgcount=47,csum_lat.avgtime=0.000034434,csum_lat.sum=0.001618423,decompress_lat.avgcount=0,decompress_lat.avgtime=0,decompress_lat.sum=0,deferred_write_bytes=0,deferred_write_ops=0,kv_commit_lat.avgcount=291889,kv_commit_lat.avgtime=0.006347015,kv_commit_lat.sum=1852.624108527,kv_final_lat.avgcount=291885,kv_final_lat.avgtime=0.00004358,kv_final_lat.sum=12.720529751,kv_flush_lat.avgcount=291889,kv_flush_lat.avgtime=0.000000211,kv_flush_lat.sum=0.061636079,kv_sync_lat.avgcount=291889,kv_sync_lat.avgtime=0.006347227,kv_sync_lat.sum=1852.685744606,omap_lower_bound_lat.avgcount=1,omap_lower_bound_lat.avgtime=0.000004482,omap_lower_bound_lat.sum=0.000004482,omap_next_lat.avgcount=6933,omap_next_lat.avgtime=0.000003956,omap_next_lat.sum=0.027427456,omap_seek_to_first_lat.avgcount=309,omap_seek_to_first_lat.avgtime=0.000005879,omap_seek_to_first_lat.sum=0.001816658,omap_upper_bound_lat.avgcou
nt=0,omap_upper_bound_lat.avgtime=0,omap_upper_bound_lat.sum=0,read_lat.avgcount=229,read_lat.avgtime=0.000394981,read_lat.sum=0.090450704,read_onode_meta_lat.avgcount=295,read_onode_meta_lat.avgtime=0.000016832,read_onode_meta_lat.sum=0.004965516,read_wait_aio_lat.avgcount=66,read_wait_aio_lat.avgtime=0.001237841,read_wait_aio_lat.sum=0.081697561,state_aio_wait_lat.avgcount=295997,state_aio_wait_lat.avgtime=0.000000357,state_aio_wait_lat.sum=0.105827433,state_deferred_aio_wait_lat.avgcount=0,state_deferred_aio_wait_lat.avgtime=0,state_deferred_aio_wait_lat.sum=0,state_deferred_cleanup_lat.avgcount=0,state_deferred_cleanup_lat.avgtime=0,state_deferred_cleanup_lat.sum=0,state_deferred_queued_lat.avgcount=0,state_deferred_queued_lat.avgtime=0,state_deferred_queued_lat.sum=0,state_done_lat.avgcount=295986,state_done_lat.avgtime=0.000003017,state_done_lat.sum=0.893199127,state_finishing_lat.avgcount=295986,state_finishing_lat.avgtime=0.000000306,state_finishing_lat.sum=0.090792683,state_io_done_lat.avgcount=295997,state_io_done_lat.avgtime=0.000001066,state_io_done_lat.sum=0.315577655,state_kv_commiting_lat.avgcount=295997,state_kv_commiting_lat.avgtime=0.006423586,state_kv_commiting_lat.sum=1901.362268572,state_kv_done_lat.avgcount=295997,state_kv_done_lat.avgtime=0.00000155,state_kv_done_lat.sum=0.458963064,state_kv_queued_lat.avgcount=295997,state_kv_queued_lat.avgtime=0.000477234,state_kv_queued_lat.sum=141.260101773,state_prepare_lat.avgcount=295997,state_prepare_lat.avgtime=0.000091806,state_prepare_lat.sum=27.174436583,submit_lat.avgcount=295997,submit_lat.avgtime=0.000135729,submit_lat.sum=40.17557682,throttle_lat.avgcount=295997,throttle_lat.avgtime=0.000002734,throttle_lat.sum=0.809479837,write_pad_bytes=151773,write_penalty_read_ops=0 1587117698000000000 +> ceph,collection=throttle-bluestore_throttle_bytes,host=stefanosd1,id=2,type=osd get=295997,get_or_fail_fail=0,get_or_fail_success=0,get_started=295997,get_sum=198686579299,max=67108864,put=291889,put_sum=198686579299,take=0,take_sum=0,val=0,wait.avgcount=83,wait.avgtime=0.003670612,wait.sum=0.304660858 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-cluster,host=stefanosd1,id=2,type=osd get=452060,get_or_fail_fail=0,get_or_fail_success=452060,get_started=0,get_sum=269934345,max=104857600,put=452060,put_sum=269934345,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-bluestore_throttle_deferred_bytes,host=stefanosd1,id=2,type=osd get=11,get_or_fail_fail=0,get_or_fail_success=11,get_started=0,get_sum=7723117,max=201326592,put=0,put_sum=0,take=0,take_sum=0,val=7723117,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_front_server,host=stefanosd1,id=2,type=osd get=2607433,get_or_fail_fail=0,get_or_fail_success=2607433,get_started=0,get_sum=5225295732,max=104857600,put=2607433,put_sum=5225295732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=finisher-objecter-finisher-0,host=stefanosd1,id=2,type=osd complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117698000000000 +> ceph,collection=cct,host=stefanosd1,id=2,type=osd total_workers=6,unhealthy_workers=0 1587117698000000000 +> ceph,collection=AsyncMessenger::Worker-2,host=stefanosd1,id=2,type=osd 
msgr_active_connections=670,msgr_created_connections=13455,msgr_recv_bytes=6334605563,msgr_recv_messages=3287843,msgr_running_fast_dispatch_time=137.016615819,msgr_running_recv_time=240.687997039,msgr_running_send_time=471.710658466,msgr_running_total_time=1034.029109337,msgr_send_bytes=9753423475,msgr_send_messages=3439611 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-client,host=stefanosd1,id=2,type=osd get=710355,get_or_fail_fail=0,get_or_fail_success=710355,get_started=0,get_sum=166306283,max=104857600,put=710355,put_sum=166306283,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-hb_back_server,host=stefanosd1,id=2,type=osd get=2607433,get_or_fail_fail=0,get_or_fail_success=2607433,get_started=0,get_sum=5225295732,max=104857600,put=2607433,put_sum=5225295732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=AsyncMessenger::Worker-0,host=stefanosd1,id=2,type=osd msgr_active_connections=705,msgr_created_connections=17953,msgr_recv_bytes=7261438733,msgr_recv_messages=4496034,msgr_running_fast_dispatch_time=254.716476808,msgr_running_recv_time=272.196741555,msgr_running_send_time=571.102924903,msgr_running_total_time=1338.461077493,msgr_send_bytes=10772250508,msgr_send_messages=4192781 1587117698000000000 +> ceph,collection=rocksdb,host=stefanosd1,id=2,type=osd compact=0,compact_queue_len=0,compact_queue_merge=0,compact_range=0,get=1424,get_latency.avgcount=1424,get_latency.avgtime=0.000030752,get_latency.sum=0.043792142,rocksdb_write_delay_time.avgcount=0,rocksdb_write_delay_time.avgtime=0,rocksdb_write_delay_time.sum=0,rocksdb_write_memtable_time.avgcount=0,rocksdb_write_memtable_time.avgtime=0,rocksdb_write_memtable_time.sum=0,rocksdb_write_pre_and_post_time.avgcount=0,rocksdb_write_pre_and_post_time.avgtime=0,rocksdb_write_pre_and_post_time.sum=0,rocksdb_write_wal_time.avgcount=0,rocksdb_write_wal_time.avgtime=0,rocksdb_write_wal_time.sum=0,submit_latency.avgcount=295997,submit_latency.avgtime=0.000173137,submit_latency.sum=51.248072285,submit_sync_latency.avgcount=291889,submit_sync_latency.avgtime=0.006094397,submit_sync_latency.sum=1778.887521449,submit_transaction=295997,submit_transaction_sync=291889 1587117698000000000 +> ceph,collection=throttle-osd_client_bytes,host=stefanosd1,id=2,type=osd get=698701,get_or_fail_fail=0,get_or_fail_success=698701,get_started=0,get_sum=165630172,max=524288000,put=920880,put_sum=165630172,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117698000000000 +> ceph,collection=mds_sessions,host=stefanmds1,id=stefanmds1,type=mds average_load=0,avg_session_uptime=0,session_add=0,session_count=0,session_remove=0,sessions_open=0,sessions_stale=0,total_load=0 1587117476000000000 +> ceph,collection=mempool,host=stefanmds1,id=stefanmds1,type=mds 
bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=132069,buffer_anon_items=82,buffer_meta_bytes=0,buffer_meta_items=0,mds_co_bytes=44208,mds_co_items=154,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=16952,osdmap_items=139,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117476000000000 +> ceph,collection=objecter,host=stefanmds1,id=stefanmds1,type=mds command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=1,omap_del=0,omap_rd=28,omap_wr=1,op=33,op_active=0,op_laggy=0,op_pg=0,op_r=26,op_reply=33,op_resend=2,op_rmw=0,op_send=35,op_send_bytes=364,op_w=7,osd_laggy=0,osd_session_close=91462,osd_session_open=91468,osd_sessions=6,osdop_append=0,osdop_call=0,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=5,osdop_getxattr=14,osdop_mapext=0,osdop_notify=0,osdop_other=0,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=8,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=2,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=1,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117476000000000 +> ceph,collection=cct,host=stefanmds1,id=stefanmds1,type=mds total_workers=1,unhealthy_workers=0 1587117476000000000 +> ceph,collection=mds_server,host=stefanmds1,id=stefanmds1,type=mds 
cap_revoke_eviction=0,dispatch_client_request=0,dispatch_server_request=0,handle_client_request=0,handle_client_session=0,handle_slave_request=0,req_create_latency.avgcount=0,req_create_latency.avgtime=0,req_create_latency.sum=0,req_getattr_latency.avgcount=0,req_getattr_latency.avgtime=0,req_getattr_latency.sum=0,req_getfilelock_latency.avgcount=0,req_getfilelock_latency.avgtime=0,req_getfilelock_latency.sum=0,req_link_latency.avgcount=0,req_link_latency.avgtime=0,req_link_latency.sum=0,req_lookup_latency.avgcount=0,req_lookup_latency.avgtime=0,req_lookup_latency.sum=0,req_lookuphash_latency.avgcount=0,req_lookuphash_latency.avgtime=0,req_lookuphash_latency.sum=0,req_lookupino_latency.avgcount=0,req_lookupino_latency.avgtime=0,req_lookupino_latency.sum=0,req_lookupname_latency.avgcount=0,req_lookupname_latency.avgtime=0,req_lookupname_latency.sum=0,req_lookupparent_latency.avgcount=0,req_lookupparent_latency.avgtime=0,req_lookupparent_latency.sum=0,req_lookupsnap_latency.avgcount=0,req_lookupsnap_latency.avgtime=0,req_lookupsnap_latency.sum=0,req_lssnap_latency.avgcount=0,req_lssnap_latency.avgtime=0,req_lssnap_latency.sum=0,req_mkdir_latency.avgcount=0,req_mkdir_latency.avgtime=0,req_mkdir_latency.sum=0,req_mknod_latency.avgcount=0,req_mknod_latency.avgtime=0,req_mknod_latency.sum=0,req_mksnap_latency.avgcount=0,req_mksnap_latency.avgtime=0,req_mksnap_latency.sum=0,req_open_latency.avgcount=0,req_open_latency.avgtime=0,req_open_latency.sum=0,req_readdir_latency.avgcount=0,req_readdir_latency.avgtime=0,req_readdir_latency.sum=0,req_rename_latency.avgcount=0,req_rename_latency.avgtime=0,req_rename_latency.sum=0,req_renamesnap_latency.avgcount=0,req_renamesnap_latency.avgtime=0,req_renamesnap_latency.sum=0,req_rmdir_latency.avgcount=0,req_rmdir_latency.avgtime=0,req_rmdir_latency.sum=0,req_rmsnap_latency.avgcount=0,req_rmsnap_latency.avgtime=0,req_rmsnap_latency.sum=0,req_rmxattr_latency.avgcount=0,req_rmxattr_latency.avgtime=0,req_rmxattr_latency.sum=0,req_setattr_latency.avgcount=0,req_setattr_latency.avgtime=0,req_setattr_latency.sum=0,req_setdirlayout_latency.avgcount=0,req_setdirlayout_latency.avgtime=0,req_setdirlayout_latency.sum=0,req_setfilelock_latency.avgcount=0,req_setfilelock_latency.avgtime=0,req_setfilelock_latency.sum=0,req_setlayout_latency.avgcount=0,req_setlayout_latency.avgtime=0,req_setlayout_latency.sum=0,req_setxattr_latency.avgcount=0,req_setxattr_latency.avgtime=0,req_setxattr_latency.sum=0,req_symlink_latency.avgcount=0,req_symlink_latency.avgtime=0,req_symlink_latency.sum=0,req_unlink_latency.avgcount=0,req_unlink_latency.avgtime=0,req_unlink_latency.sum=0 1587117476000000000 +> ceph,collection=AsyncMessenger::Worker-2,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=84,msgr_created_connections=68511,msgr_recv_bytes=238078,msgr_recv_messages=2655,msgr_running_fast_dispatch_time=0.004247777,msgr_running_recv_time=25.369012545,msgr_running_send_time=3.743427461,msgr_running_total_time=130.277111559,msgr_send_bytes=172767043,msgr_send_messages=18172 1587117476000000000 +> ceph,collection=mds_log,host=stefanmds1,id=stefanmds1,type=mds ev=0,evadd=0,evex=0,evexd=0,evexg=0,evtrm=0,expos=4194304,jlat.avgcount=0,jlat.avgtime=0,jlat.sum=0,rdpos=4194304,replayed=1,seg=1,segadd=0,segex=0,segexd=0,segexg=0,segtrm=0,wrpos=0 1587117476000000000 +> ceph,collection=AsyncMessenger::Worker-0,host=stefanmds1,id=stefanmds1,type=mds 
msgr_active_connections=595,msgr_created_connections=943825,msgr_recv_bytes=78618003,msgr_recv_messages=914080,msgr_running_fast_dispatch_time=0.001544386,msgr_running_recv_time=459.627068807,msgr_running_send_time=469.337032316,msgr_running_total_time=2744.084305898,msgr_send_bytes=61684163658,msgr_send_messages=1858008 1587117476000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-mds,host=stefanmds1,id=stefanmds1,type=mds get=1216458,get_or_fail_fail=0,get_or_fail_success=1216458,get_started=0,get_sum=51976882,max=104857600,put=1216458,put_sum=51976882,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=AsyncMessenger::Worker-1,host=stefanmds1,id=stefanmds1,type=mds msgr_active_connections=226,msgr_created_connections=42679,msgr_recv_bytes=63140151,msgr_recv_messages=299727,msgr_running_fast_dispatch_time=26.316138629,msgr_running_recv_time=36.969916165,msgr_running_send_time=70.457421128,msgr_running_total_time=226.230019936,msgr_send_bytes=193154464,msgr_send_messages=310481 1587117476000000000 +> ceph,collection=mds,host=stefanmds1,id=stefanmds1,type=mds caps=0,dir_commit=0,dir_fetch=12,dir_merge=0,dir_split=0,exported=0,exported_inodes=0,forward=0,imported=0,imported_inodes=0,inode_max=2147483647,inodes=10,inodes_bottom=3,inodes_expired=0,inodes_pin_tail=0,inodes_pinned=10,inodes_top=7,inodes_with_caps=0,load_cent=0,openino_backtrace_fetch=0,openino_dir_fetch=0,openino_peer_discover=0,q=0,reply=0,reply_latency.avgcount=0,reply_latency.avgtime=0,reply_latency.sum=0,request=0,subtrees=2,traverse=0,traverse_dir_fetch=0,traverse_discover=0,traverse_forward=0,traverse_hit=0,traverse_lock=0,traverse_remote_ino=0 1587117476000000000 +> ceph,collection=purge_queue,host=stefanmds1,id=stefanmds1,type=mds pq_executed=0,pq_executing=0,pq_executing_ops=0 1587117476000000000 +> ceph,collection=throttle-write_buf_throttle,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=3758096384,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=throttle-write_buf_throttle-0x5624e9377f40,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=3758096384,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=mds_cache,host=stefanmds1,id=stefanmds1,type=mds ireq_enqueue_scrub=0,ireq_exportdir=0,ireq_flush=0,ireq_fragmentdir=0,ireq_fragstats=0,ireq_inodestats=0,num_recovering_enqueued=0,num_recovering_prioritized=0,num_recovering_processing=0,num_strays=0,num_strays_delayed=0,num_strays_enqueuing=0,recovery_completed=0,recovery_started=0,strays_created=0,strays_enqueued=0,strays_migrated=0,strays_reintegrated=0 1587117476000000000 +> ceph,collection=throttle-objecter_bytes,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=104857600,put=16,put_sum=1016,take=33,take_sum=1016,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=throttle-objecter_ops,host=stefanmds1,id=stefanmds1,type=mds get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=1024,put=33,put_sum=33,take=33,take_sum=33,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117476000000000 +> ceph,collection=mds_mem,host=stefanmds1,id=stefanmds1,type=mds 
cap=0,cap+=0,cap-=0,dir=12,dir+=12,dir-=0,dn=10,dn+=10,dn-=0,heap=322284,ino=13,ino+=13,ino-=0,rss=76032 1587117476000000000 +> ceph,collection=finisher-PurgeQueue,host=stefanmds1,id=stefanmds1,type=mds complete_latency.avgcount=4,complete_latency.avgtime=0.000176985,complete_latency.sum=0.000707941,queue_len=0 1587117476000000000 +> ceph,collection=cct,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw total_workers=0,unhealthy_workers=0 1587117156000000000 +> ceph,collection=throttle-objecter_bytes,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=791732,get_or_fail_fail=0,get_or_fail_success=791732,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=rgw,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw cache_hit=0,cache_miss=791706,failed_req=0,get=0,get_b=0,get_initial_lat.avgcount=0,get_initial_lat.avgtime=0,get_initial_lat.sum=0,keystone_token_cache_hit=0,keystone_token_cache_miss=0,pubsub_event_lost=0,pubsub_event_triggered=0,pubsub_events=0,pubsub_push_failed=0,pubsub_push_ok=0,pubsub_push_pending=0,pubsub_store_fail=0,pubsub_store_ok=0,put=0,put_b=0,put_initial_lat.avgcount=0,put_initial_lat.avgtime=0,put_initial_lat.sum=0,qactive=0,qlen=0,req=791705 1587117156000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-radosclient,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=2697988,get_or_fail_fail=0,get_or_fail_success=2697988,get_started=0,get_sum=444563051,max=104857600,put=2697988,put_sum=444563051,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=finisher-radosclient,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw complete_latency.avgcount=2,complete_latency.avgtime=0.003530161,complete_latency.sum=0.007060323,queue_len=0 1587117156000000000 +> ceph,collection=throttle-rgw_async_rados_ops,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=0,get_or_fail_fail=0,get_or_fail_success=0,get_started=0,get_sum=0,max=64,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=throttle-objecter_ops,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=791732,get_or_fail_fail=0,get_or_fail_success=791732,get_started=0,get_sum=791732,max=24576,put=791732,put_sum=791732,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=throttle-objecter_bytes-0x5598969981c0,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1637900,get_or_fail_fail=0,get_or_fail_success=1637900,get_started=0,get_sum=0,max=104857600,put=0,put_sum=0,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=objecter,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw 
command_active=0,command_resend=0,command_send=0,linger_active=8,linger_ping=1905736,linger_resend=4,linger_send=13,map_epoch=203,map_full=0,map_inc=17,omap_del=0,omap_rd=0,omap_wr=0,op=2697488,op_active=0,op_laggy=0,op_pg=0,op_r=791730,op_reply=2697476,op_resend=1,op_rmw=0,op_send=2697490,op_send_bytes=362,op_w=1905758,osd_laggy=5,osd_session_close=59558,osd_session_open=59566,osd_sessions=8,osdop_append=0,osdop_call=1,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=8,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=791714,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=16,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=791706,osdop_truncate=0,osdop_watch=1905750,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117156000000000 +> ceph,collection=AsyncMessenger::Worker-2,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=11,msgr_created_connections=59839,msgr_recv_bytes=342697143,msgr_recv_messages=1441603,msgr_running_fast_dispatch_time=161.807937536,msgr_running_recv_time=118.174064257,msgr_running_send_time=207.679154333,msgr_running_total_time=698.527662129,msgr_send_bytes=530785909,msgr_send_messages=1679950 1587117156000000000 +> ceph,collection=mempool,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw bloom_filter_bytes=0,bloom_filter_items=0,bluefs_bytes=0,bluefs_items=0,bluestore_alloc_bytes=0,bluestore_alloc_items=0,bluestore_cache_data_bytes=0,bluestore_cache_data_items=0,bluestore_cache_onode_bytes=0,bluestore_cache_onode_items=0,bluestore_cache_other_bytes=0,bluestore_cache_other_items=0,bluestore_fsck_bytes=0,bluestore_fsck_items=0,bluestore_txc_bytes=0,bluestore_txc_items=0,bluestore_writing_bytes=0,bluestore_writing_deferred_bytes=0,bluestore_writing_deferred_items=0,bluestore_writing_items=0,buffer_anon_bytes=225471,buffer_anon_items=163,buffer_meta_bytes=0,buffer_meta_items=0,mds_co_bytes=0,mds_co_items=0,osd_bytes=0,osd_items=0,osd_mapbl_bytes=0,osd_mapbl_items=0,osd_pglog_bytes=0,osd_pglog_items=0,osdmap_bytes=33904,osdmap_items=278,osdmap_mapping_bytes=0,osdmap_mapping_items=0,pgmap_bytes=0,pgmap_items=0,unittest_1_bytes=0,unittest_1_items=0,unittest_2_bytes=0,unittest_2_items=0 1587117156000000000 +> ceph,collection=throttle-msgr_dispatch_throttler-radosclient-0x559896998120,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1652935,get_or_fail_fail=0,get_or_fail_success=1652935,get_started=0,get_sum=276333029,max=104857600,put=1652935,put_sum=276333029,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=AsyncMessenger::Worker-1,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=17,msgr_created_connections=84859,msgr_recv_bytes=211170759,msgr_recv_messages=922646,msgr_running_fast_dispatch_time=31.487443762,msgr_running_recv_time=83.190789333,msgr_running_send_time=174.670510496,msgr_running_total_time=484.22086275,msgr_send_bytes=1322113179,msgr_send_messages=1636839 1587117156000000000 +> ceph,collection=finisher-radosclient-0x559896998080,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw complete_latency.avgcount=0,complete_latency.avgtime=0,complete_latency.sum=0,queue_len=0 1587117156000000000 +> 
ceph,collection=throttle-objecter_ops-0x559896997b80,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw get=1637900,get_or_fail_fail=0,get_or_fail_success=1637900,get_started=0,get_sum=1637900,max=24576,put=1637900,put_sum=1637900,take=0,take_sum=0,val=0,wait.avgcount=0,wait.avgtime=0,wait.sum=0 1587117156000000000 +> ceph,collection=AsyncMessenger::Worker-0,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw msgr_active_connections=18,msgr_created_connections=74757,msgr_recv_bytes=489001094,msgr_recv_messages=1986686,msgr_running_fast_dispatch_time=168.60950961,msgr_running_recv_time=142.903031533,msgr_running_send_time=267.911165712,msgr_running_total_time=824.885614951,msgr_send_bytes=707973504,msgr_send_messages=2463727 1587117156000000000 +> ceph,collection=objecter-0x559896997720,host=stefanrgw1,id=rgw.stefanrgw1.4219.94113851143184,type=rgw command_active=0,command_resend=0,command_send=0,linger_active=0,linger_ping=0,linger_resend=0,linger_send=0,map_epoch=203,map_full=0,map_inc=8,omap_del=0,omap_rd=0,omap_wr=0,op=1637998,op_active=0,op_laggy=0,op_pg=0,op_r=1062803,op_reply=1637998,op_resend=15,op_rmw=0,op_send=1638013,op_send_bytes=63321099,op_w=575195,osd_laggy=0,osd_session_close=125555,osd_session_open=125563,osd_sessions=8,osdop_append=0,osdop_call=1637886,osdop_clonerange=0,osdop_cmpxattr=0,osdop_create=0,osdop_delete=0,osdop_getxattr=0,osdop_mapext=0,osdop_notify=0,osdop_other=112,osdop_pgls=0,osdop_pgls_filter=0,osdop_read=0,osdop_resetxattrs=0,osdop_rmxattr=0,osdop_setxattr=0,osdop_sparse_read=0,osdop_src_cmpxattr=0,osdop_stat=0,osdop_truncate=0,osdop_watch=0,osdop_write=0,osdop_writefull=0,osdop_writesame=0,osdop_zero=0,poolop_active=0,poolop_resend=0,poolop_send=0,poolstat_active=0,poolstat_resend=0,poolstat_send=0,statfs_active=0,statfs_resend=0,statfs_send=0 1587117156000000000 +``` diff --git a/plugins/inputs/ceph/ceph.go b/plugins/inputs/ceph/ceph.go index 0de9cb13b..c875de8df 100644 --- a/plugins/inputs/ceph/ceph.go +++ b/plugins/inputs/ceph/ceph.go @@ -18,8 +18,12 @@ const ( measurement = "ceph" typeMon = "monitor" typeOsd = "osd" + typeMds = "mds" + typeRgw = "rgw" osdPrefix = "ceph-osd" monPrefix = "ceph-mon" + mdsPrefix = "ceph-mds" + rgwPrefix = "ceph-client" sockSuffix = "asok" ) @@ -27,6 +31,8 @@ type Ceph struct { CephBinary string OsdPrefix string MonPrefix string + MdsPrefix string + RgwPrefix string SocketDir string SocketSuffix string CephUser string @@ -36,7 +42,7 @@ type Ceph struct { } func (c *Ceph) Description() string { - return "Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster." + return "Collects performance metrics from the MON, OSD, MDS and RGW nodes in a Ceph storage cluster." } var sampleConfig = ` @@ -55,6 +61,8 @@ var sampleConfig = ` ## prefix of MON and OSD socket files, used to determine socket type mon_prefix = "ceph-mon" osd_prefix = "ceph-osd" + mds_prefix = "ceph-mds" + rgw_prefix = "ceph-client" ## suffix used to identify socket files socket_suffix = "asok" @@ -101,12 +109,12 @@ func (c *Ceph) gatherAdminSocketStats(acc telegraf.Accumulator) error { for _, s := range sockets { dump, err := perfDump(c.CephBinary, s) if err != nil { - acc.AddError(fmt.Errorf("E! error reading from socket '%s': %v", s.socket, err)) + acc.AddError(fmt.Errorf("error reading from socket '%s': %v", s.socket, err)) continue } data, err := parseDump(dump) if err != nil { - acc.AddError(fmt.Errorf("E! 
error parsing dump from socket '%s': %v", s.socket, err)) + acc.AddError(fmt.Errorf("error parsing dump from socket '%s': %v", s.socket, err)) continue } for tag, metrics := range data { @@ -148,6 +156,8 @@ func init() { CephBinary: "/usr/bin/ceph", OsdPrefix: osdPrefix, MonPrefix: monPrefix, + MdsPrefix: mdsPrefix, + RgwPrefix: rgwPrefix, SocketDir: "/var/run/ceph", SocketSuffix: sockSuffix, CephUser: "client.admin", @@ -157,7 +167,6 @@ func init() { } inputs.Add(measurement, func() telegraf.Input { return &c }) - } var perfDump = func(binary string, socket *socket) (string, error) { @@ -166,6 +175,10 @@ var perfDump = func(binary string, socket *socket) (string, error) { cmdArgs = append(cmdArgs, "perf", "dump") } else if socket.sockType == typeMon { cmdArgs = append(cmdArgs, "perfcounters_dump") + } else if socket.sockType == typeMds { + cmdArgs = append(cmdArgs, "perf", "dump") + } else if socket.sockType == typeRgw { + cmdArgs = append(cmdArgs, "perf", "dump") } else { return "", fmt.Errorf("ignoring unknown socket type: %s", socket.sockType) } @@ -200,7 +213,18 @@ var findSockets = func(c *Ceph) ([]*socket, error) { sockPrefix = osdPrefix } - if sockType == typeOsd || sockType == typeMon { + if strings.HasPrefix(f, c.MdsPrefix) { + sockType = typeMds + sockPrefix = mdsPrefix + + } + if strings.HasPrefix(f, c.RgwPrefix) { + sockType = typeRgw + sockPrefix = rgwPrefix + + } + + if sockType == typeOsd || sockType == typeMon || sockType == typeMds || sockType == typeRgw { path := filepath.Join(c.SocketDir, f) sockets = append(sockets, &socket{parseSockId(f, sockPrefix, c.SocketSuffix), sockType, path}) } @@ -278,7 +302,7 @@ func flatten(data interface{}) []*metric { switch val := data.(type) { case float64: - metrics = []*metric{&metric{make([]string, 0, 1), val}} + metrics = []*metric{{make([]string, 0, 1), val}} case map[string]interface{}: metrics = make([]*metric, 0, len(val)) for k, v := range val { @@ -288,12 +312,13 @@ func flatten(data interface{}) []*metric { } } default: - log.Printf("I! Ignoring unexpected type '%T' for value %v", val, val) + log.Printf("I! [inputs.ceph] ignoring unexpected type '%T' for value %v", val, val) } return metrics } +// exec executes the 'ceph' command with the supplied arguments, returning JSON formatted output func (c *Ceph) exec(command string) (string, error) { cmdArgs := []string{"--conf", c.CephConfig, "--name", c.CephUser, "--format", "json"} cmdArgs = append(cmdArgs, strings.Split(command, " ")...) 
@@ -317,145 +342,174 @@ func (c *Ceph) exec(command string) (string, error) { return output, nil } +// CephStatus is used to unmarshal "ceph -s" output +type CephStatus struct { + Health struct { + Status string `json:"status"` + OverallStatus string `json:"overall_status"` + } `json:"health"` + OSDMap struct { + OSDMap struct { + Epoch float64 `json:"epoch"` + NumOSDs float64 `json:"num_osds"` + NumUpOSDs float64 `json:"num_up_osds"` + NumInOSDs float64 `json:"num_in_osds"` + Full bool `json:"full"` + NearFull bool `json:"nearfull"` + NumRemappedPGs float64 `json:"num_remapped_pgs"` + } `json:"osdmap"` + } `json:"osdmap"` + PGMap struct { + PGsByState []struct { + StateName string `json:"state_name"` + Count float64 `json:"count"` + } `json:"pgs_by_state"` + Version float64 `json:"version"` + NumPGs float64 `json:"num_pgs"` + DataBytes float64 `json:"data_bytes"` + BytesUsed float64 `json:"bytes_used"` + BytesAvail float64 `json:"bytes_avail"` + BytesTotal float64 `json:"bytes_total"` + ReadBytesSec float64 `json:"read_bytes_sec"` + WriteBytesSec float64 `json:"write_bytes_sec"` + OpPerSec *float64 `json:"op_per_sec"` // This field is no longer reported in ceph 10 and later + ReadOpPerSec float64 `json:"read_op_per_sec"` + WriteOpPerSec float64 `json:"write_op_per_sec"` + } `json:"pgmap"` +} + +// decodeStatus decodes the output of 'ceph -s' func decodeStatus(acc telegraf.Accumulator, input string) error { - data := make(map[string]interface{}) - err := json.Unmarshal([]byte(input), &data) - if err != nil { + data := &CephStatus{} + if err := json.Unmarshal([]byte(input), data); err != nil { return fmt.Errorf("failed to parse json: '%s': %v", input, err) } - err = decodeStatusOsdmap(acc, data) - if err != nil { - return err + decoders := []func(telegraf.Accumulator, *CephStatus) error{ + decodeStatusHealth, + decodeStatusOsdmap, + decodeStatusPgmap, + decodeStatusPgmapState, } - err = decodeStatusPgmap(acc, data) - if err != nil { - return err - } - - err = decodeStatusPgmapState(acc, data) - if err != nil { - return err + for _, decoder := range decoders { + if err := decoder(acc, data); err != nil { + return err + } } return nil } -func decodeStatusOsdmap(acc telegraf.Accumulator, data map[string]interface{}) error { - osdmap, ok := data["osdmap"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement) +// decodeStatusHealth decodes the health portion of the output of 'ceph status' +func decodeStatusHealth(acc telegraf.Accumulator, data *CephStatus) error { + fields := map[string]interface{}{ + "status": data.Health.Status, + "overall_status": data.Health.OverallStatus, } - fields, ok := osdmap["osdmap"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode osdmap", measurement) + acc.AddFields("ceph_health", fields, map[string]string{}) + return nil +} + +// decodeStatusOsdmap decodes the OSD map portion of the output of 'ceph -s' +func decodeStatusOsdmap(acc telegraf.Accumulator, data *CephStatus) error { + fields := map[string]interface{}{ + "epoch": data.OSDMap.OSDMap.Epoch, + "num_osds": data.OSDMap.OSDMap.NumOSDs, + "num_up_osds": data.OSDMap.OSDMap.NumUpOSDs, + "num_in_osds": data.OSDMap.OSDMap.NumInOSDs, + "full": data.OSDMap.OSDMap.Full, + "nearfull": data.OSDMap.OSDMap.NearFull, + "num_remapped_pgs": data.OSDMap.OSDMap.NumRemappedPGs, } acc.AddFields("ceph_osdmap", fields, map[string]string{}) return nil } -func decodeStatusPgmap(acc telegraf.Accumulator, data map[string]interface{}) error { 
- pgmap, ok := data["pgmap"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode pgmap", measurement) - } - fields := make(map[string]interface{}) - for key, value := range pgmap { - switch value.(type) { - case float64: - fields[key] = value - } +// decodeStatusPgmap decodes the PG map portion of the output of 'ceph -s' +func decodeStatusPgmap(acc telegraf.Accumulator, data *CephStatus) error { + fields := map[string]interface{}{ + "version": data.PGMap.Version, + "num_pgs": data.PGMap.NumPGs, + "data_bytes": data.PGMap.DataBytes, + "bytes_used": data.PGMap.BytesUsed, + "bytes_avail": data.PGMap.BytesAvail, + "bytes_total": data.PGMap.BytesTotal, + "read_bytes_sec": data.PGMap.ReadBytesSec, + "write_bytes_sec": data.PGMap.WriteBytesSec, + "op_per_sec": data.PGMap.OpPerSec, // This field is no longer reported in ceph 10 and later + "read_op_per_sec": data.PGMap.ReadOpPerSec, + "write_op_per_sec": data.PGMap.WriteOpPerSec, } acc.AddFields("ceph_pgmap", fields, map[string]string{}) return nil } -func extractPgmapStates(data map[string]interface{}) ([]interface{}, error) { - const key = "pgs_by_state" - - pgmap, ok := data["pgmap"].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("WARNING %s - unable to decode pgmap", measurement) - } - - s, ok := pgmap[key] - if !ok { - return nil, fmt.Errorf("WARNING %s - pgmap is missing the %s field", measurement, key) - } - - states, ok := s.([]interface{}) - if !ok { - return nil, fmt.Errorf("WARNING %s - pgmap[%s] is not a list", measurement, key) - } - return states, nil -} - -func decodeStatusPgmapState(acc telegraf.Accumulator, data map[string]interface{}) error { - states, err := extractPgmapStates(data) - if err != nil { - return err - } - for _, state := range states { - stateMap, ok := state.(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode pg state", measurement) - } - stateName, ok := stateMap["state_name"].(string) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode pg state name", measurement) - } - stateCount, ok := stateMap["count"].(float64) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode pg state count", measurement) - } - +// decodeStatusPgmapState decodes the PG map state portion of the output of 'ceph -s' +func decodeStatusPgmapState(acc telegraf.Accumulator, data *CephStatus) error { + for _, pgState := range data.PGMap.PGsByState { tags := map[string]string{ - "state": stateName, + "state": pgState.StateName, } fields := map[string]interface{}{ - "count": stateCount, + "count": pgState.Count, } acc.AddFields("ceph_pgmap_state", fields, tags) } return nil } +// CephDF is used to unmarshal 'ceph df' output +type CephDf struct { + Stats struct { + TotalSpace *float64 `json:"total_space"` // pre ceph 0.84 + TotalUsed *float64 `json:"total_used"` // pre ceph 0.84 + TotalAvail *float64 `json:"total_avail"` // pre ceph 0.84 + TotalBytes *float64 `json:"total_bytes"` + TotalUsedBytes *float64 `json:"total_used_bytes"` + TotalAvailBytes *float64 `json:"total_avail_bytes"` + } `json:"stats"` + Pools []struct { + Name string `json:"name"` + Stats struct { + KBUsed float64 `json:"kb_used"` + BytesUsed float64 `json:"bytes_used"` + Objects float64 `json:"objects"` + PercentUsed *float64 `json:"percent_used"` + MaxAvail *float64 `json:"max_avail"` + } `json:"stats"` + } `json:"pools"` +} + +// decodeDf decodes the output of 'ceph df' func decodeDf(acc telegraf.Accumulator, input string) error { - data := make(map[string]interface{}) - err 
:= json.Unmarshal([]byte(input), &data) - if err != nil { + data := &CephDf{} + if err := json.Unmarshal([]byte(input), data); err != nil { return fmt.Errorf("failed to parse json: '%s': %v", input, err) } // ceph.usage: records global utilization and number of objects - stats_fields, ok := data["stats"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df stats", measurement) + fields := map[string]interface{}{ + "total_space": data.Stats.TotalSpace, + "total_used": data.Stats.TotalUsed, + "total_avail": data.Stats.TotalAvail, + "total_bytes": data.Stats.TotalBytes, + "total_used_bytes": data.Stats.TotalUsedBytes, + "total_avail_bytes": data.Stats.TotalAvailBytes, } - acc.AddFields("ceph_usage", stats_fields, map[string]string{}) + acc.AddFields("ceph_usage", fields, map[string]string{}) // ceph.pool.usage: records per pool utilization and number of objects - pools, ok := data["pools"].([]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df pools", measurement) - } - - for _, pool := range pools { - pool_map, ok := pool.(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df pool", measurement) - } - pool_name, ok := pool_map["name"].(string) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df pool name", measurement) - } - fields, ok := pool_map["stats"].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode df pool stats", measurement) - } + for _, pool := range data.Pools { tags := map[string]string{ - "name": pool_name, + "name": pool.Name, + } + fields := map[string]interface{}{ + "kb_used": pool.Stats.KBUsed, + "bytes_used": pool.Stats.BytesUsed, + "objects": pool.Stats.Objects, + "percent_used": pool.Stats.PercentUsed, + "max_avail": pool.Stats.MaxAvail, } acc.AddFields("ceph_pool_usage", fields, tags) } @@ -463,36 +517,44 @@ func decodeDf(acc telegraf.Accumulator, input string) error { return nil } +// CephOSDPoolStats is used to unmarshal 'ceph osd pool stats' output +type CephOSDPoolStats []struct { + PoolName string `json:"pool_name"` + ClientIORate struct { + ReadBytesSec float64 `json:"read_bytes_sec"` + WriteBytesSec float64 `json:"write_bytes_sec"` + OpPerSec *float64 `json:"op_per_sec"` // This field is no longer reported in ceph 10 and later + ReadOpPerSec float64 `json:"read_op_per_sec"` + WriteOpPerSec float64 `json:"write_op_per_sec"` + } `json:"client_io_rate"` + RecoveryRate struct { + RecoveringObjectsPerSec float64 `json:"recovering_objects_per_sec"` + RecoveringBytesPerSec float64 `json:"recovering_bytes_per_sec"` + RecoveringKeysPerSec float64 `json:"recovering_keys_per_sec"` + } `json:"recovery_rate"` +} + +// decodeOsdPoolStats decodes the output of 'ceph osd pool stats' func decodeOsdPoolStats(acc telegraf.Accumulator, input string) error { - data := make([]map[string]interface{}, 0) - err := json.Unmarshal([]byte(input), &data) - if err != nil { + data := CephOSDPoolStats{} + if err := json.Unmarshal([]byte(input), &data); err != nil { return fmt.Errorf("failed to parse json: '%s': %v", input, err) } // ceph.pool.stats: records pre pool IO and recovery throughput for _, pool := range data { - pool_name, ok := pool["pool_name"].(string) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode osd pool stats name", measurement) - } - // Note: the 'recovery' object looks broken (in hammer), so it's omitted - objects := []string{ - "client_io_rate", - "recovery_rate", - } - fields := make(map[string]interface{}) - for 
_, object := range objects { - perfdata, ok := pool[object].(map[string]interface{}) - if !ok { - return fmt.Errorf("WARNING %s - unable to decode osd pool stats", measurement) - } - for key, value := range perfdata { - fields[key] = value - } - } tags := map[string]string{ - "name": pool_name, + "name": pool.PoolName, + } + fields := map[string]interface{}{ + "read_bytes_sec": pool.ClientIORate.ReadBytesSec, + "write_bytes_sec": pool.ClientIORate.WriteBytesSec, + "op_per_sec": pool.ClientIORate.OpPerSec, // This field is no longer reported in ceph 10 and later + "read_op_per_sec": pool.ClientIORate.ReadOpPerSec, + "write_op_per_sec": pool.ClientIORate.WriteOpPerSec, + "recovering_objects_per_sec": pool.RecoveryRate.RecoveringObjectsPerSec, + "recovering_bytes_per_sec": pool.RecoveryRate.RecoveringBytesPerSec, + "recovering_keys_per_sec": pool.RecoveryRate.RecoveringKeysPerSec, } acc.AddFields("ceph_pool_stats", fields, tags) } diff --git a/plugins/inputs/ceph/ceph_test.go b/plugins/inputs/ceph/ceph_test.go index f4a3ebb83..78da3438d 100644 --- a/plugins/inputs/ceph/ceph_test.go +++ b/plugins/inputs/ceph/ceph_test.go @@ -1,7 +1,6 @@ package ceph import ( - "encoding/json" "fmt" "io/ioutil" "os" @@ -18,6 +17,12 @@ const ( epsilon = float64(0.00000001) ) +type expectedResult struct { + metric string + fields map[string]interface{} + tags map[string]string +} + func TestParseSockId(t *testing.T) { s := parseSockId(sockFile(osdPrefix, 1), osdPrefix, sockSuffix) assert.Equal(t, s, "1") @@ -37,26 +42,47 @@ func TestParseOsdDump(t *testing.T) { assert.Equal(t, float64(0), dump["mutex-FileJournal::finisher_lock"]["wait.avgcount"]) } -func TestDecodeStatusPgmapState(t *testing.T) { - data := make(map[string]interface{}) - err := json.Unmarshal([]byte(clusterStatusDump), &data) +func TestParseMdsDump(t *testing.T) { + dump, err := parseDump(mdsPerfDump) assert.NoError(t, err) + assert.InEpsilon(t, 2408386.600934982, dump["mds"]["reply_latency.sum"], epsilon) + assert.Equal(t, float64(0), dump["throttle-write_buf_throttle"]["wait.avgcount"]) +} +func TestParseRgwDump(t *testing.T) { + dump, err := parseDump(rgwPerfDump) + assert.NoError(t, err) + assert.InEpsilon(t, 0.002219876, dump["rgw"]["get_initial_lat.sum"], epsilon) + assert.Equal(t, float64(0), dump["rgw"]["put_initial_lat.avgcount"]) +} + +func TestDecodeStatus(t *testing.T) { acc := &testutil.Accumulator{} - err = decodeStatusPgmapState(acc, data) + err := decodeStatus(acc, clusterStatusDump) assert.NoError(t, err) - var results = []struct { - fields map[string]interface{} - tags map[string]string - }{ - {map[string]interface{}{"count": float64(2560)}, map[string]string{"state": "active+clean"}}, - {map[string]interface{}{"count": float64(10)}, map[string]string{"state": "active+scrubbing"}}, - {map[string]interface{}{"count": float64(5)}, map[string]string{"state": "active+backfilling"}}, + for _, r := range cephStatusResults { + acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) } +} - for _, r := range results { - acc.AssertContainsTaggedFields(t, "ceph_pgmap_state", r.fields, r.tags) +func TestDecodeDf(t *testing.T) { + acc := &testutil.Accumulator{} + err := decodeDf(acc, cephDFDump) + assert.NoError(t, err) + + for _, r := range cephDfResults { + acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) + } +} + +func TestDecodeOSDPoolStats(t *testing.T) { + acc := &testutil.Accumulator{} + err := decodeOsdPoolStats(acc, cephODSPoolStatsDump) + assert.NoError(t, err) + + for _, r := range cephOSDPoolStatsResults { + 
acc.AssertContainsTaggedFields(t, r.metric, r.fields, r.tags) } } @@ -69,7 +95,7 @@ func TestGather(t *testing.T) { }() findSockets = func(c *Ceph) ([]*socket, error) { - return []*socket{&socket{"osd.1", typeOsd, ""}}, nil + return []*socket{{"osd.1", typeOsd, ""}}, nil } perfDump = func(binary string, s *socket) (string, error) { @@ -93,6 +119,8 @@ func TestFindSockets(t *testing.T) { CephBinary: "foo", OsdPrefix: "ceph-osd", MonPrefix: "ceph-mon", + MdsPrefix: "ceph-mds", + RgwPrefix: "ceph-client", SocketDir: tmpdir, SocketSuffix: "asok", CephUser: "client.admin", @@ -114,6 +142,12 @@ func TestFindSockets(t *testing.T) { for i := 1; i <= st.mons; i++ { assertFoundSocket(t, tmpdir, typeMon, i, sockets) } + for i := 1; i <= st.mdss; i++ { + assertFoundSocket(t, tmpdir, typeMds, i, sockets) + } + for i := 1; i <= st.rgws; i++ { + assertFoundSocket(t, tmpdir, typeRgw, i, sockets) + } cleanupTestFiles(tmpdir, st) } } @@ -122,6 +156,10 @@ func assertFoundSocket(t *testing.T, dir, sockType string, i int, sockets []*soc var prefix string if sockType == typeOsd { prefix = osdPrefix + } else if sockType == typeMds { + prefix = mdsPrefix + } else if sockType == typeRgw { + prefix = rgwPrefix } else { prefix = monPrefix } @@ -170,25 +208,41 @@ func tstFileApply(st *SockTest, fn func(prefix string, i int)) { for i := 1; i <= st.mons; i++ { fn(monPrefix, i) } + for i := 1; i <= st.mdss; i++ { + fn(mdsPrefix, i) + } + for i := 1; i <= st.rgws; i++ { + fn(rgwPrefix, i) + } } type SockTest struct { osds int mons int + mdss int + rgws int } var sockTestParams = []*SockTest{ - &SockTest{ + { osds: 2, mons: 2, + mdss: 2, + rgws: 2, }, - &SockTest{ + { mons: 1, }, - &SockTest{ + { osds: 1, }, - &SockTest{}, + { + mdss: 1, + }, + { + rgws: 1, + }, + {}, } var monPerfDump = ` @@ -710,6 +764,996 @@ var osdPerfDump = ` "wait": { "avgcount": 0, "sum": 0.000000000}}} ` +var mdsPerfDump = ` +{ + "AsyncMessenger::Worker-0": { + "msgr_recv_messages": 2723536628, + "msgr_send_messages": 1160771414, + "msgr_recv_bytes": 1112936719134, + "msgr_send_bytes": 1368194904867, + "msgr_created_connections": 18281, + "msgr_active_connections": 83, + "msgr_running_total_time": 109001.938705141, + "msgr_running_send_time": 33686.215323581, + "msgr_running_recv_time": 8374950.111041426, + "msgr_running_fast_dispatch_time": 5828.083761243 + }, + "AsyncMessenger::Worker-1": { + "msgr_recv_messages": 1426105165, + "msgr_send_messages": 783174767, + "msgr_recv_bytes": 800620150187, + "msgr_send_bytes": 1394738277392, + "msgr_created_connections": 17677, + "msgr_active_connections": 100, + "msgr_running_total_time": 70660.929329800, + "msgr_running_send_time": 24190.940207198, + "msgr_running_recv_time": 3920894.209204916, + "msgr_running_fast_dispatch_time": 8206.816536602 + }, + "AsyncMessenger::Worker-2": { + "msgr_recv_messages": 3471200310, + "msgr_send_messages": 2757725529, + "msgr_recv_bytes": 1331676471794, + "msgr_send_bytes": 2593968875674, + "msgr_created_connections": 16714, + "msgr_active_connections": 73, + "msgr_running_total_time": 167020.893916556, + "msgr_running_send_time": 61197.682840176, + "msgr_running_recv_time": 5816036.495319415, + "msgr_running_fast_dispatch_time": 8581.768789481 + }, + "finisher-PurgeQueue": { + "queue_len": 0, + "complete_latency": { + "avgcount": 20170260, + "sum": 70213.859039869, + "avgtime": 0.003481058 + } + }, + "mds": { + "request": 2167457412, + "reply": 2167457403, + "reply_latency": { + "avgcount": 2167457403, + "sum": 2408386.600934982, + "avgtime": 0.001111157 + }, + 
"forward": 0, + "dir_fetch": 585012985, + "dir_commit": 58926158, + "dir_split": 8, + "dir_merge": 7, + "inode_max": 2147483647, + "inodes": 39604287, + "inodes_top": 9743493, + "inodes_bottom": 29063656, + "inodes_pin_tail": 797138, + "inodes_pinned": 25685011, + "inodes_expired": 1302542128, + "inodes_with_caps": 4517329, + "caps": 6370838, + "subtrees": 2, + "traverse": 2426357623, + "traverse_hit": 2202314009, + "traverse_forward": 0, + "traverse_discover": 0, + "traverse_dir_fetch": 35332112, + "traverse_remote_ino": 0, + "traverse_lock": 4371557, + "load_cent": 1966748, + "q": 976, + "exported": 0, + "exported_inodes": 0, + "imported": 0, + "imported_inodes": 0, + "openino_dir_fetch": 22725418, + "openino_backtrace_fetch": 6, + "openino_peer_discover": 0 + }, + "mds_cache": { + "num_strays": 384, + "num_strays_delayed": 0, + "num_strays_enqueuing": 0, + "strays_created": 29140050, + "strays_enqueued": 29134399, + "strays_reintegrated": 10171, + "strays_migrated": 0, + "num_recovering_processing": 0, + "num_recovering_enqueued": 0, + "num_recovering_prioritized": 0, + "recovery_started": 229, + "recovery_completed": 229, + "ireq_enqueue_scrub": 0, + "ireq_exportdir": 0, + "ireq_flush": 0, + "ireq_fragmentdir": 15, + "ireq_fragstats": 0, + "ireq_inodestats": 0 + }, + "mds_log": { + "evadd": 1920368707, + "evex": 1920372003, + "evtrm": 1920372003, + "ev": 106627, + "evexg": 0, + "evexd": 4369, + "segadd": 2247990, + "segex": 2247995, + "segtrm": 2247995, + "seg": 123, + "segexg": 0, + "segexd": 5, + "expos": 24852063335817, + "wrpos": 24852205446582, + "rdpos": 22044255640175, + "jlat": { + "avgcount": 182241259, + "sum": 1732094.198366820, + "avgtime": 0.009504402 + }, + "replayed": 109923 + }, + "mds_mem": { + "ino": 39604292, + "ino+": 1307214891, + "ino-": 1267610599, + "dir": 22827008, + "dir+": 591593031, + "dir-": 568766023, + "dn": 39604761, + "dn+": 1376976677, + "dn-": 1337371916, + "cap": 6370838, + "cap+": 1720930015, + "cap-": 1714559177, + "rss": 167723320, + "heap": 322260, + "buf": 0 + }, + "mds_server": { + "dispatch_client_request": 2932764331, + "dispatch_server_request": 0, + "handle_client_request": 2167457412, + "handle_client_session": 10929454, + "handle_slave_request": 0, + "req_create_latency": { + "avgcount": 30590326, + "sum": 23887.274170412, + "avgtime": 0.000780876 + }, + "req_getattr_latency": { + "avgcount": 124767480, + "sum": 718160.497644305, + "avgtime": 0.005755991 + }, + "req_getfilelock_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_link_latency": { + "avgcount": 5636, + "sum": 2.371499732, + "avgtime": 0.000420777 + }, + "req_lookup_latency": { + "avgcount": 474590034, + "sum": 452548.849373476, + "avgtime": 0.000953557 + }, + "req_lookuphash_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_lookupino_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_lookupname_latency": { + "avgcount": 9794, + "sum": 54.118496591, + "avgtime": 0.005525678 + }, + "req_lookupparent_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_lookupsnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_lssnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_mkdir_latency": { + "avgcount": 13394317, + "sum": 13025.982105531, + "avgtime": 0.000972500 + }, + "req_mknod_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + 
"req_mksnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_open_latency": { + "avgcount": 32849768, + "sum": 12862.382994977, + "avgtime": 0.000391551 + }, + "req_readdir_latency": { + "avgcount": 654394394, + "sum": 715669.609601541, + "avgtime": 0.001093636 + }, + "req_rename_latency": { + "avgcount": 6058807, + "sum": 2126.232719555, + "avgtime": 0.000350932 + }, + "req_renamesnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_rmdir_latency": { + "avgcount": 1901530, + "sum": 4064.121157858, + "avgtime": 0.002137290 + }, + "req_rmsnap_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_rmxattr_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_setattr_latency": { + "avgcount": 37051209, + "sum": 171198.037329531, + "avgtime": 0.004620578 + }, + "req_setdirlayout_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_setfilelock_latency": { + "avgcount": 765439143, + "sum": 262660.582883819, + "avgtime": 0.000343150 + }, + "req_setlayout_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "req_setxattr_latency": { + "avgcount": 41572, + "sum": 7.273371375, + "avgtime": 0.000174958 + }, + "req_symlink_latency": { + "avgcount": 329, + "sum": 0.117859965, + "avgtime": 0.000358236 + }, + "req_unlink_latency": { + "avgcount": 26363064, + "sum": 32119.149726314, + "avgtime": 0.001218339 + }, + "cap_revoke_eviction": 0 + }, + "mds_sessions": { + "session_count": 80, + "session_add": 90, + "session_remove": 10, + "sessions_open": 80, + "sessions_stale": 0, + "total_load": 112490, + "average_load": 1406, + "avg_session_uptime": 2221807 + }, + "objecter": { + "op_active": 0, + "op_laggy": 0, + "op_send": 955060080, + "op_send_bytes": 3178832110019, + "op_resend": 67, + "op_reply": 955060013, + "op": 955060013, + "op_r": 585982837, + "op_w": 369077176, + "op_rmw": 0, + "op_pg": 0, + "osdop_stat": 45924375, + "osdop_create": 31162274, + "osdop_read": 969513, + "osdop_write": 183211164, + "osdop_writefull": 1063233, + "osdop_writesame": 0, + "osdop_append": 0, + "osdop_zero": 2, + "osdop_truncate": 8, + "osdop_delete": 60594735, + "osdop_mapext": 0, + "osdop_sparse_read": 0, + "osdop_clonerange": 0, + "osdop_getxattr": 584941886, + "osdop_setxattr": 62324548, + "osdop_cmpxattr": 0, + "osdop_rmxattr": 0, + "osdop_resetxattrs": 0, + "osdop_tmap_up": 0, + "osdop_tmap_put": 0, + "osdop_tmap_get": 0, + "osdop_call": 0, + "osdop_watch": 0, + "osdop_notify": 0, + "osdop_src_cmpxattr": 0, + "osdop_pgls": 0, + "osdop_pgls_filter": 0, + "osdop_other": 32053182, + "linger_active": 0, + "linger_send": 0, + "linger_resend": 0, + "linger_ping": 0, + "poolop_active": 0, + "poolop_send": 0, + "poolop_resend": 0, + "poolstat_active": 0, + "poolstat_send": 0, + "poolstat_resend": 0, + "statfs_active": 0, + "statfs_send": 0, + "statfs_resend": 0, + "command_active": 0, + "command_send": 0, + "command_resend": 0, + "map_epoch": 66793, + "map_full": 0, + "map_inc": 1762, + "osd_sessions": 120, + "osd_session_open": 52554, + "osd_session_close": 52434, + "osd_laggy": 0, + "omap_wr": 106692727, + "omap_rd": 1170026044, + "omap_del": 5674762 + }, + "purge_queue": { + "pq_executing_ops": 0, + "pq_executing": 0, + "pq_executed": 29134399 + }, + "throttle-msgr_dispatch_throttler-mds": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 7620842095, + "get_sum": 2681291022887, + "get_or_fail_fail": 53, 
+ "get_or_fail_success": 7620842095, + "take": 0, + "take_sum": 0, + "put": 7620842095, + "put_sum": 2681291022887, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_bytes": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 955060013, + "take_sum": 3172776432475, + "put": 862340641, + "put_sum": 3172776432475, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_ops": { + "val": 0, + "max": 1024, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 955060013, + "take_sum": 955060013, + "put": 955060013, + "put_sum": 955060013, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-write_buf_throttle": { + "val": 0, + "max": 3758096384, + "get_started": 0, + "get": 29134399, + "get_sum": 3160498139, + "get_or_fail_fail": 0, + "get_or_fail_success": 29134399, + "take": 0, + "take_sum": 0, + "put": 969905, + "put_sum": 3160498139, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-write_buf_throttle-0x561894f0b8e0": { + "val": 286270, + "max": 3758096384, + "get_started": 0, + "get": 1920368707, + "get_sum": 2807949805409, + "get_or_fail_fail": 0, + "get_or_fail_success": 1920368707, + "take": 0, + "take_sum": 0, + "put": 182241259, + "put_sum": 2807949519139, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + } +} +` + +var rgwPerfDump = ` +{ + "AsyncMessenger::Worker-0": { + "msgr_recv_messages": 10684185, + "msgr_send_messages": 13448962, + "msgr_recv_bytes": 2622531258, + "msgr_send_bytes": 4195038384, + "msgr_created_connections": 8029, + "msgr_active_connections": 3, + "msgr_running_total_time": 3249.441108544, + "msgr_running_send_time": 739.821446096, + "msgr_running_recv_time": 310.354319110, + "msgr_running_fast_dispatch_time": 1915.410317430 + }, + "AsyncMessenger::Worker-1": { + "msgr_recv_messages": 2137773, + "msgr_send_messages": 3850070, + "msgr_recv_bytes": 503824366, + "msgr_send_bytes": 1130107261, + "msgr_created_connections": 11030, + "msgr_active_connections": 1, + "msgr_running_total_time": 445.055291782, + "msgr_running_send_time": 227.817750758, + "msgr_running_recv_time": 78.974093226, + "msgr_running_fast_dispatch_time": 47.587740615 + }, + "AsyncMessenger::Worker-2": { + "msgr_recv_messages": 2809014, + "msgr_send_messages": 4126613, + "msgr_recv_bytes": 653093470, + "msgr_send_bytes": 1022041970, + "msgr_created_connections": 14810, + "msgr_active_connections": 5, + "msgr_running_total_time": 453.384703728, + "msgr_running_send_time": 208.580910390, + "msgr_running_recv_time": 80.075306670, + "msgr_running_fast_dispatch_time": 46.854112208 + }, + "cct": { + "total_workers": 0, + "unhealthy_workers": 0 + }, + "finisher-radosclient": { + "queue_len": 0, + "complete_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "finisher-radosclient-0x55994098e460": { + "queue_len": 0, + "complete_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "finisher-radosclient-0x5599409901c0": { + "queue_len": 0, + "complete_latency": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "mempool": { + "bloom_filter_bytes": 0, + "bloom_filter_items": 0, + "bluestore_alloc_bytes": 0, + "bluestore_alloc_items": 0, + 
"bluestore_cache_data_bytes": 0, + "bluestore_cache_data_items": 0, + "bluestore_cache_onode_bytes": 0, + "bluestore_cache_onode_items": 0, + "bluestore_cache_other_bytes": 0, + "bluestore_cache_other_items": 0, + "bluestore_fsck_bytes": 0, + "bluestore_fsck_items": 0, + "bluestore_txc_bytes": 0, + "bluestore_txc_items": 0, + "bluestore_writing_deferred_bytes": 0, + "bluestore_writing_deferred_items": 0, + "bluestore_writing_bytes": 0, + "bluestore_writing_items": 0, + "bluefs_bytes": 0, + "bluefs_items": 0, + "buffer_anon_bytes": 258469, + "buffer_anon_items": 201, + "buffer_meta_bytes": 0, + "buffer_meta_items": 0, + "osd_bytes": 0, + "osd_items": 0, + "osd_mapbl_bytes": 0, + "osd_mapbl_items": 0, + "osd_pglog_bytes": 0, + "osd_pglog_items": 0, + "osdmap_bytes": 74448, + "osdmap_items": 732, + "osdmap_mapping_bytes": 0, + "osdmap_mapping_items": 0, + "pgmap_bytes": 0, + "pgmap_items": 0, + "mds_co_bytes": 0, + "mds_co_items": 0, + "unittest_1_bytes": 0, + "unittest_1_items": 0, + "unittest_2_bytes": 0, + "unittest_2_items": 0 + }, + "objecter": { + "op_active": 0, + "op_laggy": 0, + "op_send": 9377910, + "op_send_bytes": 312, + "op_resend": 0, + "op_reply": 9377904, + "op": 9377910, + "op_r": 2755291, + "op_w": 6622619, + "op_rmw": 0, + "op_pg": 0, + "osdop_stat": 2755258, + "osdop_create": 8, + "osdop_read": 25, + "osdop_write": 0, + "osdop_writefull": 0, + "osdop_writesame": 0, + "osdop_append": 0, + "osdop_zero": 0, + "osdop_truncate": 0, + "osdop_delete": 0, + "osdop_mapext": 0, + "osdop_sparse_read": 0, + "osdop_clonerange": 0, + "osdop_getxattr": 0, + "osdop_setxattr": 0, + "osdop_cmpxattr": 0, + "osdop_rmxattr": 0, + "osdop_resetxattrs": 0, + "osdop_call": 0, + "osdop_watch": 6622611, + "osdop_notify": 0, + "osdop_src_cmpxattr": 0, + "osdop_pgls": 0, + "osdop_pgls_filter": 0, + "osdop_other": 2755266, + "linger_active": 8, + "linger_send": 35, + "linger_resend": 27, + "linger_ping": 6622576, + "poolop_active": 0, + "poolop_send": 0, + "poolop_resend": 0, + "poolstat_active": 0, + "poolstat_send": 0, + "poolstat_resend": 0, + "statfs_active": 0, + "statfs_send": 0, + "statfs_resend": 0, + "command_active": 0, + "command_send": 0, + "command_resend": 0, + "map_epoch": 1064, + "map_full": 0, + "map_inc": 106, + "osd_sessions": 8, + "osd_session_open": 11928, + "osd_session_close": 11920, + "osd_laggy": 5, + "omap_wr": 0, + "omap_rd": 0, + "omap_del": 0 + }, + "objecter-0x55994098e500": { + "op_active": 0, + "op_laggy": 0, + "op_send": 827839, + "op_send_bytes": 0, + "op_resend": 0, + "op_reply": 827839, + "op": 827839, + "op_r": 0, + "op_w": 827839, + "op_rmw": 0, + "op_pg": 0, + "osdop_stat": 0, + "osdop_create": 0, + "osdop_read": 0, + "osdop_write": 0, + "osdop_writefull": 0, + "osdop_writesame": 0, + "osdop_append": 0, + "osdop_zero": 0, + "osdop_truncate": 0, + "osdop_delete": 0, + "osdop_mapext": 0, + "osdop_sparse_read": 0, + "osdop_clonerange": 0, + "osdop_getxattr": 0, + "osdop_setxattr": 0, + "osdop_cmpxattr": 0, + "osdop_rmxattr": 0, + "osdop_resetxattrs": 0, + "osdop_call": 0, + "osdop_watch": 827839, + "osdop_notify": 0, + "osdop_src_cmpxattr": 0, + "osdop_pgls": 0, + "osdop_pgls_filter": 0, + "osdop_other": 0, + "linger_active": 1, + "linger_send": 3, + "linger_resend": 2, + "linger_ping": 827836, + "poolop_active": 0, + "poolop_send": 0, + "poolop_resend": 0, + "poolstat_active": 0, + "poolstat_send": 0, + "poolstat_resend": 0, + "statfs_active": 0, + "statfs_send": 0, + "statfs_resend": 0, + "command_active": 0, + "command_send": 0, + "command_resend": 0, + 
"map_epoch": 1064, + "map_full": 0, + "map_inc": 106, + "osd_sessions": 1, + "osd_session_open": 1, + "osd_session_close": 0, + "osd_laggy": 1, + "omap_wr": 0, + "omap_rd": 0, + "omap_del": 0 + }, + "objecter-0x55994098f720": { + "op_active": 0, + "op_laggy": 0, + "op_send": 5415951, + "op_send_bytes": 205291238, + "op_resend": 8, + "op_reply": 5415943, + "op": 5415943, + "op_r": 3612105, + "op_w": 1803838, + "op_rmw": 0, + "op_pg": 0, + "osdop_stat": 0, + "osdop_create": 0, + "osdop_read": 0, + "osdop_write": 0, + "osdop_writefull": 0, + "osdop_writesame": 0, + "osdop_append": 0, + "osdop_zero": 0, + "osdop_truncate": 0, + "osdop_delete": 0, + "osdop_mapext": 0, + "osdop_sparse_read": 0, + "osdop_clonerange": 0, + "osdop_getxattr": 0, + "osdop_setxattr": 0, + "osdop_cmpxattr": 0, + "osdop_rmxattr": 0, + "osdop_resetxattrs": 0, + "osdop_call": 5415567, + "osdop_watch": 0, + "osdop_notify": 0, + "osdop_src_cmpxattr": 0, + "osdop_pgls": 0, + "osdop_pgls_filter": 0, + "osdop_other": 376, + "linger_active": 0, + "linger_send": 0, + "linger_resend": 0, + "linger_ping": 0, + "poolop_active": 0, + "poolop_send": 0, + "poolop_resend": 0, + "poolstat_active": 0, + "poolstat_send": 0, + "poolstat_resend": 0, + "statfs_active": 0, + "statfs_send": 0, + "statfs_resend": 0, + "command_active": 0, + "command_send": 0, + "command_resend": 0, + "map_epoch": 1064, + "map_full": 0, + "map_inc": 106, + "osd_sessions": 8, + "osd_session_open": 8834, + "osd_session_close": 8826, + "osd_laggy": 0, + "omap_wr": 0, + "omap_rd": 0, + "omap_del": 0 + }, + "rgw": { + "req": 2755258, + "failed_req": 0, + "get": 0, + "get_b": 0, + "get_initial_lat": { + "avgcount": 0, + "sum": 0.002219876, + "avgtime": 0.000000000 + }, + "put": 0, + "put_b": 0, + "put_initial_lat": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + }, + "qlen": 0, + "qactive": 0, + "cache_hit": 0, + "cache_miss": 2755261, + "keystone_token_cache_hit": 0, + "keystone_token_cache_miss": 0, + "gc_retire_object": 0, + "pubsub_event_triggered": 0, + "pubsub_event_lost": 0, + "pubsub_store_ok": 0, + "pubsub_store_fail": 0, + "pubsub_events": 0, + "pubsub_push_ok": 0, + "pubsub_push_failed": 0, + "pubsub_push_pending": 0 + }, + "simple-throttler": { + "throttle": 0 + }, + "throttle-msgr_dispatch_throttler-radosclient": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 9379775, + "get_sum": 1545393284, + "get_or_fail_fail": 0, + "get_or_fail_success": 9379775, + "take": 0, + "take_sum": 0, + "put": 9379775, + "put_sum": 1545393284, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-msgr_dispatch_throttler-radosclient-0x55994098e320": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 829631, + "get_sum": 162850310, + "get_or_fail_fail": 0, + "get_or_fail_success": 829631, + "take": 0, + "take_sum": 0, + "put": 829631, + "put_sum": 162850310, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-msgr_dispatch_throttler-radosclient-0x55994098fa40": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 5421553, + "get_sum": 914508527, + "get_or_fail_fail": 0, + "get_or_fail_success": 5421553, + "take": 0, + "take_sum": 0, + "put": 5421553, + "put_sum": 914508527, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_bytes": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 2755292, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 2755292, + 
"take": 0, + "take_sum": 0, + "put": 0, + "put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_bytes-0x55994098e780": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 0, + "take_sum": 0, + "put": 0, + "put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_bytes-0x55994098f7c0": { + "val": 0, + "max": 104857600, + "get_started": 0, + "get": 5415614, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 5415614, + "take": 0, + "take_sum": 0, + "put": 0, + "put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_ops": { + "val": 0, + "max": 24576, + "get_started": 0, + "get": 2755292, + "get_sum": 2755292, + "get_or_fail_fail": 0, + "get_or_fail_success": 2755292, + "take": 0, + "take_sum": 0, + "put": 2755292, + "put_sum": 2755292, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_ops-0x55994098e640": { + "val": 0, + "max": 24576, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 0, + "take_sum": 0, + "put": 0, + "put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-objecter_ops-0x55994098f0e0": { + "val": 0, + "max": 24576, + "get_started": 0, + "get": 5415614, + "get_sum": 5415614, + "get_or_fail_fail": 0, + "get_or_fail_success": 5415614, + "take": 0, + "take_sum": 0, + "put": 5415614, + "put_sum": 5415614, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + }, + "throttle-rgw_async_rados_ops": { + "val": 0, + "max": 64, + "get_started": 0, + "get": 0, + "get_sum": 0, + "get_or_fail_fail": 0, + "get_or_fail_success": 0, + "take": 0, + "take_sum": 0, + "put": 0, + "put_sum": 0, + "wait": { + "avgcount": 0, + "sum": 0.000000000, + "avgtime": 0.000000000 + } + } +} +` + var clusterStatusDump = ` { "health": { @@ -823,7 +1867,9 @@ var clusterStatusDump = ` "bytes_total": 17335810048000, "read_bytes_sec": 0, "write_bytes_sec": 367217, - "op_per_sec": 98 + "op_per_sec": 98, + "read_op_per_sec": 322, + "write_op_per_sec": 1022 }, "mdsmap": { "epoch": 1, @@ -834,3 +1880,229 @@ var clusterStatusDump = ` } } ` + +var cephStatusResults = []expectedResult{ + { + metric: "ceph_osdmap", + fields: map[string]interface{}{ + "epoch": float64(21734), + "num_osds": float64(24), + "num_up_osds": float64(24), + "num_in_osds": float64(24), + "full": false, + "nearfull": false, + "num_remapped_pgs": float64(0), + }, + tags: map[string]string{}, + }, + { + metric: "ceph_pgmap", + fields: map[string]interface{}{ + "version": float64(52314277), + "num_pgs": float64(2560), + "data_bytes": float64(2700031960713), + "bytes_used": float64(7478347665408), + "bytes_avail": float64(9857462382592), + "bytes_total": float64(17335810048000), + "read_bytes_sec": float64(0), + "write_bytes_sec": float64(367217), + "op_per_sec": pf(98), + "read_op_per_sec": float64(322), + "write_op_per_sec": float64(1022), + }, + tags: map[string]string{}, + }, + { + metric: "ceph_pgmap_state", + fields: map[string]interface{}{ + "count": float64(2560), + }, + tags: map[string]string{ + "state": "active+clean", + }, + }, + { + metric: "ceph_pgmap_state", + fields: map[string]interface{}{ + "count": float64(10), + }, + tags: map[string]string{ + 
"state": "active+scrubbing", + }, + }, + { + metric: "ceph_pgmap_state", + fields: map[string]interface{}{ + "count": float64(5), + }, + tags: map[string]string{ + "state": "active+backfilling", + }, + }, +} + +var cephDFDump = ` +{ "stats": { "total_space": 472345880, + "total_used": 71058504, + "total_avail": 377286864, + "total_bytes": 472345880, + "total_used_bytes": 71058504, + "total_avail_bytes": 377286864}, + "pools": [ + { "name": "data", + "id": 0, + "stats": { "kb_used": 0, + "bytes_used": 0, + "objects": 0}}, + { "name": "metadata", + "id": 1, + "stats": { "kb_used": 25, + "bytes_used": 25052, + "objects": 53}}, + { "name": "rbd", + "id": 2, + "stats": { "kb_used": 0, + "bytes_used": 0, + "objects": 0}}, + { "name": "test", + "id": 3, + "stats": { "kb_used": 55476, + "bytes_used": 56806602, + "objects": 1}}]}` + +var cephDfResults = []expectedResult{ + { + metric: "ceph_usage", + fields: map[string]interface{}{ + "total_space": pf(472345880), + "total_used": pf(71058504), + "total_avail": pf(377286864), + "total_bytes": pf(472345880), + "total_used_bytes": pf(71058504), + "total_avail_bytes": pf(377286864), + }, + tags: map[string]string{}, + }, + { + metric: "ceph_pool_usage", + fields: map[string]interface{}{ + "kb_used": float64(0), + "bytes_used": float64(0), + "objects": float64(0), + "percent_used": (*float64)(nil), + "max_avail": (*float64)(nil), + }, + tags: map[string]string{ + "name": "data", + }, + }, + { + metric: "ceph_pool_usage", + fields: map[string]interface{}{ + "kb_used": float64(25), + "bytes_used": float64(25052), + "objects": float64(53), + "percent_used": (*float64)(nil), + "max_avail": (*float64)(nil), + }, + tags: map[string]string{ + "name": "metadata", + }, + }, + { + metric: "ceph_pool_usage", + fields: map[string]interface{}{ + "kb_used": float64(0), + "bytes_used": float64(0), + "objects": float64(0), + "percent_used": (*float64)(nil), + "max_avail": (*float64)(nil), + }, + tags: map[string]string{ + "name": "rbd", + }, + }, + { + metric: "ceph_pool_usage", + fields: map[string]interface{}{ + "kb_used": float64(55476), + "bytes_used": float64(56806602), + "objects": float64(1), + "percent_used": (*float64)(nil), + "max_avail": (*float64)(nil), + }, + tags: map[string]string{ + "name": "test", + }, + }, +} + +var cephODSPoolStatsDump = ` +[ + { "pool_name": "data", + "pool_id": 0, + "recovery": {}, + "recovery_rate": {}, + "client_io_rate": {}}, + { "pool_name": "metadata", + "pool_id": 1, + "recovery": {}, + "recovery_rate": {}, + "client_io_rate": {}}, + { "pool_name": "rbd", + "pool_id": 2, + "recovery": {}, + "recovery_rate": {}, + "client_io_rate": {}}, + { "pool_name": "pbench", + "pool_id": 3, + "recovery": { "degraded_objects": 18446744073709551562, + "degraded_total": 412, + "degrated_ratio": "-13.107"}, + "recovery_rate": { "recovering_objects_per_sec": 279, + "recovering_bytes_per_sec": 176401059, + "recovering_keys_per_sec": 0}, + "client_io_rate": { "read_bytes_sec": 10566067, + "write_bytes_sec": 15165220376, + "op_per_sec": 9828, + "read_op_per_sec": 182, + "write_op_per_sec": 473}}]` + +var cephOSDPoolStatsResults = []expectedResult{ + { + metric: "ceph_pool_stats", + fields: map[string]interface{}{ + "read_bytes_sec": float64(0), + "write_bytes_sec": float64(0), + "op_per_sec": (*float64)(nil), + "read_op_per_sec": float64(0), + "write_op_per_sec": float64(0), + "recovering_objects_per_sec": float64(0), + "recovering_bytes_per_sec": float64(0), + "recovering_keys_per_sec": float64(0), + }, + tags: map[string]string{ + "name": 
"data", + }, + }, + { + metric: "ceph_pool_stats", + fields: map[string]interface{}{ + "read_bytes_sec": float64(10566067), + "write_bytes_sec": float64(15165220376), + "op_per_sec": pf(9828), + "read_op_per_sec": float64(182), + "write_op_per_sec": float64(473), + "recovering_objects_per_sec": float64(279), + "recovering_bytes_per_sec": float64(176401059), + "recovering_keys_per_sec": float64(0), + }, + tags: map[string]string{ + "name": "pbench", + }, + }, +} + +func pf(i float64) *float64 { + return &i +} diff --git a/plugins/inputs/cgroup/cgroup_linux.go b/plugins/inputs/cgroup/cgroup_linux.go index 0765416af..bb38525b7 100644 --- a/plugins/inputs/cgroup/cgroup_linux.go +++ b/plugins/inputs/cgroup/cgroup_linux.go @@ -81,6 +81,7 @@ func isDir(path string) (bool, error) { } func (g *CGroup) generateDirs(list chan<- pathInfo) { + defer close(list) for _, dir := range g.Paths { // getting all dirs that match the pattern 'dir' items, err := filepath.Glob(dir) @@ -101,10 +102,10 @@ func (g *CGroup) generateDirs(list chan<- pathInfo) { } } } - close(list) } func (g *CGroup) generateFiles(dir string, list chan<- pathInfo) { + defer close(list) for _, file := range g.Files { // getting all file paths that match the pattern 'dir + file' // path.Base make sure that file variable does not contains part of path @@ -126,7 +127,6 @@ func (g *CGroup) generateFiles(dir string, list chan<- pathInfo) { } } } - close(list) } // ====================================================================== @@ -173,7 +173,7 @@ const valuePattern = "[\\d-]+" var fileFormats = [...]fileFormat{ // VAL\n - fileFormat{ + { name: "Single value", pattern: "^" + valuePattern + "\n$", parser: func(measurement string, fields map[string]interface{}, b []byte) { @@ -185,7 +185,7 @@ var fileFormats = [...]fileFormat{ // VAL0\n // VAL1\n // ... - fileFormat{ + { name: "New line separated values", pattern: "^(" + valuePattern + "\n){2,}$", parser: func(measurement string, fields map[string]interface{}, b []byte) { @@ -197,7 +197,7 @@ var fileFormats = [...]fileFormat{ }, }, // VAL0 VAL1 ...\n - fileFormat{ + { name: "Space separated values", pattern: "^(" + valuePattern + " )+\n$", parser: func(measurement string, fields map[string]interface{}, b []byte) { @@ -211,7 +211,7 @@ var fileFormats = [...]fileFormat{ // KEY0 VAL0\n // KEY1 VAL1\n // ... 
- fileFormat{ + { name: "New line separated key-space-value's", pattern: "^(" + keyPattern + " " + valuePattern + "\n)+$", parser: func(measurement string, fields map[string]interface{}, b []byte) { diff --git a/plugins/inputs/chrony/chrony.go b/plugins/inputs/chrony/chrony.go index 6173357cf..3fe18e89c 100644 --- a/plugins/inputs/chrony/chrony.go +++ b/plugins/inputs/chrony/chrony.go @@ -33,11 +33,16 @@ func (*Chrony) SampleConfig() string { ` } -func (c *Chrony) Gather(acc telegraf.Accumulator) error { - if len(c.path) == 0 { +func (c *Chrony) Init() error { + var err error + c.path, err = exec.LookPath("chronyc") + if err != nil { return errors.New("chronyc not found: verify that chrony is installed and that chronyc is in your PATH") } + return nil +} +func (c *Chrony) Gather(acc telegraf.Accumulator) error { flags := []string{} if !c.DNSLookup { flags = append(flags, "-n") @@ -120,12 +125,7 @@ func processChronycOutput(out string) (map[string]interface{}, map[string]string } func init() { - c := Chrony{} - path, _ := exec.LookPath("chronyc") - if len(path) > 0 { - c.path = path - } inputs.Add("chrony", func() telegraf.Input { - return &c + return &Chrony{} }) } diff --git a/plugins/inputs/cisco_telemetry_gnmi/README.md b/plugins/inputs/cisco_telemetry_gnmi/README.md new file mode 100644 index 000000000..d12817da1 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_gnmi/README.md @@ -0,0 +1,72 @@ +# Cisco GNMI Telemetry + +Cisco GNMI Telemetry is an input plugin that consumes telemetry data based on the [GNMI](https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md) Subscribe method. TLS is supported for authentication and encryption. + +It has been optimized to support GNMI telemetry as produced by Cisco IOS XR (64-bit) version 6.5.1, Cisco NX-OS 9.3 and Cisco IOS XE 16.12 and later. + + +### Configuration + +```toml +[[inputs.cisco_telemetry_gnmi]] + ## Address and port of the GNMI GRPC server + addresses = ["10.49.234.114:57777"] + + ## define credentials + username = "cisco" + password = "cisco" + + ## GNMI encoding requested (one of: "proto", "json", "json_ietf") + # encoding = "proto" + + ## redial in case of failures after + redial = "10s" + + ## enable client-side TLS and define CA to authenticate the device + # enable_tls = true + # tls_ca = "/etc/telegraf/ca.pem" + # insecure_skip_verify = true + + ## define client-side TLS certificate & key to authenticate to the device + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## GNMI subscription prefix (optional, can usually be left empty) + ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths + # origin = "" + # prefix = "" + # target = "" + + ## Define additional aliases to map telemetry encoding paths to simple measurement names + # [inputs.cisco_telemetry_gnmi.aliases] + # ifcounters = "openconfig:/interfaces/interface/state/counters" + + [[inputs.cisco_telemetry_gnmi.subscription]] + ## Name of the measurement that will be emitted + name = "ifcounters" + + ## Origin and path of the subscription + ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths + ## + ## origin usually refers to a (YANG) data model implemented by the device + ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) + ## YANG models can be found e.g. 
here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr + origin = "openconfig-interfaces" + path = "/interfaces/interface/state/counters" + + # Subscription mode (one of: "target_defined", "sample", "on_change") and interval + subscription_mode = "sample" + sample_interval = "10s" + + ## Suppress redundant transmissions when measured values are unchanged + # suppress_redundant = false + + ## If suppression is enabled, send updates at least every X seconds anyway + # heartbeat_interval = "60s" +``` + +### Example Output +``` +ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=MgmtEth0/RP0/CPU0/0,source=10.49.234.115 in-multicast-pkts=0i,out-multicast-pkts=0i,out-errors=0i,out-discards=0i,in-broadcast-pkts=0i,out-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,in-errors=0i,out-unicast-pkts=0i,in-octets=0i,out-octets=0i,last-clear="2019-05-22T16:53:21Z",in-unicast-pkts=0i 1559145777425000000 +ifcounters,path=openconfig-interfaces:/interfaces/interface/state/counters,host=linux,name=GigabitEthernet0/0/0/0,source=10.49.234.115 out-multicast-pkts=0i,out-broadcast-pkts=0i,in-errors=0i,out-errors=0i,in-discards=0i,out-octets=0i,in-unknown-protos=0i,in-unicast-pkts=0i,in-octets=0i,in-multicast-pkts=0i,in-broadcast-pkts=0i,last-clear="2019-05-22T16:54:50Z",out-unicast-pkts=0i,out-discards=0i 1559145777425000000 +``` diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go new file mode 100644 index 000000000..894b7feb0 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi.go @@ -0,0 +1,556 @@ +package cisco_telemetry_gnmi + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "math" + "net" + "path" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + internaltls "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/inputs" + jsonparser "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/openconfig/gnmi/proto/gnmi" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" +) + +// CiscoTelemetryGNMI plugin instance +type CiscoTelemetryGNMI struct { + Addresses []string `toml:"addresses"` + Subscriptions []Subscription `toml:"subscription"` + Aliases map[string]string `toml:"aliases"` + + // Optional subscription configuration + Encoding string + Origin string + Prefix string + Target string + UpdatesOnly bool `toml:"updates_only"` + + // Cisco IOS XR credentials + Username string + Password string + + // Redial + Redial internal.Duration + + // GRPC TLS settings + EnableTLS bool `toml:"enable_tls"` + internaltls.ClientConfig + + // Internal state + aliases map[string]string + acc telegraf.Accumulator + cancel context.CancelFunc + wg sync.WaitGroup + + Log telegraf.Logger +} + +// Subscription for a GNMI client +type Subscription struct { + Name string + Origin string + Path string + + // Subscription mode and interval + SubscriptionMode string `toml:"subscription_mode"` + SampleInterval internal.Duration `toml:"sample_interval"` + + // Duplicate suppression + SuppressRedundant bool `toml:"suppress_redundant"` + HeartbeatInterval internal.Duration `toml:"heartbeat_interval"` +} + +// Start the http listener service +func (c *CiscoTelemetryGNMI) Start(acc telegraf.Accumulator) error { + var err error + var ctx 
context.Context + var tlscfg *tls.Config + var request *gnmi.SubscribeRequest + c.acc = acc + ctx, c.cancel = context.WithCancel(context.Background()) + + // Validate configuration + if request, err = c.newSubscribeRequest(); err != nil { + return err + } else if c.Redial.Duration.Nanoseconds() <= 0 { + return fmt.Errorf("redial duration must be positive") + } + + // Parse TLS config + if c.EnableTLS { + if tlscfg, err = c.ClientConfig.TLSConfig(); err != nil { + return err + } + } + + if len(c.Username) > 0 { + ctx = metadata.AppendToOutgoingContext(ctx, "username", c.Username, "password", c.Password) + } + + // Invert explicit alias list and prefill subscription names + c.aliases = make(map[string]string, len(c.Subscriptions)+len(c.Aliases)) + for _, subscription := range c.Subscriptions { + var gnmiLongPath, gnmiShortPath *gnmi.Path + + // Build the subscription path without keys + if gnmiLongPath, err = parsePath(subscription.Origin, subscription.Path, ""); err != nil { + return err + } + if gnmiShortPath, err = parsePath("", subscription.Path, ""); err != nil { + return err + } + + longPath, _ := c.handlePath(gnmiLongPath, nil, "") + shortPath, _ := c.handlePath(gnmiShortPath, nil, "") + name := subscription.Name + + // If the user didn't provide a measurement name, use last path element + if len(name) == 0 { + name = path.Base(shortPath) + } + if len(name) > 0 { + c.aliases[longPath] = name + c.aliases[shortPath] = name + } + } + for alias, path := range c.Aliases { + c.aliases[path] = alias + } + + // Create a goroutine for each device, dial and subscribe + c.wg.Add(len(c.Addresses)) + for _, addr := range c.Addresses { + go func(address string) { + defer c.wg.Done() + for ctx.Err() == nil { + if err := c.subscribeGNMI(ctx, address, tlscfg, request); err != nil && ctx.Err() == nil { + acc.AddError(err) + } + + select { + case <-ctx.Done(): + case <-time.After(c.Redial.Duration): + } + } + }(addr) + } + return nil +} + +// Create a new GNMI SubscribeRequest +func (c *CiscoTelemetryGNMI) newSubscribeRequest() (*gnmi.SubscribeRequest, error) { + // Create subscription objects + subscriptions := make([]*gnmi.Subscription, len(c.Subscriptions)) + for i, subscription := range c.Subscriptions { + gnmiPath, err := parsePath(subscription.Origin, subscription.Path, "") + if err != nil { + return nil, err + } + mode, ok := gnmi.SubscriptionMode_value[strings.ToUpper(subscription.SubscriptionMode)] + if !ok { + return nil, fmt.Errorf("invalid subscription mode %s", subscription.SubscriptionMode) + } + subscriptions[i] = &gnmi.Subscription{ + Path: gnmiPath, + Mode: gnmi.SubscriptionMode(mode), + SampleInterval: uint64(subscription.SampleInterval.Duration.Nanoseconds()), + SuppressRedundant: subscription.SuppressRedundant, + HeartbeatInterval: uint64(subscription.HeartbeatInterval.Duration.Nanoseconds()), + } + } + + // Construct subscribe request + gnmiPath, err := parsePath(c.Origin, c.Prefix, c.Target) + if err != nil { + return nil, err + } + + if c.Encoding != "proto" && c.Encoding != "json" && c.Encoding != "json_ietf" { + return nil, fmt.Errorf("unsupported encoding %s", c.Encoding) + } + + return &gnmi.SubscribeRequest{ + Request: &gnmi.SubscribeRequest_Subscribe{ + Subscribe: &gnmi.SubscriptionList{ + Prefix: gnmiPath, + Mode: gnmi.SubscriptionList_STREAM, + Encoding: gnmi.Encoding(gnmi.Encoding_value[strings.ToUpper(c.Encoding)]), + Subscription: subscriptions, + UpdatesOnly: c.UpdatesOnly, + }, + }, + }, nil +} + +// SubscribeGNMI and extract telemetry data +func (c 
*CiscoTelemetryGNMI) subscribeGNMI(ctx context.Context, address string, tlscfg *tls.Config, request *gnmi.SubscribeRequest) error { + var opt grpc.DialOption + if tlscfg != nil { + opt = grpc.WithTransportCredentials(credentials.NewTLS(tlscfg)) + } else { + opt = grpc.WithInsecure() + } + + client, err := grpc.DialContext(ctx, address, opt) + if err != nil { + return fmt.Errorf("failed to dial: %v", err) + } + defer client.Close() + + subscribeClient, err := gnmi.NewGNMIClient(client).Subscribe(ctx) + if err != nil { + return fmt.Errorf("failed to setup subscription: %v", err) + } + + if err = subscribeClient.Send(request); err != nil { + return fmt.Errorf("failed to send subscription request: %v", err) + } + + c.Log.Debugf("Connection to GNMI device %s established", address) + defer c.Log.Debugf("Connection to GNMI device %s closed", address) + for ctx.Err() == nil { + var reply *gnmi.SubscribeResponse + if reply, err = subscribeClient.Recv(); err != nil { + if err != io.EOF && ctx.Err() == nil { + return fmt.Errorf("aborted GNMI subscription: %v", err) + } + break + } + + c.handleSubscribeResponse(address, reply) + } + return nil +} + +// HandleSubscribeResponse message from GNMI and parse contained telemetry data +func (c *CiscoTelemetryGNMI) handleSubscribeResponse(address string, reply *gnmi.SubscribeResponse) { + // Check if response is a GNMI Update and if we have a prefix to derive the measurement name + response, ok := reply.Response.(*gnmi.SubscribeResponse_Update) + if !ok { + return + } + + var prefix, prefixAliasPath string + grouper := metric.NewSeriesGrouper() + timestamp := time.Unix(0, response.Update.Timestamp) + prefixTags := make(map[string]string) + + if response.Update.Prefix != nil { + prefix, prefixAliasPath = c.handlePath(response.Update.Prefix, prefixTags, "") + } + prefixTags["source"], _, _ = net.SplitHostPort(address) + prefixTags["path"] = prefix + + // Parse individual Update message and create measurements + var name, lastAliasPath string + for _, update := range response.Update.Update { + // Prepare tags from prefix + tags := make(map[string]string, len(prefixTags)) + for key, val := range prefixTags { + tags[key] = val + } + aliasPath, fields := c.handleTelemetryField(update, tags, prefix) + + // Inherent valid alias from prefix parsing + if len(prefixAliasPath) > 0 && len(aliasPath) == 0 { + aliasPath = prefixAliasPath + } + + // Lookup alias if alias-path has changed + if aliasPath != lastAliasPath { + name = prefix + if alias, ok := c.aliases[aliasPath]; ok { + name = alias + } else { + c.Log.Debugf("No measurement alias for GNMI path: %s", name) + } + } + + // Group metrics + for k, v := range fields { + key := k + if len(aliasPath) < len(key) { + // This may not be an exact prefix, due to naming style + // conversion on the key. + key = key[len(aliasPath)+1:] + } else { + // Otherwise use the last path element as the field key. + key = path.Base(key) + + // If there are no elements skip the item; this would be an + // invalid message. 
+ key = strings.TrimLeft(key, "/.") + if key == "" { + c.Log.Errorf("invalid empty path: %q", k) + continue + } + } + + grouper.Add(name, tags, timestamp, key, v) + } + + lastAliasPath = aliasPath + } + + // Add grouped measurements + for _, metric := range grouper.Metrics() { + c.acc.AddMetric(metric) + } +} + +// HandleTelemetryField and add it to a measurement +func (c *CiscoTelemetryGNMI) handleTelemetryField(update *gnmi.Update, tags map[string]string, prefix string) (string, map[string]interface{}) { + path, aliasPath := c.handlePath(update.Path, tags, prefix) + + var value interface{} + var jsondata []byte + + // Make sure a value is actually set + if update.Val == nil || update.Val.Value == nil { + c.Log.Infof("Discarded empty or legacy type value with path: %q", path) + return aliasPath, nil + } + + switch val := update.Val.Value.(type) { + case *gnmi.TypedValue_AsciiVal: + value = val.AsciiVal + case *gnmi.TypedValue_BoolVal: + value = val.BoolVal + case *gnmi.TypedValue_BytesVal: + value = val.BytesVal + case *gnmi.TypedValue_DecimalVal: + value = float64(val.DecimalVal.Digits) / math.Pow(10, float64(val.DecimalVal.Precision)) + case *gnmi.TypedValue_FloatVal: + value = val.FloatVal + case *gnmi.TypedValue_IntVal: + value = val.IntVal + case *gnmi.TypedValue_StringVal: + value = val.StringVal + case *gnmi.TypedValue_UintVal: + value = val.UintVal + case *gnmi.TypedValue_JsonIetfVal: + jsondata = val.JsonIetfVal + case *gnmi.TypedValue_JsonVal: + jsondata = val.JsonVal + } + + name := strings.Replace(path, "-", "_", -1) + fields := make(map[string]interface{}) + if value != nil { + fields[name] = value + } else if jsondata != nil { + if err := json.Unmarshal(jsondata, &value); err != nil { + c.acc.AddError(fmt.Errorf("failed to parse JSON value: %v", err)) + } else { + flattener := jsonparser.JSONFlattener{Fields: fields} + flattener.FullFlattenJSON(name, value, true, true) + } + } + return aliasPath, fields +} + +// Parse path to path-buffer and tag-field +func (c *CiscoTelemetryGNMI) handlePath(path *gnmi.Path, tags map[string]string, prefix string) (string, string) { + var aliasPath string + builder := bytes.NewBufferString(prefix) + + // Prefix with origin + if len(path.Origin) > 0 { + builder.WriteString(path.Origin) + builder.WriteRune(':') + } + + // Parse generic keys from prefix + for _, elem := range path.Elem { + if len(elem.Name) > 0 { + builder.WriteRune('/') + builder.WriteString(elem.Name) + } + name := builder.String() + + if _, exists := c.aliases[name]; exists { + aliasPath = name + } + + if tags != nil { + for key, val := range elem.Key { + key = strings.Replace(key, "-", "_", -1) + + // Use short-form of key if possible + if _, exists := tags[key]; exists { + tags[name+"/"+key] = val + } else { + tags[key] = val + } + + } + } + } + + return builder.String(), aliasPath +} + +//ParsePath from XPath-like string to GNMI path structure +func parsePath(origin string, path string, target string) (*gnmi.Path, error) { + var err error + gnmiPath := gnmi.Path{Origin: origin, Target: target} + + if len(path) > 0 && path[0] != '/' { + return nil, fmt.Errorf("path does not start with a '/': %s", path) + } + + elem := &gnmi.PathElem{} + start, name, value, end := 0, -1, -1, -1 + + path = path + "/" + + for i := 0; i < len(path); i++ { + if path[i] == '[' { + if name >= 0 { + break + } + if end < 0 { + end = i + elem.Key = make(map[string]string) + } + name = i + 1 + } else if path[i] == '=' { + if name <= 0 || value >= 0 { + break + } + value = i + 1 + } else if path[i] == 
']' { + if name <= 0 || value <= name { + break + } + elem.Key[path[name:value-1]] = strings.Trim(path[value:i], "'\"") + name, value = -1, -1 + } else if path[i] == '/' { + if name < 0 { + if end < 0 { + end = i + } + + if end > start { + elem.Name = path[start:end] + gnmiPath.Elem = append(gnmiPath.Elem, elem) + gnmiPath.Element = append(gnmiPath.Element, path[start:i]) + } + + start, name, value, end = i+1, -1, -1, -1 + elem = &gnmi.PathElem{} + } + } + } + + if name >= 0 || value >= 0 { + err = fmt.Errorf("Invalid GNMI path: %s", path) + } + + if err != nil { + return nil, err + } + + return &gnmiPath, nil +} + +// Stop listener and cleanup +func (c *CiscoTelemetryGNMI) Stop() { + c.cancel() + c.wg.Wait() +} + +const sampleConfig = ` + ## Address and port of the GNMI GRPC server + addresses = ["10.49.234.114:57777"] + + ## define credentials + username = "cisco" + password = "cisco" + + ## GNMI encoding requested (one of: "proto", "json", "json_ietf") + # encoding = "proto" + + ## redial in case of failures after + redial = "10s" + + ## enable client-side TLS and define CA to authenticate the device + # enable_tls = true + # tls_ca = "/etc/telegraf/ca.pem" + # insecure_skip_verify = true + + ## define client-side TLS certificate & key to authenticate to the device + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## GNMI subscription prefix (optional, can usually be left empty) + ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths + # origin = "" + # prefix = "" + # target = "" + + ## Define additional aliases to map telemetry encoding paths to simple measurement names + #[inputs.cisco_telemetry_gnmi.aliases] + # ifcounters = "openconfig:/interfaces/interface/state/counters" + + [[inputs.cisco_telemetry_gnmi.subscription]] + ## Name of the measurement that will be emitted + name = "ifcounters" + + ## Origin and path of the subscription + ## See: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths + ## + ## origin usually refers to a (YANG) data model implemented by the device + ## and path to a specific substructure inside it that should be subscribed to (similar to an XPath) + ## YANG models can be found e.g. 
here: https://github.com/YangModels/yang/tree/master/vendor/cisco/xr + origin = "openconfig-interfaces" + path = "/interfaces/interface/state/counters" + + # Subscription mode (one of: "target_defined", "sample", "on_change") and interval + subscription_mode = "sample" + sample_interval = "10s" + + ## Suppress redundant transmissions when measured values are unchanged + # suppress_redundant = false + + ## If suppression is enabled, send updates at least every X seconds anyway + # heartbeat_interval = "60s" +` + +// SampleConfig of plugin +func (c *CiscoTelemetryGNMI) SampleConfig() string { + return sampleConfig +} + +// Description of plugin +func (c *CiscoTelemetryGNMI) Description() string { + return "Cisco GNMI telemetry input plugin based on GNMI telemetry data produced in IOS XR" +} + +// Gather plugin measurements (unused) +func (c *CiscoTelemetryGNMI) Gather(_ telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("cisco_telemetry_gnmi", func() telegraf.Input { + return &CiscoTelemetryGNMI{ + Encoding: "proto", + Redial: internal.Duration{Duration: 10 * time.Second}, + } + }) +} diff --git a/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go new file mode 100644 index 000000000..1b12886b9 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_gnmi/cisco_telemetry_gnmi_test.go @@ -0,0 +1,473 @@ +package cisco_telemetry_gnmi + +import ( + "context" + "errors" + "fmt" + "net" + "sync" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/openconfig/gnmi/proto/gnmi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +func TestParsePath(t *testing.T) { + path := "/foo/bar/bla[shoo=woo][shoop=/woop/]/z" + parsed, err := parsePath("theorigin", path, "thetarget") + + assert.Nil(t, err) + assert.Equal(t, parsed.Origin, "theorigin") + assert.Equal(t, parsed.Target, "thetarget") + assert.Equal(t, parsed.Element, []string{"foo", "bar", "bla[shoo=woo][shoop=/woop/]", "z"}) + assert.Equal(t, parsed.Elem, []*gnmi.PathElem{{Name: "foo"}, {Name: "bar"}, + {Name: "bla", Key: map[string]string{"shoo": "woo", "shoop": "/woop/"}}, {Name: "z"}}) + + parsed, err = parsePath("", "", "") + assert.Nil(t, err) + assert.Equal(t, *parsed, gnmi.Path{}) + + parsed, err = parsePath("", "/foo[[", "") + assert.Nil(t, parsed) + assert.Equal(t, errors.New("Invalid GNMI path: /foo[[/"), err) +} + +type MockServer struct { + SubscribeF func(gnmi.GNMI_SubscribeServer) error + GRPCServer *grpc.Server +} + +func (s *MockServer) Capabilities(context.Context, *gnmi.CapabilityRequest) (*gnmi.CapabilityResponse, error) { + return nil, nil +} + +func (s *MockServer) Get(context.Context, *gnmi.GetRequest) (*gnmi.GetResponse, error) { + return nil, nil +} + +func (s *MockServer) Set(context.Context, *gnmi.SetRequest) (*gnmi.SetResponse, error) { + return nil, nil +} + +func (s *MockServer) Subscribe(server gnmi.GNMI_SubscribeServer) error { + return s.SubscribeF(server) +} + +func TestWaitError(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + grpcServer := grpc.NewServer() + gnmiServer := &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + return fmt.Errorf("testerror") + }, + GRPCServer: grpcServer, + } + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + + 
plugin := &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Addresses: []string{listener.Addr().String()}, + Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}, + } + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err := grpcServer.Serve(listener) + require.NoError(t, err) + }() + + acc.WaitError(1) + plugin.Stop() + grpcServer.Stop() + wg.Wait() + + require.Contains(t, acc.Errors, + errors.New("aborted GNMI subscription: rpc error: code = Unknown desc = testerror")) +} + +func TestUsernamePassword(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + grpcServer := grpc.NewServer() + gnmiServer := &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + metadata, ok := metadata.FromIncomingContext(server.Context()) + if !ok { + return errors.New("failed to get metadata") + } + + username := metadata.Get("username") + if len(username) != 1 || username[0] != "theusername" { + return errors.New("wrong username") + } + + password := metadata.Get("password") + if len(password) != 1 || password[0] != "thepassword" { + return errors.New("wrong password") + } + + return errors.New("success") + }, + GRPCServer: grpcServer, + } + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + + plugin := &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Addresses: []string{listener.Addr().String()}, + Username: "theusername", + Password: "thepassword", + Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}, + } + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err := grpcServer.Serve(listener) + require.NoError(t, err) + }() + + acc.WaitError(1) + plugin.Stop() + grpcServer.Stop() + wg.Wait() + + require.Contains(t, acc.Errors, + errors.New("aborted GNMI subscription: rpc error: code = Unknown desc = success")) +} + +func mockGNMINotification() *gnmi.Notification { + return &gnmi.Notification{ + Timestamp: 1543236572000000000, + Prefix: &gnmi.Path{ + Origin: "type", + Elem: []*gnmi.PathElem{ + { + Name: "model", + Key: map[string]string{"foo": "bar"}, + }, + }, + Target: "subscription", + }, + Update: []*gnmi.Update{ + { + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ + {Name: "some"}, + { + Name: "path", + Key: map[string]string{"name": "str", "uint64": "1234"}}, + }, + }, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_IntVal{IntVal: 5678}}, + }, + { + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ + {Name: "other"}, + {Name: "path"}, + }, + }, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "foobar"}}, + }, + { + Path: &gnmi.Path{ + Elem: []*gnmi.PathElem{ + {Name: "other"}, + {Name: "this"}, + }, + }, + Val: &gnmi.TypedValue{Value: &gnmi.TypedValue_StringVal{StringVal: "that"}}, + }, + }, + } +} + +func TestNotification(t *testing.T) { + tests := []struct { + name string + plugin *CiscoTelemetryGNMI + server *MockServer + expected []telegraf.Metric + }{ + { + name: "multiple metrics", + plugin: &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}, + Subscriptions: []Subscription{ + { + Name: "alias", + Origin: "type", + Path: "/model", + SubscriptionMode: "sample", + }, + }, + }, + server: &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + notification := mockGNMINotification() + 
server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_SyncResponse{SyncResponse: true}}) + notification.Prefix.Elem[0].Key["foo"] = "bar2" + notification.Update[0].Path.Elem[1].Key["name"] = "str2" + notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_JsonVal{JsonVal: []byte{'"', '1', '2', '3', '"'}}} + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return nil + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "alias", + map[string]string{ + "path": "type:/model", + "source": "127.0.0.1", + "foo": "bar", + "name": "str", + "uint64": "1234", + }, + map[string]interface{}{ + "some/path": int64(5678), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "alias", + map[string]string{ + "path": "type:/model", + "source": "127.0.0.1", + "foo": "bar", + }, + map[string]interface{}{ + "other/path": "foobar", + "other/this": "that", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "alias", + map[string]string{ + "path": "type:/model", + "foo": "bar2", + "source": "127.0.0.1", + "name": "str2", + "uint64": "1234", + }, + map[string]interface{}{ + "some/path": "123", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "alias", + map[string]string{ + "path": "type:/model", + "source": "127.0.0.1", + "foo": "bar2", + }, + map[string]interface{}{ + "other/path": "foobar", + "other/this": "that", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "full path field key", + plugin: &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Encoding: "proto", + Redial: internal.Duration{Duration: 1 * time.Second}, + Subscriptions: []Subscription{ + { + Name: "PHY_COUNTERS", + Origin: "type", + Path: "/state/port[port-id=*]/ethernet/oper-speed", + SubscriptionMode: "sample", + }, + }, + }, + server: &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + response := &gnmi.SubscribeResponse{ + Response: &gnmi.SubscribeResponse_Update{ + Update: &gnmi.Notification{ + Timestamp: 1543236572000000000, + Prefix: &gnmi.Path{ + Origin: "type", + Elem: []*gnmi.PathElem{ + { + Name: "state", + }, + { + Name: "port", + Key: map[string]string{"port-id": "1"}, + }, + { + Name: "ethernet", + }, + { + Name: "oper-speed", + }, + }, + Target: "subscription", + }, + Update: []*gnmi.Update{ + { + Path: &gnmi.Path{}, + Val: &gnmi.TypedValue{ + Value: &gnmi.TypedValue_IntVal{IntVal: 42}, + }, + }, + }, + }, + }, + } + server.Send(response) + return nil + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "PHY_COUNTERS", + map[string]string{ + "path": "type:/state/port/ethernet/oper-speed", + "source": "127.0.0.1", + "port_id": "1", + }, + map[string]interface{}{ + "oper_speed": 42, + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + tt.plugin.Addresses = []string{listener.Addr().String()} + + grpcServer := grpc.NewServer() + tt.server.GRPCServer = grpcServer + gnmi.RegisterGNMIServer(grpcServer, tt.server) + + var acc testutil.Accumulator + err = tt.plugin.Start(&acc) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err := grpcServer.Serve(listener) + require.NoError(t, err) + }() + + acc.Wait(len(tt.expected)) + tt.plugin.Stop() + grpcServer.Stop() + wg.Wait() + + testutil.RequireMetricsEqual(t, 
tt.expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) + }) + } +} + +func TestRedial(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + plugin := &CiscoTelemetryGNMI{ + Log: testutil.Logger{}, + Addresses: []string{listener.Addr().String()}, + Encoding: "proto", + Redial: internal.Duration{Duration: 10 * time.Millisecond}, + } + + grpcServer := grpc.NewServer() + gnmiServer := &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + notification := mockGNMINotification() + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return nil + }, + GRPCServer: grpcServer, + } + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err := grpcServer.Serve(listener) + require.NoError(t, err) + }() + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + acc.Wait(2) + grpcServer.Stop() + wg.Wait() + + // Restart GNMI server at the same address + listener, err = net.Listen("tcp", listener.Addr().String()) + require.NoError(t, err) + + grpcServer = grpc.NewServer() + gnmiServer = &MockServer{ + SubscribeF: func(server gnmi.GNMI_SubscribeServer) error { + notification := mockGNMINotification() + notification.Prefix.Elem[0].Key["foo"] = "bar2" + notification.Update[0].Path.Elem[1].Key["name"] = "str2" + notification.Update[0].Val = &gnmi.TypedValue{Value: &gnmi.TypedValue_BoolVal{BoolVal: false}} + server.Send(&gnmi.SubscribeResponse{Response: &gnmi.SubscribeResponse_Update{Update: notification}}) + return nil + }, + GRPCServer: grpcServer, + } + gnmi.RegisterGNMIServer(grpcServer, gnmiServer) + + wg.Add(1) + go func() { + defer wg.Done() + err := grpcServer.Serve(listener) + require.NoError(t, err) + }() + + acc.Wait(4) + plugin.Stop() + grpcServer.Stop() + wg.Wait() +} diff --git a/plugins/inputs/cisco_telemetry_mdt/README.md b/plugins/inputs/cisco_telemetry_mdt/README.md new file mode 100644 index 000000000..3545c6120 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_mdt/README.md @@ -0,0 +1,44 @@ +# Cisco model-driven telemetry (MDT) + +Cisco model-driven telemetry (MDT) is an input plugin that consumes +telemetry data from Cisco IOS XR, IOS XE and NX-OS platforms. It supports TCP & GRPC dialout transports. +GRPC-based transport can utilize TLS for authentication and encryption. +Telemetry data is expected to be GPB-KV (self-describing-gpb) encoded. + +The GRPC dialout transport is supported on various IOS XR (64-bit) 6.1.x and later, IOS XE 16.10 and later, as well as NX-OS 7.x and later platforms. + +The TCP dialout transport is supported on IOS XR (32-bit and 64-bit) 6.1.x and later. + + +### Configuration: + +```toml +[[inputs.cisco_telemetry_mdt]] + ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when + ## using the grpc transport. + transport = "grpc" + + ## Address and port to host telemetry listener + service_address = ":57000" + + ## Enable TLS; grpc transport only. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Enable TLS client authentication and define allowed CA certificates; grpc + ## transport only. 
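+  ## Connecting clients must present a certificate signed by one of these CAs.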
+ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags + # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] + + ## Define aliases to map telemetry encoding paths to simple measurement names + [inputs.cisco_telemetry_mdt.aliases] + ifstats = "ietf-interfaces:interfaces-state/interface/statistics" +``` + +### Example Output: +``` +ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet2,source=csr1kv,subscription=101 in-unicast-pkts=27i,in-multicast-pkts=0i,discontinuity-time="2019-05-23T07:40:23.000362+00:00",in-octets=5233i,in-errors=0i,out-multicast-pkts=0i,out-discards=0i,in-broadcast-pkts=0i,in-discards=0i,in-unknown-protos=0i,out-unicast-pkts=0i,out-broadcast-pkts=0i,out-octets=0i,out-errors=0i 1559150462624000000 +ifstats,path=ietf-interfaces:interfaces-state/interface/statistics,host=linux,name=GigabitEthernet1,source=csr1kv,subscription=101 in-octets=3394770806i,in-broadcast-pkts=0i,in-multicast-pkts=0i,out-broadcast-pkts=0i,in-unknown-protos=0i,out-octets=350212i,in-unicast-pkts=9477273i,in-discards=0i,out-unicast-pkts=2726i,out-discards=0i,discontinuity-time="2019-05-23T07:40:23.000363+00:00",in-errors=30i,out-multicast-pkts=0i,out-errors=0i 1559150462624000000 +``` diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go new file mode 100644 index 000000000..2ae051d5b --- /dev/null +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt.go @@ -0,0 +1,558 @@ +package cisco_telemetry_mdt + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "path" + "strconv" + "strings" + "sync" + "time" + + dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" + "github.com/golang/protobuf/proto" + "github.com/influxdata/telegraf" + internaltls "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/inputs" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" // Register GRPC gzip decoder to support compressed telemetry + _ "google.golang.org/grpc/encoding/gzip" + "google.golang.org/grpc/peer" +) + +const ( + // Maximum telemetry payload size (in bytes) to accept for GRPC dialout transport + tcpMaxMsgLen uint32 = 1024 * 1024 +) + +// CiscoTelemetryMDT plugin for IOS XR, IOS XE and NXOS platforms +type CiscoTelemetryMDT struct { + // Common configuration + Transport string + ServiceAddress string `toml:"service_address"` + MaxMsgSize int `toml:"max_msg_size"` + Aliases map[string]string `toml:"aliases"` + EmbeddedTags []string `toml:"embedded_tags"` + + Log telegraf.Logger + + // GRPC TLS settings + internaltls.ServerConfig + + // Internal listener / client handle + grpcServer *grpc.Server + listener net.Listener + + // Internal state + aliases map[string]string + warned map[string]struct{} + extraTags map[string]map[string]struct{} + mutex sync.Mutex + acc telegraf.Accumulator + wg sync.WaitGroup +} + +// Start the Cisco MDT service +func (c *CiscoTelemetryMDT) Start(acc telegraf.Accumulator) error { + var err error + c.acc = acc + c.listener, err = net.Listen("tcp", c.ServiceAddress) + if err != nil { + return err + } + + // Invert aliases list + c.warned = make(map[string]struct{}) + c.aliases = 
make(map[string]string, len(c.Aliases)) + for alias, path := range c.Aliases { + c.aliases[path] = alias + } + + // Fill extra tags + c.extraTags = make(map[string]map[string]struct{}) + for _, tag := range c.EmbeddedTags { + dir := strings.Replace(path.Dir(tag), "-", "_", -1) + if _, hasKey := c.extraTags[dir]; !hasKey { + c.extraTags[dir] = make(map[string]struct{}) + } + c.extraTags[dir][path.Base(tag)] = struct{}{} + } + + switch c.Transport { + case "tcp": + // TCP dialout server accept routine + c.wg.Add(1) + go func() { + c.acceptTCPClients() + c.wg.Done() + }() + + case "grpc": + var opts []grpc.ServerOption + tlsConfig, err := c.ServerConfig.TLSConfig() + if err != nil { + c.listener.Close() + return err + } else if tlsConfig != nil { + opts = append(opts, grpc.Creds(credentials.NewTLS(tlsConfig))) + } + + if c.MaxMsgSize > 0 { + opts = append(opts, grpc.MaxRecvMsgSize(c.MaxMsgSize)) + } + + c.grpcServer = grpc.NewServer(opts...) + dialout.RegisterGRPCMdtDialoutServer(c.grpcServer, c) + + c.wg.Add(1) + go func() { + c.grpcServer.Serve(c.listener) + c.wg.Done() + }() + + default: + c.listener.Close() + return fmt.Errorf("invalid Cisco MDT transport: %s", c.Transport) + } + + return nil +} + +// AcceptTCPDialoutClients defines the TCP dialout server main routine +func (c *CiscoTelemetryMDT) acceptTCPClients() { + // Keep track of all active connections, so we can close them if necessary + var mutex sync.Mutex + clients := make(map[net.Conn]struct{}) + + for { + conn, err := c.listener.Accept() + if neterr, ok := err.(*net.OpError); ok && (neterr.Timeout() || neterr.Temporary()) { + continue + } else if err != nil { + break // Stop() will close the connection so Accept() will fail here + } + + mutex.Lock() + clients[conn] = struct{}{} + mutex.Unlock() + + // Individual client connection routine + c.wg.Add(1) + go func() { + c.Log.Debugf("Accepted Cisco MDT TCP dialout connection from %s", conn.RemoteAddr()) + if err := c.handleTCPClient(conn); err != nil { + c.acc.AddError(err) + } + c.Log.Debugf("Closed Cisco MDT TCP dialout connection from %s", conn.RemoteAddr()) + + mutex.Lock() + delete(clients, conn) + mutex.Unlock() + + conn.Close() + c.wg.Done() + }() + } + + // Close all remaining client connections + mutex.Lock() + for client := range clients { + if err := client.Close(); err != nil { + c.Log.Errorf("Failed to close TCP dialout client: %v", err) + } + } + mutex.Unlock() +} + +// Handle a TCP telemetry client +func (c *CiscoTelemetryMDT) handleTCPClient(conn net.Conn) error { + // TCP Dialout telemetry framing header + var hdr struct { + MsgType uint16 + MsgEncap uint16 + MsgHdrVersion uint16 + MsgFlags uint16 + MsgLen uint32 + } + + var payload bytes.Buffer + + for { + // Read and validate dialout telemetry header + if err := binary.Read(conn, binary.BigEndian, &hdr); err != nil { + return err + } + + maxMsgSize := tcpMaxMsgLen + if c.MaxMsgSize > 0 { + maxMsgSize = uint32(c.MaxMsgSize) + } + + if hdr.MsgLen > maxMsgSize { + return fmt.Errorf("dialout packet too long: %v", hdr.MsgLen) + } else if hdr.MsgFlags != 0 { + return fmt.Errorf("invalid dialout flags: %v", hdr.MsgFlags) + } + + // Read and handle telemetry packet + payload.Reset() + if size, err := payload.ReadFrom(io.LimitReader(conn, int64(hdr.MsgLen))); size != int64(hdr.MsgLen) { + if err != nil { + return err + } + return fmt.Errorf("TCP dialout premature EOF") + } + + c.handleTelemetry(payload.Bytes()) + } +} + +// MdtDialout RPC server method for grpc-dialout transport +func (c *CiscoTelemetryMDT) 
MdtDialout(stream dialout.GRPCMdtDialout_MdtDialoutServer) error { + peer, peerOK := peer.FromContext(stream.Context()) + if peerOK { + c.Log.Debugf("Accepted Cisco MDT GRPC dialout connection from %s", peer.Addr) + } + + var chunkBuffer bytes.Buffer + + for { + packet, err := stream.Recv() + if err != nil { + if err != io.EOF { + c.acc.AddError(fmt.Errorf("GRPC dialout receive error: %v", err)) + } + break + } + + if len(packet.Data) == 0 && len(packet.Errors) != 0 { + c.acc.AddError(fmt.Errorf("GRPC dialout error: %s", packet.Errors)) + break + } + + // Reassemble chunked telemetry data received from NX-OS + if packet.TotalSize == 0 { + c.handleTelemetry(packet.Data) + } else if int(packet.TotalSize) <= c.MaxMsgSize { + chunkBuffer.Write(packet.Data) + if chunkBuffer.Len() >= int(packet.TotalSize) { + c.handleTelemetry(chunkBuffer.Bytes()) + chunkBuffer.Reset() + } + } else { + c.acc.AddError(fmt.Errorf("dropped too large packet: %dB > %dB", packet.TotalSize, c.MaxMsgSize)) + } + } + + if peerOK { + c.Log.Debugf("Closed Cisco MDT GRPC dialout connection from %s", peer.Addr) + } + + return nil +} + +// Handle telemetry packet from any transport, decode and add as measurement +func (c *CiscoTelemetryMDT) handleTelemetry(data []byte) { + msg := &telemetry.Telemetry{} + err := proto.Unmarshal(data, msg) + if err != nil { + c.acc.AddError(fmt.Errorf("Cisco MDT failed to decode: %v", err)) + return + } + + grouper := metric.NewSeriesGrouper() + for _, gpbkv := range msg.DataGpbkv { + // Produce metadata tags + var tags map[string]string + + // Top-level field may have measurement timestamp, if not use message timestamp + measured := gpbkv.Timestamp + if measured == 0 { + measured = msg.MsgTimestamp + } + + timestamp := time.Unix(int64(measured/1000), int64(measured%1000)*1000000) + + // Find toplevel GPBKV fields "keys" and "content" + var keys, content *telemetry.TelemetryField = nil, nil + for _, field := range gpbkv.Fields { + if field.Name == "keys" { + keys = field + } else if field.Name == "content" { + content = field + } + } + + if keys == nil || content == nil { + c.Log.Infof("Message from %s missing keys or content", msg.GetNodeIdStr()) + continue + } + + // Parse keys + tags = make(map[string]string, len(keys.Fields)+3) + tags["source"] = msg.GetNodeIdStr() + tags["subscription"] = msg.GetSubscriptionIdStr() + tags["path"] = msg.GetEncodingPath() + + for _, subfield := range keys.Fields { + c.parseKeyField(tags, subfield, "") + } + + // Parse values + for _, subfield := range content.Fields { + c.parseContentField(grouper, subfield, "", msg.EncodingPath, tags, timestamp) + } + } + + for _, metric := range grouper.Metrics() { + c.acc.AddMetric(metric) + } +} + +func decodeValue(field *telemetry.TelemetryField) interface{} { + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_BytesValue: + return val.BytesValue + case *telemetry.TelemetryField_StringValue: + if len(val.StringValue) > 0 { + return val.StringValue + } + case *telemetry.TelemetryField_BoolValue: + return val.BoolValue + case *telemetry.TelemetryField_Uint32Value: + return val.Uint32Value + case *telemetry.TelemetryField_Uint64Value: + return val.Uint64Value + case *telemetry.TelemetryField_Sint32Value: + return val.Sint32Value + case *telemetry.TelemetryField_Sint64Value: + return val.Sint64Value + case *telemetry.TelemetryField_DoubleValue: + return val.DoubleValue + case *telemetry.TelemetryField_FloatValue: + return val.FloatValue + } + return nil +} + +func decodeTag(field 
*telemetry.TelemetryField) string { + switch val := field.ValueByType.(type) { + case *telemetry.TelemetryField_BytesValue: + return string(val.BytesValue) + case *telemetry.TelemetryField_StringValue: + return val.StringValue + case *telemetry.TelemetryField_BoolValue: + if val.BoolValue { + return "true" + } + return "false" + case *telemetry.TelemetryField_Uint32Value: + return strconv.FormatUint(uint64(val.Uint32Value), 10) + case *telemetry.TelemetryField_Uint64Value: + return strconv.FormatUint(val.Uint64Value, 10) + case *telemetry.TelemetryField_Sint32Value: + return strconv.FormatInt(int64(val.Sint32Value), 10) + case *telemetry.TelemetryField_Sint64Value: + return strconv.FormatInt(val.Sint64Value, 10) + case *telemetry.TelemetryField_DoubleValue: + return strconv.FormatFloat(val.DoubleValue, 'f', -1, 64) + case *telemetry.TelemetryField_FloatValue: + return strconv.FormatFloat(float64(val.FloatValue), 'f', -1, 32) + default: + return "" + } +} + +// Recursively parse tag fields +func (c *CiscoTelemetryMDT) parseKeyField(tags map[string]string, field *telemetry.TelemetryField, prefix string) { + localname := strings.Replace(field.Name, "-", "_", -1) + name := localname + if len(localname) == 0 { + name = prefix + } else if len(prefix) > 0 { + name = prefix + "/" + localname + } + + if tag := decodeTag(field); len(name) > 0 && len(tag) > 0 { + if _, exists := tags[localname]; !exists { // Use short keys whenever possible + tags[localname] = tag + } else { + tags[name] = tag + } + } + + for _, subfield := range field.Fields { + c.parseKeyField(tags, subfield, name) + } +} + +func (c *CiscoTelemetryMDT) parseContentField(grouper *metric.SeriesGrouper, field *telemetry.TelemetryField, prefix string, + path string, tags map[string]string, timestamp time.Time) { + name := strings.Replace(field.Name, "-", "_", -1) + if len(name) == 0 { + name = prefix + } else if len(prefix) > 0 { + name = prefix + "/" + name + } + + extraTags := c.extraTags[strings.Replace(path, "-", "_", -1)+"/"+name] + + if value := decodeValue(field); value != nil { + // Do alias lookup, to shorten measurement names + measurement := path + if alias, ok := c.aliases[path]; ok { + measurement = alias + } else { + c.mutex.Lock() + if _, haveWarned := c.warned[path]; !haveWarned { + c.Log.Debugf("No measurement alias for encoding path: %s", path) + c.warned[path] = struct{}{} + } + c.mutex.Unlock() + } + + grouper.Add(measurement, tags, timestamp, name, value) + return + } + + if len(extraTags) > 0 { + for _, subfield := range field.Fields { + if _, isExtraTag := extraTags[subfield.Name]; isExtraTag { + tags[name+"/"+strings.Replace(subfield.Name, "-", "_", -1)] = decodeTag(subfield) + } + } + } + + var nxAttributes, nxChildren, nxRows *telemetry.TelemetryField + isNXOS := !strings.ContainsRune(path, ':') // IOS-XR and IOS-XE have a colon in their encoding path, NX-OS does not + for _, subfield := range field.Fields { + if isNXOS && subfield.Name == "attributes" && len(subfield.Fields) > 0 { + nxAttributes = subfield.Fields[0] + } else if isNXOS && subfield.Name == "children" && len(subfield.Fields) > 0 { + nxChildren = subfield + } else if isNXOS && strings.HasPrefix(subfield.Name, "ROW_") { + nxRows = subfield + } else if _, isExtraTag := extraTags[subfield.Name]; !isExtraTag { // Regular telemetry decoding + c.parseContentField(grouper, subfield, name, path, tags, timestamp) + } + } + + if nxAttributes == nil && nxRows == nil { + return + } else if nxRows != nil { + // NXAPI structure: 
https://developer.cisco.com/docs/cisco-nexus-9000-series-nx-api-cli-reference-release-9-2x/ + for _, row := range nxRows.Fields { + for i, subfield := range row.Fields { + if i == 0 { // First subfield contains the index, promote it from value to tag + tags[prefix] = decodeTag(subfield) + } else { + c.parseContentField(grouper, subfield, "", path, tags, timestamp) + } + } + delete(tags, prefix) + } + return + } + + // DME structure: https://developer.cisco.com/site/nxapi-dme-model-reference-api/ + rn := "" + dn := false + + for _, subfield := range nxAttributes.Fields { + if subfield.Name == "rn" { + rn = decodeTag(subfield) + } else if subfield.Name == "dn" { + dn = true + } + } + + if len(rn) > 0 { + tags[prefix] = rn + } else if !dn { // Check for distinguished name being present + c.acc.AddError(fmt.Errorf("NX-OS decoding failed: missing dn field")) + return + } + + for _, subfield := range nxAttributes.Fields { + if subfield.Name != "rn" { + c.parseContentField(grouper, subfield, "", path, tags, timestamp) + } + } + + if nxChildren != nil { + // This is a nested structure, children will inherit relative name keys of parent + for _, subfield := range nxChildren.Fields { + c.parseContentField(grouper, subfield, prefix, path, tags, timestamp) + } + } + delete(tags, prefix) +} + +func (c *CiscoTelemetryMDT) Address() net.Addr { + return c.listener.Addr() +} + +// Stop listener and cleanup +func (c *CiscoTelemetryMDT) Stop() { + if c.grpcServer != nil { + // Stop server and terminate all running dialout routines + c.grpcServer.Stop() + } + if c.listener != nil { + c.listener.Close() + } + c.wg.Wait() +} + +const sampleConfig = ` + ## Telemetry transport can be "tcp" or "grpc". TLS is only supported when + ## using the grpc transport. + transport = "grpc" + + ## Address and port to host telemetry listener + service_address = ":57000" + + ## Enable TLS; grpc transport only. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Enable TLS client authentication and define allowed CA certificates; grpc + ## transport only. 
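+  ## Connecting clients must present a certificate signed by one of these CAs.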
+ # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Define (for certain nested telemetry measurements with embedded tags) which fields are tags + # embedded_tags = ["Cisco-IOS-XR-qos-ma-oper:qos/interface-table/interface/input/service-policy-names/service-policy-instance/statistics/class-stats/class-name"] + + ## Define aliases to map telemetry encoding paths to simple measurement names + [inputs.cisco_telemetry_mdt.aliases] + ifstats = "ietf-interfaces:interfaces-state/interface/statistics" +` + +// SampleConfig of plugin +func (c *CiscoTelemetryMDT) SampleConfig() string { + return sampleConfig +} + +// Description of plugin +func (c *CiscoTelemetryMDT) Description() string { + return "Cisco model-driven telemetry (MDT) input plugin for IOS XR, IOS XE and NX-OS platforms" +} + +// Gather plugin measurements (unused) +func (c *CiscoTelemetryMDT) Gather(_ telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("cisco_telemetry_mdt", func() telegraf.Input { + return &CiscoTelemetryMDT{ + Transport: "grpc", + ServiceAddress: "127.0.0.1:57000", + } + }) +} diff --git a/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go new file mode 100644 index 000000000..ea200bc74 --- /dev/null +++ b/plugins/inputs/cisco_telemetry_mdt/cisco_telemetry_mdt_test.go @@ -0,0 +1,592 @@ +package cisco_telemetry_mdt + +import ( + "context" + "encoding/binary" + "errors" + "net" + "testing" + + dialout "github.com/cisco-ie/nx-telemetry-proto/mdt_dialout" + telemetry "github.com/cisco-ie/nx-telemetry-proto/telemetry_bis" + "github.com/golang/protobuf/proto" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func TestHandleTelemetryTwoSimple(t *testing.T) { + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"alias": "type:model/some/path"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "type:model/some/path", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + }, + { + Name: "uint64", + ValueByType: &telemetry.TelemetryField_Uint64Value{Uint64Value: 1234}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "bool", + ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: true}, + }, + }, + }, + }, + }, + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str2"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "bool", + ValueByType: &telemetry.TelemetryField_BoolValue{BoolValue: false}, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + + tags := map[string]string{"path": "type:model/some/path", "name": "str", "uint64": "1234", "source": "hostname", "subscription": "subscription"} + fields 
:= map[string]interface{}{"bool": true} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) + + tags = map[string]string{"path": "type:model/some/path", "name": "str2", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"bool": false} + acc.AssertContainsTaggedFields(t, "alias", fields, tags) +} + +func TestHandleTelemetrySingleNested(t *testing.T) { + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "dummy", Aliases: map[string]string{"nested": "type:model/nested/path"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "type:model/nested/path", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "nested", + Fields: []*telemetry.TelemetryField{ + { + Name: "key", + Fields: []*telemetry.TelemetryField{ + { + Name: "level", + ValueByType: &telemetry.TelemetryField_DoubleValue{DoubleValue: 3}, + }, + }, + }, + }, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "nested", + Fields: []*telemetry.TelemetryField{ + { + Name: "value", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + + tags := map[string]string{"path": "type:model/nested/path", "level": "3", "source": "hostname", "subscription": "subscription"} + fields := map[string]interface{}{"nested/value/foo": "bar"} + acc.AssertContainsTaggedFields(t, "nested", fields, tags) +} + +func TestHandleEmbeddedTags(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"extra": "type:model/extra"}, EmbeddedTags: []string{"type:model/extra/list/name"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "type:model/extra", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "list", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry1"}, + }, + { + Name: "test", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + }, + }, + }, + { + Name: "list", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "entry2"}, + }, + { + Name: "test", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + 
c.handleTelemetry(data) + require.Empty(t, acc.Errors) + + tags1 := map[string]string{"path": "type:model/extra", "foo": "bar", "source": "hostname", "subscription": "subscription", "list/name": "entry1"} + fields1 := map[string]interface{}{"list/test": "foo"} + tags2 := map[string]string{"path": "type:model/extra", "foo": "bar", "source": "hostname", "subscription": "subscription", "list/name": "entry2"} + fields2 := map[string]interface{}{"list/test": "bar"} + acc.AssertContainsTaggedFields(t, "extra", fields1, tags1) + acc.AssertContainsTaggedFields(t, "extra", fields2, tags2) +} + +func TestHandleNXAPI(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"nxapi": "show nxapi"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "show nxapi", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "TABLE_nxapi", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "ROW_nxapi", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "index", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i1"}, + }, + { + Name: "value", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + }, + }, + }, + { + Fields: []*telemetry.TelemetryField{ + { + Name: "index", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "i2"}, + }, + { + Name: "value", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + + tags1 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i1", "source": "hostname", "subscription": "subscription"} + fields1 := map[string]interface{}{"value": "foo"} + tags2 := map[string]string{"path": "show nxapi", "foo": "bar", "TABLE_nxapi": "i2", "source": "hostname", "subscription": "subscription"} + fields2 := map[string]interface{}{"value": "bar"} + acc.AssertContainsTaggedFields(t, "nxapi", fields1, tags1) + acc.AssertContainsTaggedFields(t, "nxapi", fields2, tags2) +} + +func TestHandleNXDME(t *testing.T) { + c := &CiscoTelemetryMDT{Transport: "dummy", Aliases: map[string]string{"dme": "sys/dme"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + // error is expected since we are passing in dummy transport + require.Error(t, err) + + telemetry := &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "sys/dme", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "foo", + ValueByType: 
&telemetry.TelemetryField_StringValue{StringValue: "bar"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "fooEntity", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "attributes", + Fields: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "rn", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "some-rn"}, + }, + { + Name: "value", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "foo"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + data, _ := proto.Marshal(telemetry) + + c.handleTelemetry(data) + require.Empty(t, acc.Errors) + + tags1 := map[string]string{"path": "sys/dme", "foo": "bar", "fooEntity": "some-rn", "source": "hostname", "subscription": "subscription"} + fields1 := map[string]interface{}{"value": "foo"} + acc.AssertContainsTaggedFields(t, "dme", fields1, tags1) +} + +func TestTCPDialoutOverflow(t *testing.T) { + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:0"} + acc := &testutil.Accumulator{} + err := c.Start(acc) + require.NoError(t, err) + + hdr := struct { + MsgType uint16 + MsgEncap uint16 + MsgHdrVersion uint16 + MsgFlags uint16 + MsgLen uint32 + }{MsgLen: uint32(1000000000)} + + addr := c.Address() + conn, err := net.Dial(addr.Network(), addr.String()) + require.NoError(t, err) + binary.Write(conn, binary.BigEndian, hdr) + conn.Read([]byte{0}) + conn.Close() + + c.Stop() + + require.Contains(t, acc.Errors, errors.New("dialout packet too long: 1000000000")) +} + +func mockTelemetryMessage() *telemetry.Telemetry { + return &telemetry.Telemetry{ + MsgTimestamp: 1543236572000, + EncodingPath: "type:model/some/path", + NodeId: &telemetry.Telemetry_NodeIdStr{NodeIdStr: "hostname"}, + Subscription: &telemetry.Telemetry_SubscriptionIdStr{SubscriptionIdStr: "subscription"}, + DataGpbkv: []*telemetry.TelemetryField{ + { + Fields: []*telemetry.TelemetryField{ + { + Name: "keys", + Fields: []*telemetry.TelemetryField{ + { + Name: "name", + ValueByType: &telemetry.TelemetryField_StringValue{StringValue: "str"}, + }, + }, + }, + { + Name: "content", + Fields: []*telemetry.TelemetryField{ + { + Name: "value", + ValueByType: &telemetry.TelemetryField_Sint64Value{Sint64Value: -1}, + }, + }, + }, + }, + }, + }, + } +} + +func TestTCPDialoutMultiple(t *testing.T) { + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "tcp", ServiceAddress: "127.0.0.1:0", Aliases: map[string]string{ + "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + require.NoError(t, err) + + telemetry := mockTelemetryMessage() + + hdr := struct { + MsgType uint16 + MsgEncap uint16 + MsgHdrVersion uint16 + MsgFlags uint16 + MsgLen uint32 + }{} + + addr := c.Address() + conn, err := net.Dial(addr.Network(), addr.String()) + require.NoError(t, err) + + data, _ := proto.Marshal(telemetry) + hdr.MsgLen = uint32(len(data)) + binary.Write(conn, binary.BigEndian, hdr) + conn.Write(data) + + conn2, err := net.Dial(addr.Network(), addr.String()) + require.NoError(t, err) + + telemetry.EncodingPath = "type:model/parallel/path" + data, _ = proto.Marshal(telemetry) + hdr.MsgLen = uint32(len(data)) + binary.Write(conn2, binary.BigEndian, hdr) + conn2.Write(data) + conn2.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + 
conn2.Read([]byte{0}) + conn2.Close() + + telemetry.EncodingPath = "type:model/other/path" + data, _ = proto.Marshal(telemetry) + hdr.MsgLen = uint32(len(data)) + binary.Write(conn, binary.BigEndian, hdr) + conn.Write(data) + conn.Write([]byte{0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0}) + conn.Read([]byte{0}) + c.Stop() + conn.Close() + + // We use the invalid dialout flags to let the server close the connection + require.Equal(t, acc.Errors, []error{errors.New("invalid dialout flags: 257"), errors.New("invalid dialout flags: 257")}) + + tags := map[string]string{"path": "type:model/some/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields := map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "some", fields, tags) + + tags = map[string]string{"path": "type:model/parallel/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "parallel", fields, tags) + + tags = map[string]string{"path": "type:model/other/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "other", fields, tags) +} + +func TestGRPCDialoutError(t *testing.T) { + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:0"} + acc := &testutil.Accumulator{} + err := c.Start(acc) + require.NoError(t, err) + + addr := c.Address() + conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure()) + client := dialout.NewGRPCMdtDialoutClient(conn) + stream, _ := client.MdtDialout(context.Background()) + + args := &dialout.MdtDialoutArgs{Errors: "foobar"} + stream.Send(args) + + // Wait for the server to close + stream.Recv() + c.Stop() + + require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: foobar")}) +} + +func TestGRPCDialoutMultiple(t *testing.T) { + c := &CiscoTelemetryMDT{Log: testutil.Logger{}, Transport: "grpc", ServiceAddress: "127.0.0.1:0", Aliases: map[string]string{ + "some": "type:model/some/path", "parallel": "type:model/parallel/path", "other": "type:model/other/path"}} + acc := &testutil.Accumulator{} + err := c.Start(acc) + require.NoError(t, err) + telemetry := mockTelemetryMessage() + + addr := c.Address() + conn, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + client := dialout.NewGRPCMdtDialoutClient(conn) + stream, _ := client.MdtDialout(context.TODO()) + + data, _ := proto.Marshal(telemetry) + args := &dialout.MdtDialoutArgs{Data: data, ReqId: 456} + stream.Send(args) + + conn2, _ := grpc.Dial(addr.String(), grpc.WithInsecure(), grpc.WithBlock()) + client2 := dialout.NewGRPCMdtDialoutClient(conn2) + stream2, _ := client2.MdtDialout(context.TODO()) + + telemetry.EncodingPath = "type:model/parallel/path" + data, _ = proto.Marshal(telemetry) + args = &dialout.MdtDialoutArgs{Data: data} + stream2.Send(args) + stream2.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}) + stream2.Recv() + conn2.Close() + + telemetry.EncodingPath = "type:model/other/path" + data, _ = proto.Marshal(telemetry) + args = &dialout.MdtDialoutArgs{Data: data} + stream.Send(args) + stream.Send(&dialout.MdtDialoutArgs{Errors: "testclose"}) + stream.Recv() + + c.Stop() + conn.Close() + + require.Equal(t, acc.Errors, []error{errors.New("GRPC dialout error: testclose"), errors.New("GRPC dialout error: testclose")}) + + tags := map[string]string{"path": "type:model/some/path", "name": "str", "source": 
"hostname", "subscription": "subscription"} + fields := map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "some", fields, tags) + + tags = map[string]string{"path": "type:model/parallel/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "parallel", fields, tags) + + tags = map[string]string{"path": "type:model/other/path", "name": "str", "source": "hostname", "subscription": "subscription"} + fields = map[string]interface{}{"value": int64(-1)} + acc.AssertContainsTaggedFields(t, "other", fields, tags) + +} diff --git a/plugins/inputs/clickhouse/README.md b/plugins/inputs/clickhouse/README.md new file mode 100644 index 000000000..5c1d233e6 --- /dev/null +++ b/plugins/inputs/clickhouse/README.md @@ -0,0 +1,124 @@ +# ClickHouse Input Plugin + +This plugin gathers the statistic data from [ClickHouse](https://github.com/ClickHouse/ClickHouse) server. + +### Configuration +```toml +# Read metrics from one or many ClickHouse servers +[[inputs.clickhouse]] + ## Username for authorization on ClickHouse server + ## example: user = "default" + username = "default" + + ## Password for authorization on ClickHouse server + ## example: password = "super_secret" + + ## HTTP(s) timeout while getting metrics values + ## The timeout includes connection time, any redirects, and reading the response body. + ## example: timeout = 1s + # timeout = 5s + + ## List of servers for metrics scraping + ## metrics scrape via HTTP(s) clickhouse interface + ## https://clickhouse.tech/docs/en/interfaces/http/ + ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"] + servers = ["http://127.0.0.1:8123"] + + ## If "auto_discovery"" is "true" plugin tries to connect to all servers available in the cluster + ## with using same "user:password" described in "user" and "password" parameters + ## and get this server hostname list from "system.clusters" table + ## see + ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters + ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers + ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/ + ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables + ## example: auto_discovery = false + # auto_discovery = true + + ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" + ## when this filter present then "WHERE cluster IN (...)" filter will apply + ## please use only full cluster names here, regexp and glob filters is not allowed + ## for "/etc/clickhouse-server/config.d/remote.xml" + ## + ## + ## + ## + ## clickhouse-ru-1.local9000 + ## clickhouse-ru-2.local9000 + ## + ## + ## clickhouse-eu-1.local9000 + ## clickhouse-eu-2.local9000 + ## + ## + ## + ## + ## + ## + ## example: cluster_include = ["my-own-cluster"] + # cluster_include = [] + + ## Filter cluster names in "system.clusters" when "auto_discovery" is "true" + ## when this filter present then "WHERE cluster NOT IN (...)" filter will apply + ## example: cluster_exclude = ["my-internal-not-discovered-cluster"] + # cluster_exclude = [] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Metrics + +- clickhouse_events + 
- tags: + - source (ClickHouse server hostname) + - cluster (Name of the cluster [optional]) + - shard_num (Shard number in the cluster [optional]) + - fields: + - all rows from [system.events][] + ++ clickhouse_metrics + - tags: + - source (ClickHouse server hostname) + - cluster (Name of the cluster [optional]) + - shard_num (Shard number in the cluster [optional]) + - fields: + - all rows from [system.metrics][] + +- clickhouse_asynchronous_metrics + - tags: + - source (ClickHouse server hostname) + - cluster (Name of the cluster [optional]) + - shard_num (Shard number in the cluster [optional]) + - fields: + - all rows from [system.asynchronous_metrics][] + ++ clickhouse_tables + - tags: + - source (ClickHouse server hostname) + - table + - database + - cluster (Name of the cluster [optional]) + - shard_num (Shard number in the cluster [optional]) + - fields: + - bytes + - parts + - rows + +### Example Output + +``` +clickhouse_events,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 read_compressed_bytes=212i,arena_alloc_chunks=35i,function_execute=85i,merge_tree_data_writer_rows=3i,rw_lock_acquired_read_locks=421i,file_open=46i,io_buffer_alloc_bytes=86451985i,inserted_bytes=196i,regexp_created=3i,real_time_microseconds=116832i,query=23i,network_receive_elapsed_microseconds=268i,merge_tree_data_writer_compressed_bytes=1080i,arena_alloc_bytes=212992i,disk_write_elapsed_microseconds=556i,inserted_rows=3i,compressed_read_buffer_bytes=81i,read_buffer_from_file_descriptor_read_bytes=148i,write_buffer_from_file_descriptor_write=47i,merge_tree_data_writer_blocks=3i,soft_page_faults=896i,hard_page_faults=7i,select_query=21i,merge_tree_data_writer_uncompressed_bytes=196i,merge_tree_data_writer_blocks_already_sorted=3i,user_time_microseconds=40196i,compressed_read_buffer_blocks=5i,write_buffer_from_file_descriptor_write_bytes=3246i,io_buffer_allocs=296i,created_write_buffer_ordinary=12i,disk_read_elapsed_microseconds=59347044i,network_send_elapsed_microseconds=1538i,context_lock=1040i,insert_query=1i,system_time_microseconds=14582i,read_buffer_from_file_descriptor_read=3i 1569421000000000000 +clickhouse_asynchronous_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 jemalloc.metadata_thp=0i,replicas_max_relative_delay=0i,jemalloc.mapped=1803177984i,jemalloc.allocated=1724839256i,jemalloc.background_thread.run_interval=0i,jemalloc.background_thread.num_threads=0i,uncompressed_cache_cells=0i,replicas_max_absolute_delay=0i,mark_cache_bytes=0i,compiled_expression_cache_count=0i,replicas_sum_queue_size=0i,number_of_tables=35i,replicas_max_merges_in_queue=0i,replicas_max_inserts_in_queue=0i,replicas_sum_merges_in_queue=0i,replicas_max_queue_size=0i,mark_cache_files=0i,jemalloc.background_thread.num_runs=0i,jemalloc.active=1726210048i,uptime=158i,jemalloc.retained=380481536i,replicas_sum_inserts_in_queue=0i,uncompressed_cache_bytes=0i,number_of_databases=2i,jemalloc.metadata=9207704i,max_part_count_for_partition=1i,jemalloc.resident=1742442496i 1569421000000000000 +clickhouse_metrics,cluster=test_cluster_two_shards_localhost,host=kshvakov,source=localhost,shard_num=1 
replicated_send=0i,write=0i,ephemeral_node=0i,zoo_keeper_request=0i,distributed_files_to_insert=0i,replicated_fetch=0i,background_schedule_pool_task=0i,interserver_connection=0i,leader_replica=0i,delayed_inserts=0i,global_thread_active=41i,merge=0i,readonly_replica=0i,memory_tracking_in_background_schedule_pool=0i,memory_tracking_for_merges=0i,zoo_keeper_session=0i,context_lock_wait=0i,storage_buffer_bytes=0i,background_pool_task=0i,send_external_tables=0i,zoo_keeper_watch=0i,part_mutation=0i,disk_space_reserved_for_merge=0i,distributed_send=0i,version_integer=19014003i,local_thread=0i,replicated_checks=0i,memory_tracking=0i,memory_tracking_in_background_processing_pool=0i,leader_election=0i,revision=54425i,open_file_for_read=0i,open_file_for_write=0i,storage_buffer_rows=0i,rw_lock_waiting_readers=0i,rw_lock_waiting_writers=0i,rw_lock_active_writers=0i,local_thread_active=0i,query_preempted=0i,tcp_connection=1i,http_connection=1i,read=2i,query_thread=0i,dict_cache_requests=0i,rw_lock_active_readers=1i,global_thread=43i,query=1i 1569421000000000000 +clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=system,host=kshvakov,source=localhost,shard_num=1,table=trace_log bytes=754i,parts=1i,rows=1i 1569421000000000000 +clickhouse_tables,cluster=test_cluster_two_shards_localhost,database=default,host=kshvakov,source=localhost,shard_num=1,table=example bytes=326i,parts=2i,rows=2i 1569421000000000000 +``` + +[system.events]: https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-events +[system.metrics]: https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-metrics +[system.asynchronous_metrics]: https://clickhouse.tech/docs/en/operations/system_tables/#system_tables-asynchronous_metrics diff --git a/plugins/inputs/clickhouse/clickhouse.go b/plugins/inputs/clickhouse/clickhouse.go new file mode 100644 index 000000000..4336444eb --- /dev/null +++ b/plugins/inputs/clickhouse/clickhouse.go @@ -0,0 +1,394 @@ +package clickhouse + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var defaultTimeout = 5 * time.Second + +var sampleConfig = ` + ## Username for authorization on ClickHouse server + ## example: user = "default"" + username = "default" + + ## Password for authorization on ClickHouse server + ## example: password = "super_secret" + + ## HTTP(s) timeout while getting metrics values + ## The timeout includes connection time, any redirects, and reading the response body. 
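+  ## Specified as a Go duration string, e.g. "500ms", "1s" or "1m".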
+  ## example: timeout = 1s
+  # timeout = 5s
+
+  ## List of servers for metrics scraping
+  ## metrics are scraped via the HTTP(s) ClickHouse interface
+  ## https://clickhouse.tech/docs/en/interfaces/http/
+  ## example: servers = ["http://127.0.0.1:8123","https://custom-server.mdb.yandexcloud.net"]
+  servers = ["http://127.0.0.1:8123"]
+
+  ## If "auto_discovery" is "true" the plugin tries to connect to all servers available in the cluster
+  ## using the same credentials set in the "username" and "password" parameters,
+  ## and gets the server hostname list from the "system.clusters" table
+  ## see
+  ## - https://clickhouse.tech/docs/en/operations/system_tables/#system-clusters
+  ## - https://clickhouse.tech/docs/en/operations/server_settings/settings/#server_settings_remote_servers
+  ## - https://clickhouse.tech/docs/en/operations/table_engines/distributed/
+  ## - https://clickhouse.tech/docs/en/operations/table_engines/replication/#creating-replicated-tables
+  ## example: auto_discovery = false
+  # auto_discovery = true
+
+  ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+  ## when this filter is present a "WHERE cluster IN (...)" filter is applied to the query
+  ## please use only full cluster names here; regexp and glob filters are not allowed
+  ## for "/etc/clickhouse-server/config.d/remote.xml"
+  ## <remote_servers>
+  ##   <my-own-cluster>
+  ##     <shard>
+  ##       <replica><host>clickhouse-ru-1.local</host><port>9000</port></replica>
+  ##       <replica><host>clickhouse-ru-2.local</host><port>9000</port></replica>
+  ##     </shard>
+  ##     <shard>
+  ##       <replica><host>clickhouse-eu-1.local</host><port>9000</port></replica>
+  ##       <replica><host>clickhouse-eu-2.local</host><port>9000</port></replica>
+  ##     </shard>
+  ##   </my-own-cluster>
+  ## </remote_servers>
+  ##
+  ##
+  ## example: cluster_include = ["my-own-cluster"]
+  # cluster_include = []
+
+  ## Filter cluster names in "system.clusters" when "auto_discovery" is "true"
+  ## when this filter is present a "WHERE cluster NOT IN (...)" filter is applied to the query
+  ## example: cluster_exclude = ["my-internal-not-discovered-cluster"]
+  # cluster_exclude = []
+
+  ## Optional TLS Config
+  # tls_ca = "/etc/telegraf/ca.pem"
+  # tls_cert = "/etc/telegraf/cert.pem"
+  # tls_key = "/etc/telegraf/key.pem"
+  ## Use TLS but skip chain & host verification
+  # insecure_skip_verify = false
+`
+
+type connect struct {
+    Cluster  string `json:"cluster"`
+    ShardNum int    `json:"shard_num"`
+    Hostname string `json:"host_name"`
+    url      *url.URL
+}
+
+func init() {
+    inputs.Add("clickhouse", func() telegraf.Input {
+        return &ClickHouse{
+            AutoDiscovery: true,
+            ClientConfig: tls.ClientConfig{
+                InsecureSkipVerify: false,
+            },
+            Timeout: internal.Duration{Duration: defaultTimeout},
+        }
+    })
+}
+
+// ClickHouse Telegraf Input Plugin
+type ClickHouse struct {
+    Username       string            `toml:"username"`
+    Password       string            `toml:"password"`
+    Servers        []string          `toml:"servers"`
+    AutoDiscovery  bool              `toml:"auto_discovery"`
+    ClusterInclude []string          `toml:"cluster_include"`
+    ClusterExclude []string          `toml:"cluster_exclude"`
+    Timeout        internal.Duration `toml:"timeout"`
+    client         http.Client
+    tls.ClientConfig
+}
+
+// SampleConfig returns the sample config
+func (*ClickHouse) SampleConfig() string {
+    return sampleConfig
+}
+
+// Description returns the plugin description
+func (*ClickHouse) Description() string {
+    return "Read metrics from one or many ClickHouse servers"
+}
+
+// Start the ClickHouse input service
+func (ch *ClickHouse) Start(telegraf.Accumulator) error {
+    timeout := defaultTimeout
+    if ch.Timeout.Duration != 0 {
+        timeout = ch.Timeout.Duration
+    }
+    tlsCfg, err := ch.ClientConfig.TLSConfig()
+    if err != nil {
+        return err
+    }
+
+    ch.client = http.Client{
+        Timeout: timeout,
+        Transport: &http.Transport{
+            TLSClientConfig: tlsCfg,
+            Proxy:           http.ProxyFromEnvironment,
+        },
+    }
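+    // This shared client is reused by every query issued from Gather; its
+    // timeout, TLS, and proxy settings are therefore fixed at startup.
+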
+ return nil +} + +// Gather collect data from ClickHouse server +func (ch *ClickHouse) Gather(acc telegraf.Accumulator) (err error) { + var ( + connects []connect + exists = func(host string) bool { + for _, c := range connects { + if c.Hostname == host { + return true + } + } + return false + } + ) + + for _, server := range ch.Servers { + u, err := url.Parse(server) + if err != nil { + return err + } + switch { + case ch.AutoDiscovery: + var conns []connect + if err := ch.execQuery(u, "SELECT cluster, shard_num, host_name FROM system.clusters "+ch.clusterIncludeExcludeFilter(), &conns); err != nil { + acc.AddError(err) + continue + } + for _, c := range conns { + if !exists(c.Hostname) { + c.url = &url.URL{ + Scheme: u.Scheme, + Host: net.JoinHostPort(c.Hostname, u.Port()), + } + connects = append(connects, c) + } + } + default: + connects = append(connects, connect{ + url: u, + }) + } + } + + for _, conn := range connects { + if err := ch.tables(acc, &conn); err != nil { + acc.AddError(err) + } + for metric := range commonMetrics { + if err := ch.commonMetrics(acc, &conn, metric); err != nil { + acc.AddError(err) + } + } + } + return nil +} + +func (ch *ClickHouse) Stop() { + ch.client.CloseIdleConnections() +} + +func (ch *ClickHouse) clusterIncludeExcludeFilter() string { + if len(ch.ClusterInclude) == 0 && len(ch.ClusterExclude) == 0 { + return "" + } + var ( + escape = func(in string) string { + return "'" + strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(in) + "'" + } + makeFilter = func(expr string, args []string) string { + in := make([]string, 0, len(args)) + for _, v := range args { + in = append(in, escape(v)) + } + return fmt.Sprintf("cluster %s (%s)", expr, strings.Join(in, ", ")) + } + includeFilter, excludeFilter string + ) + + if len(ch.ClusterInclude) != 0 { + includeFilter = makeFilter("IN", ch.ClusterInclude) + } + if len(ch.ClusterExclude) != 0 { + excludeFilter = makeFilter("NOT IN", ch.ClusterExclude) + } + if includeFilter != "" && excludeFilter != "" { + return "WHERE " + includeFilter + " OR " + excludeFilter + } + if includeFilter == "" && excludeFilter != "" { + return "WHERE " + excludeFilter + } + if includeFilter != "" && excludeFilter == "" { + return "WHERE " + includeFilter + } + return "" +} + +func (ch *ClickHouse) commonMetrics(acc telegraf.Accumulator, conn *connect, metric string) error { + var result []struct { + Metric string `json:"metric"` + Value chUInt64 `json:"value"` + } + if err := ch.execQuery(conn.url, commonMetrics[metric], &result); err != nil { + return err + } + + tags := map[string]string{ + "source": conn.Hostname, + } + if len(conn.Cluster) != 0 { + tags["cluster"] = conn.Cluster + } + if conn.ShardNum != 0 { + tags["shard_num"] = strconv.Itoa(conn.ShardNum) + } + + fields := make(map[string]interface{}) + for _, r := range result { + fields[internal.SnakeCase(r.Metric)] = uint64(r.Value) + } + + acc.AddFields("clickhouse_"+metric, fields, tags) + + return nil +} + +func (ch *ClickHouse) tables(acc telegraf.Accumulator, conn *connect) error { + var parts []struct { + Database string `json:"database"` + Table string `json:"table"` + Bytes chUInt64 `json:"bytes"` + Parts chUInt64 `json:"parts"` + Rows chUInt64 `json:"rows"` + } + + if err := ch.execQuery(conn.url, systemParts, &parts); err != nil { + return err + } + tags := map[string]string{ + "source": conn.Hostname, + } + if len(conn.Cluster) != 0 { + tags["cluster"] = conn.Cluster + } + if conn.ShardNum != 0 { + tags["shard_num"] = strconv.Itoa(conn.ShardNum) + } + for _, part 
:= range parts { + tags["table"] = part.Table + tags["database"] = part.Database + acc.AddFields("clickhouse_tables", + map[string]interface{}{ + "bytes": uint64(part.Bytes), + "parts": uint64(part.Parts), + "rows": uint64(part.Rows), + }, + tags, + ) + } + return nil +} + +type clickhouseError struct { + StatusCode int + body []byte +} + +func (e *clickhouseError) Error() string { + return fmt.Sprintf("received error code %d: %s", e.StatusCode, e.body) +} + +func (ch *ClickHouse) execQuery(url *url.URL, query string, i interface{}) error { + q := url.Query() + q.Set("query", query+" FORMAT JSON") + url.RawQuery = q.Encode() + req, _ := http.NewRequest("GET", url.String(), nil) + if ch.Username != "" { + req.Header.Add("X-ClickHouse-User", ch.Username) + } + if ch.Password != "" { + req.Header.Add("X-ClickHouse-Key", ch.Password) + } + resp, err := ch.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode >= 300 { + body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return &clickhouseError{ + StatusCode: resp.StatusCode, + body: body, + } + } + var response struct { + Data json.RawMessage + } + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + return err + } + return json.Unmarshal(response.Data, i) +} + +// see https://clickhouse.yandex/docs/en/operations/settings/settings/#session_settings-output_format_json_quote_64bit_integers +type chUInt64 uint64 + +func (i *chUInt64) UnmarshalJSON(b []byte) error { + b = bytes.TrimPrefix(b, []byte(`"`)) + b = bytes.TrimSuffix(b, []byte(`"`)) + v, err := strconv.ParseUint(string(b), 10, 64) + if err != nil { + return err + } + *i = chUInt64(v) + return nil +} + +const ( + systemEventsSQL = "SELECT event AS metric, CAST(value AS UInt64) AS value FROM system.events" + systemMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.metrics" + systemAsyncMetricsSQL = "SELECT metric, CAST(value AS UInt64) AS value FROM system.asynchronous_metrics" + systemParts = ` + SELECT + database, + table, + SUM(bytes) AS bytes, + COUNT(*) AS parts, + SUM(rows) AS rows + FROM system.parts + WHERE active = 1 + GROUP BY + database, table + ORDER BY + database, table + ` +) + +var commonMetrics = map[string]string{ + "events": systemEventsSQL, + "metrics": systemMetricsSQL, + "asynchronous_metrics": systemAsyncMetricsSQL, +} + +var _ telegraf.ServiceInput = &ClickHouse{} diff --git a/plugins/inputs/clickhouse/clickhouse_test.go b/plugins/inputs/clickhouse/clickhouse_test.go new file mode 100644 index 000000000..382d2148a --- /dev/null +++ b/plugins/inputs/clickhouse/clickhouse_test.go @@ -0,0 +1,161 @@ +package clickhouse + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestClusterIncludeExcludeFilter(t *testing.T) { + ch := ClickHouse{} + if assert.Equal(t, "", ch.clusterIncludeExcludeFilter()) { + ch.ClusterExclude = []string{"test_cluster"} + assert.Equal(t, "WHERE cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + + ch.ClusterExclude = []string{"test_cluster"} + ch.ClusterInclude = []string{"cluster"} + assert.Equal(t, "WHERE cluster IN ('cluster') OR cluster NOT IN ('test_cluster')", ch.clusterIncludeExcludeFilter()) + + ch.ClusterExclude = []string{} + ch.ClusterInclude = []string{"cluster1", "cluster2"} + assert.Equal(t, "WHERE cluster IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) + + ch.ClusterExclude = 
[]string{"cluster1", "cluster2"} + ch.ClusterInclude = []string{} + assert.Equal(t, "WHERE cluster NOT IN ('cluster1', 'cluster2')", ch.clusterIncludeExcludeFilter()) + } +} + +func TestChInt64(t *testing.T) { + assets := map[string]uint64{ + `"1"`: 1, + "1": 1, + "42": 42, + `"42"`: 42, + "18446743937525109187": 18446743937525109187, + } + for src, expected := range assets { + var v chUInt64 + if err := v.UnmarshalJSON([]byte(src)); assert.NoError(t, err) { + assert.Equal(t, expected, uint64(v)) + } + } +} + +func TestGather(t *testing.T) { + var ( + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + type result struct { + Data interface{} `json:"data"` + } + enc := json.NewEncoder(w) + switch query := r.URL.Query().Get("query"); { + case strings.Contains(query, "system.parts"): + enc.Encode(result{ + Data: []struct { + Database string `json:"database"` + Table string `json:"table"` + Bytes chUInt64 `json:"bytes"` + Parts chUInt64 `json:"parts"` + Rows chUInt64 `json:"rows"` + }{ + { + Database: "test_database", + Table: "test_table", + Bytes: 1, + Parts: 10, + Rows: 100, + }, + }, + }) + case strings.Contains(query, "system.events"): + enc.Encode(result{ + Data: []struct { + Metric string `json:"metric"` + Value chUInt64 `json:"value"` + }{ + { + Metric: "TestSystemEvent", + Value: 1000, + }, + { + Metric: "TestSystemEvent2", + Value: 2000, + }, + }, + }) + case strings.Contains(query, "system.metrics"): + enc.Encode(result{ + Data: []struct { + Metric string `json:"metric"` + Value chUInt64 `json:"value"` + }{ + { + Metric: "TestSystemMetric", + Value: 1000, + }, + { + Metric: "TestSystemMetric2", + Value: 2000, + }, + }, + }) + case strings.Contains(query, "system.asynchronous_metrics"): + enc.Encode(result{ + Data: []struct { + Metric string `json:"metric"` + Value chUInt64 `json:"value"` + }{ + { + Metric: "TestSystemAsynchronousMetric", + Value: 1000, + }, + { + Metric: "TestSystemAsynchronousMetric2", + Value: 2000, + }, + }, + }) + } + })) + ch = &ClickHouse{ + Servers: []string{ + ts.URL, + }, + } + acc = &testutil.Accumulator{} + ) + defer ts.Close() + ch.Gather(acc) + + acc.AssertContainsFields(t, "clickhouse_tables", + map[string]interface{}{ + "bytes": uint64(1), + "parts": uint64(10), + "rows": uint64(100), + }, + ) + acc.AssertContainsFields(t, "clickhouse_events", + map[string]interface{}{ + "test_system_event": uint64(1000), + "test_system_event2": uint64(2000), + }, + ) + acc.AssertContainsFields(t, "clickhouse_metrics", + map[string]interface{}{ + "test_system_metric": uint64(1000), + "test_system_metric2": uint64(2000), + }, + ) + acc.AssertContainsFields(t, "clickhouse_asynchronous_metrics", + map[string]interface{}{ + "test_system_asynchronous_metric": uint64(1000), + "test_system_asynchronous_metric2": uint64(2000), + }, + ) +} diff --git a/plugins/inputs/clickhouse/dev/dhparam.pem b/plugins/inputs/clickhouse/dev/dhparam.pem new file mode 100644 index 000000000..5ae6d7bbe --- /dev/null +++ b/plugins/inputs/clickhouse/dev/dhparam.pem @@ -0,0 +1,13 @@ +-----BEGIN DH PARAMETERS----- +MIICCAKCAgEAoo1x7wI5K57P1/AkHUmVWzKNfy46b/ni/QtClomTB78Ks1FP8dzs +CQBW/pfL8yidxTialNhMRCZO1J+uPjTvd8dG8SFZzVylkF41LBNrUD+MLyh/b6Nr +8uWf3tqYCtsiqsQsnq/oU7C29wn6UjhPPVbRRDPGyJUFOgp0ebPR0L2gOc5HhXSF +Tt0fuWnvgZJBKGvyodby3p2CSheu8K6ZteVc8ZgHuanhCQA30nVN+yNQzyozlB2H +B9jxTDPJy8+/4Mui3iiNyXg6FaiI9lWdH7xgKoZlHi8BWlLz5Se9JVNYg0dPrMTz +K0itQyyTKUlK73x+1uPm6q1AJwz08EZiCXNbk58/Sf+pdwDmAO2QSRrERC73vnvc 
+B1+4+Kf7RS7oYpAHknKm/MFnkCJLVIq1b6kikYcIgVCYe+Z1UytSmG1QfwdgL8QQ +TVYVHBg4w07+s3/IJ1ekvNhdxpkmmevYt7GjohWu8vKkip4se+reNdo+sqLsgFKf +1IuDMD36zn9FVukvs7e3BwZCTkdosGHvHGjA7zm2DwPPO16hCvJ4mE6ULLpp2NEw +EBYWm3Tv6M/xtrF5Afyh0gAh7eL767/qsarbx6jlqs+dnh3LptqsE3WerWK54+0B +3Hr5CVfgYbeXuW2HeFb+fS6CNUWmiAsq1XRiz5p16hpeMGYN/qyF1IsCAQI= +-----END DH PARAMETERS----- diff --git a/plugins/inputs/clickhouse/dev/docker-compose.yml b/plugins/inputs/clickhouse/dev/docker-compose.yml new file mode 100644 index 000000000..4dd4d1846 --- /dev/null +++ b/plugins/inputs/clickhouse/dev/docker-compose.yml @@ -0,0 +1,16 @@ +version: '3' + +services: + clickhouse: + image: yandex/clickhouse-server:latest + volumes: + - ./dhparam.pem:/etc/clickhouse-server/dhparam.pem + - ./tls_settings.xml:/etc/clickhouse-server/config.d/00-tls_settings.xml + - ../../../../testutil/pki/serverkey.pem:/etc/clickhouse-server/server.key + - ../../../../testutil/pki/servercert.pem:/etc/clickhouse-server/server.crt + restart: always + ports: + - 8123:8123 + - 8443:8443 + - 9000:9000 + - 9009:9009 diff --git a/plugins/inputs/clickhouse/dev/telegraf.conf b/plugins/inputs/clickhouse/dev/telegraf.conf new file mode 100644 index 000000000..883baf845 --- /dev/null +++ b/plugins/inputs/clickhouse/dev/telegraf.conf @@ -0,0 +1,12 @@ +### ClickHouse input plugin + +[[inputs.clickhouse]] + timeout = 2 + user = "default" + servers = ["http://127.0.0.1:8123"] + auto_discovery = true + cluster_include = [] + cluster_exclude = ["test_shard_localhost"] + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/clickhouse/dev/telegraf_ssl.conf b/plugins/inputs/clickhouse/dev/telegraf_ssl.conf new file mode 100644 index 000000000..21288d84f --- /dev/null +++ b/plugins/inputs/clickhouse/dev/telegraf_ssl.conf @@ -0,0 +1,16 @@ +### ClickHouse input plugin + +[[inputs.clickhouse]] + timeout = 2 + user = "default" + servers = ["https://127.0.0.1:8443"] + auto_discovery = true + cluster_include = [] + cluster_exclude = ["test_shard_localhost"] + insecure_skip_verify = false + tls_cert = "./testutil/pki/clientcert.pem" + tls_key = "./testutil/pki/clientkey.pem" + tls_ca = "./testutil/pki/cacert.pem" + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/clickhouse/dev/tls_settings.xml b/plugins/inputs/clickhouse/dev/tls_settings.xml new file mode 100644 index 000000000..6268b6a12 --- /dev/null +++ b/plugins/inputs/clickhouse/dev/tls_settings.xml @@ -0,0 +1,4 @@ + + 8443 + 9440 + diff --git a/plugins/inputs/cloud_pubsub/README.md b/plugins/inputs/cloud_pubsub/README.md new file mode 100644 index 000000000..460cf4b82 --- /dev/null +++ b/plugins/inputs/cloud_pubsub/README.md @@ -0,0 +1,98 @@ +# Google Cloud PubSub Input Plugin + +The GCP PubSub plugin ingests metrics from [Google Cloud PubSub][pubsub] +and creates metrics using one of the supported [input data formats][]. + + +### Configuration + +```toml +[[inputs.pubsub]] +## Required. Name of Google Cloud Platform (GCP) Project that owns + ## the given PubSub subscription. + project = "my-project" + + ## Required. Name of PubSub subscription to ingest metrics from. + subscription = "my-subscription" + + ## Required. Data format to consume. + ## Each data format has its own unique set of configuration options. + ## Read more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. 
If not set explicitly, Telegraf will attempt to use
+  ## Application Default Credentials, which is preferred.
+  # credentials_file = "path/to/my/creds.json"
+
+  ## Optional. Number of seconds to wait before attempting to restart the
+  ## PubSub subscription receiver after an unexpected error.
+  ## If the streaming pull for a PubSub Subscription fails (receiver),
+  ## the agent attempts to restart receiving messages after this many seconds.
+  # retry_delay_seconds = 5
+
+  ## Optional. Maximum byte length of a message to consume.
+  ## Larger messages are dropped with an error. If less than 0 or unspecified,
+  ## treated as no limit.
+  # max_message_len = 1000000
+
+  ## Optional. Maximum messages to read from PubSub that have not been written
+  ## to an output. Defaults to 1000.
+  ## For best throughput set based on the number of metrics within
+  ## each message and the size of the output's metric_batch_size.
+  ##
+  ## For example, if each message contains 10 metrics and the output
+  ## metric_batch_size is 1000, setting this to 100 will ensure that a
+  ## full batch is collected and the write is triggered immediately without
+  ## waiting until the next flush_interval.
+  # max_undelivered_messages = 1000
+
+  ## The following are optional Subscription ReceiveSettings in PubSub.
+  ## Read more about these values:
+  ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings
+
+  ## Optional. Maximum number of seconds for which a PubSub subscription
+  ## should auto-extend the PubSub ACK deadline for each message. If less than
+  ## 0, auto-extension is disabled.
+  # max_extension = 0
+
+  ## Optional. Maximum number of unprocessed messages in PubSub
+  ## (unacknowledged but not yet expired in PubSub).
+  ## A value of 0 is treated as the default PubSub value.
+  ## Negative values will be treated as unlimited.
+  # max_outstanding_messages = 0
+
+  ## Optional. Maximum size in bytes of unprocessed messages in PubSub
+  ## (unacknowledged but not yet expired in PubSub).
+  ## A value of 0 is treated as the default PubSub value.
+  ## Negative values will be treated as unlimited.
+  # max_outstanding_bytes = 0
+
+  ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn
+  ## to pull messages from PubSub concurrently. This limit applies to each
+  ## subscription separately and is treated as the PubSub default if less than
+  ## 1. Note this setting does not limit the number of messages that can be
+  ## processed concurrently (use "max_outstanding_messages" instead).
+  # max_receiver_go_routines = 0
+
+  ## Optional. If true, Telegraf will attempt to base64 decode the
+  ## PubSub message data before parsing. Many GCP services that
+  ## output JSON to Google PubSub base64-encode the JSON payload.
+  # base64_data = false
+```
+
+### Multiple Subscriptions and Topics
+
+This plugin assumes you have already created a PULL subscription for a given
+PubSub topic. To learn how to do so, see [how to create a subscription][pubsub create sub].
+
+Each plugin instance can listen to only one subscription at a time, so you will
+need to configure multiple instances of the plugin to pull messages from multiple
+subscriptions/topics, as in the sketch below.
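+
+For example, a single Telegraf agent can pull from two subscriptions by defining
+two plugin instances side by side. This is a minimal sketch; the project and
+subscription names below are placeholders:
+
+```toml
+[[inputs.cloud_pubsub]]
+  project = "my-project"
+  subscription = "telegraf-subscription-a"
+  data_format = "influx"
+
+[[inputs.cloud_pubsub]]
+  project = "my-project"
+  subscription = "telegraf-subscription-b"
+  data_format = "influx"
+```
+
+Each instance maintains its own receiver goroutine and undelivered-message
+tracking, so the `max_undelivered_messages` limit applies per subscription.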
+ + + +[pubsub]: https://cloud.google.com/pubsub +[pubsub create sub]: https://cloud.google.com/pubsub/docs/admin#create_a_pull_subscription +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/cloud_pubsub/pubsub.go b/plugins/inputs/cloud_pubsub/pubsub.go new file mode 100644 index 000000000..b418274f3 --- /dev/null +++ b/plugins/inputs/cloud_pubsub/pubsub.go @@ -0,0 +1,368 @@ +package cloud_pubsub + +import ( + "context" + "fmt" + "sync" + + "encoding/base64" + "time" + + "cloud.google.com/go/pubsub" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" +) + +type empty struct{} +type semaphore chan empty + +const defaultMaxUndeliveredMessages = 1000 +const defaultRetryDelaySeconds = 5 + +type PubSub struct { + sync.Mutex + + CredentialsFile string `toml:"credentials_file"` + Project string `toml:"project"` + Subscription string `toml:"subscription"` + + // Subscription ReceiveSettings + MaxExtension internal.Duration `toml:"max_extension"` + MaxOutstandingMessages int `toml:"max_outstanding_messages"` + MaxOutstandingBytes int `toml:"max_outstanding_bytes"` + MaxReceiverGoRoutines int `toml:"max_receiver_go_routines"` + + // Agent settings + MaxMessageLen int `toml:"max_message_len"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + RetryReceiveDelaySeconds int `toml:"retry_delay_seconds"` + + Base64Data bool `toml:"base64_data"` + + Log telegraf.Logger + + sub subscription + stubSub func() subscription + + cancel context.CancelFunc + + parser parsers.Parser + wg *sync.WaitGroup + acc telegraf.TrackingAccumulator + + undelivered map[telegraf.TrackingID]message + sem semaphore +} + +func (ps *PubSub) Description() string { + return "Read metrics from Google PubSub" +} + +func (ps *PubSub) SampleConfig() string { + return fmt.Sprintf(sampleConfig, defaultMaxUndeliveredMessages) +} + +// Gather does nothing for this service input. +func (ps *PubSub) Gather(acc telegraf.Accumulator) error { + return nil +} + +// SetParser implements ParserInput interface. +func (ps *PubSub) SetParser(parser parsers.Parser) { + ps.parser = parser +} + +// Start initializes the plugin and processing messages from Google PubSub. +// Two goroutines are started - one pulling for the subscription, one +// receiving delivery notifications from the accumulator. +func (ps *PubSub) Start(ac telegraf.Accumulator) error { + if ps.Subscription == "" { + return fmt.Errorf(`"subscription" is required`) + } + + if ps.Project == "" { + return fmt.Errorf(`"project" is required`) + } + + ps.sem = make(semaphore, ps.MaxUndeliveredMessages) + ps.acc = ac.WithTracking(ps.MaxUndeliveredMessages) + + // Create top-level context with cancel that will be called on Stop(). + ctx, cancel := context.WithCancel(context.Background()) + ps.cancel = cancel + + if ps.stubSub != nil { + ps.sub = ps.stubSub() + } else { + subRef, err := ps.getGCPSubscription(ps.Subscription) + if err != nil { + return fmt.Errorf("unable to create subscription handle: %v", err) + } + ps.sub = subRef + } + + ps.wg = &sync.WaitGroup{} + // Start goroutine to handle delivery notifications from accumulator. + ps.wg.Add(1) + go func() { + defer ps.wg.Done() + ps.waitForDelivery(ctx) + }() + + // Start goroutine for subscription receiver. 
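+    // receiveWithRetry keeps the streaming pull alive: if Receive exits with an
+    // error it waits retry_delay_seconds (5s by default) and starts a new
+    // receiver, until Stop cancels the context.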
+ ps.wg.Add(1) + go func() { + defer ps.wg.Done() + ps.receiveWithRetry(ctx) + }() + + return nil +} + +// Stop ensures the PubSub subscriptions receivers are stopped by +// canceling the context and waits for goroutines to finish. +func (ps *PubSub) Stop() { + ps.cancel() + ps.wg.Wait() +} + +// startReceiver is called within a goroutine and manages keeping a +// subscription.Receive() up and running while the plugin has not been stopped. +func (ps *PubSub) receiveWithRetry(parentCtx context.Context) { + err := ps.startReceiver(parentCtx) + + for err != nil && parentCtx.Err() == nil { + ps.Log.Errorf("Receiver for subscription %s exited with error: %v", ps.sub.ID(), err) + + delay := defaultRetryDelaySeconds + if ps.RetryReceiveDelaySeconds > 0 { + delay = ps.RetryReceiveDelaySeconds + } + + ps.Log.Infof("Waiting %d seconds before attempting to restart receiver...", delay) + time.Sleep(time.Duration(delay) * time.Second) + + err = ps.startReceiver(parentCtx) + } +} + +func (ps *PubSub) startReceiver(parentCtx context.Context) error { + ps.Log.Infof("Starting receiver for subscription %s...", ps.sub.ID()) + cctx, ccancel := context.WithCancel(parentCtx) + err := ps.sub.Receive(cctx, func(ctx context.Context, msg message) { + if err := ps.onMessage(ctx, msg); err != nil { + ps.acc.AddError(fmt.Errorf("unable to add message from subscription %s: %v", ps.sub.ID(), err)) + } + }) + if err != nil { + ps.acc.AddError(fmt.Errorf("receiver for subscription %s exited: %v", ps.sub.ID(), err)) + } else { + ps.Log.Info("Subscription pull ended (no error, most likely stopped)") + } + ccancel() + return err +} + +// onMessage handles parsing and adding a received message to the accumulator. +func (ps *PubSub) onMessage(ctx context.Context, msg message) error { + if ps.MaxMessageLen > 0 && len(msg.Data()) > ps.MaxMessageLen { + msg.Ack() + return fmt.Errorf("message longer than max_message_len (%d > %d)", len(msg.Data()), ps.MaxMessageLen) + } + + var data []byte + if ps.Base64Data { + strData, err := base64.StdEncoding.DecodeString(string(msg.Data())) + if err != nil { + return fmt.Errorf("unable to base64 decode message: %v", err) + } + data = []byte(strData) + } else { + data = msg.Data() + } + + metrics, err := ps.parser.Parse(data) + if err != nil { + msg.Ack() + return err + } + + if len(metrics) == 0 { + msg.Ack() + return nil + } + + select { + case <-ctx.Done(): + return ctx.Err() + case ps.sem <- empty{}: + break + } + + ps.Lock() + defer ps.Unlock() + + id := ps.acc.AddTrackingMetricGroup(metrics) + if ps.undelivered == nil { + ps.undelivered = make(map[telegraf.TrackingID]message) + } + ps.undelivered[id] = msg + + return nil +} + +func (ps *PubSub) waitForDelivery(parentCtx context.Context) { + for { + select { + case <-parentCtx.Done(): + return + case info := <-ps.acc.Delivered(): + <-ps.sem + msg := ps.removeDelivered(info.ID()) + + if msg != nil { + msg.Ack() + } + } + } +} + +func (ps *PubSub) removeDelivered(id telegraf.TrackingID) message { + ps.Lock() + defer ps.Unlock() + + msg, ok := ps.undelivered[id] + if !ok { + return nil + } + delete(ps.undelivered, id) + return msg +} + +func (ps *PubSub) getPubSubClient() (*pubsub.Client, error) { + var credsOpt option.ClientOption + if ps.CredentialsFile != "" { + credsOpt = option.WithCredentialsFile(ps.CredentialsFile) + } else { + creds, err := google.FindDefaultCredentials(context.Background(), pubsub.ScopeCloudPlatform) + if err != nil { + return nil, fmt.Errorf( + "unable to find GCP Application Default Credentials: %v."+ + "Either 
set ADC or provide CredentialsFile config", err) + } + credsOpt = option.WithCredentials(creds) + } + client, err := pubsub.NewClient( + context.Background(), + ps.Project, + credsOpt, + option.WithScopes(pubsub.ScopeCloudPlatform), + option.WithUserAgent(internal.ProductToken()), + ) + if err != nil { + return nil, fmt.Errorf("unable to generate PubSub client: %v", err) + } + return client, nil +} + +func (ps *PubSub) getGCPSubscription(subId string) (subscription, error) { + client, err := ps.getPubSubClient() + if err != nil { + return nil, err + } + s := client.Subscription(subId) + s.ReceiveSettings = pubsub.ReceiveSettings{ + NumGoroutines: ps.MaxReceiverGoRoutines, + MaxExtension: ps.MaxExtension.Duration, + MaxOutstandingMessages: ps.MaxOutstandingMessages, + MaxOutstandingBytes: ps.MaxOutstandingBytes, + } + return &gcpSubscription{s}, nil +} + +func init() { + inputs.Add("cloud_pubsub", func() telegraf.Input { + ps := &PubSub{ + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + return ps + }) +} + +const sampleConfig = ` + ## Required. Name of Google Cloud Platform (GCP) Project that owns + ## the given PubSub subscription. + project = "my-project" + + ## Required. Name of PubSub subscription to ingest metrics from. + subscription = "my-subscription" + + ## Required. Data format to consume. + ## Each data format has its own unique set of configuration options. + ## Read more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. + # credentials_file = "path/to/my/creds.json" + + ## Optional. Number of seconds to wait before attempting to restart the + ## PubSub subscription receiver after an unexpected error. + ## If the streaming pull for a PubSub Subscription fails (receiver), + ## the agent attempts to restart receiving messages after this many seconds. + # retry_delay_seconds = 5 + + ## Optional. Maximum byte length of a message to consume. + ## Larger messages are dropped with an error. If less than 0 or unspecified, + ## treated as no limit. + # max_message_len = 1000000 + + ## Optional. Maximum messages to read from PubSub that have not been written + ## to an output. Defaults to %d. + ## For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message contains 10 metrics and the output + ## metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## The following are optional Subscription ReceiveSettings in PubSub. + ## Read more about these values: + ## https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings + + ## Optional. Maximum number of seconds for which a PubSub subscription + ## should auto-extend the PubSub ACK deadline for each message. If less than + ## 0, auto-extension is disabled. + # max_extension = 0 + + ## Optional. Maximum number of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. + ## Negative values will be treated as unlimited. + # max_outstanding_messages = 0 + + ## Optional. 
Maximum size in bytes of unprocessed messages in PubSub + ## (unacknowledged but not yet expired in PubSub). + ## A value of 0 is treated as the default PubSub value. + ## Negative values will be treated as unlimited. + # max_outstanding_bytes = 0 + + ## Optional. Max number of goroutines a PubSub Subscription receiver can spawn + ## to pull messages from PubSub concurrently. This limit applies to each + ## subscription separately and is treated as the PubSub default if less than + ## 1. Note this setting does not limit the number of messages that can be + ## processed concurrently (use "max_outstanding_messages" instead). + # max_receiver_go_routines = 0 + + ## Optional. If true, Telegraf will attempt to base64 decode the + ## PubSub message data before parsing + # base64_data = false +` diff --git a/plugins/inputs/cloud_pubsub/pubsub_test.go b/plugins/inputs/cloud_pubsub/pubsub_test.go new file mode 100644 index 000000000..2045cf4cc --- /dev/null +++ b/plugins/inputs/cloud_pubsub/pubsub_test.go @@ -0,0 +1,239 @@ +package cloud_pubsub + +import ( + "encoding/base64" + "errors" + "testing" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +const ( + msgInflux = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" +) + +// Test ingesting InfluxDB-format PubSub message +func TestRunParse(t *testing.T) { + subId := "sub-run-parse" + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + messages: make(chan *testMsg, 100), + } + sub.receiver = testMessagesReceive(sub) + + ps := &PubSub{ + Log: testutil.Logger{}, + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + + acc := &testutil.Accumulator{} + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + + testTracker := &testTracker{} + msg := &testMsg{ + value: msgInflux, + tracker: testTracker, + } + sub.messages <- msg + + acc.Wait(1) + assert.Equal(t, acc.NFields(), 1) + metric := acc.Metrics[0] + validateTestInfluxMetric(t, metric) +} + +// Test ingesting InfluxDB-format PubSub message +func TestRunBase64(t *testing.T) { + subId := "sub-run-base64" + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + messages: make(chan *testMsg, 100), + } + sub.receiver = testMessagesReceive(sub) + + ps := &PubSub{ + Log: testutil.Logger{}, + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + Base64Data: true, + } + + acc := &testutil.Accumulator{} + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + + testTracker := &testTracker{} + msg := &testMsg{ + value: base64.StdEncoding.EncodeToString([]byte(msgInflux)), + tracker: testTracker, + } + sub.messages <- msg + + acc.Wait(1) + assert.Equal(t, acc.NFields(), 1) + metric := acc.Metrics[0] + validateTestInfluxMetric(t, metric) +} + +func TestRunInvalidMessages(t *testing.T) { + subId := "sub-invalid-messages" + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + 
messages: make(chan *testMsg, 100), + } + sub.receiver = testMessagesReceive(sub) + + ps := &PubSub{ + Log: testutil.Logger{}, + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + + acc := &testutil.Accumulator{} + + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + + testTracker := &testTracker{} + msg := &testMsg{ + value: "~invalidInfluxMsg~", + tracker: testTracker, + } + sub.messages <- msg + + acc.WaitError(1) + + // Make sure we acknowledged message so we don't receive it again. + testTracker.WaitForAck(1) + + assert.Equal(t, acc.NFields(), 0) +} + +func TestRunOverlongMessages(t *testing.T) { + subId := "sub-message-too-long" + + acc := &testutil.Accumulator{} + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + messages: make(chan *testMsg, 100), + } + sub.receiver = testMessagesReceive(sub) + + ps := &PubSub{ + Log: testutil.Logger{}, + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + // Add MaxMessageLen Param + MaxMessageLen: 1, + } + + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + + testTracker := &testTracker{} + msg := &testMsg{ + value: msgInflux, + tracker: testTracker, + } + sub.messages <- msg + + acc.WaitError(1) + + // Make sure we acknowledged message so we don't receive it again. 
+ testTracker.WaitForAck(1) + + assert.Equal(t, acc.NFields(), 0) +} + +func TestRunErrorInSubscriber(t *testing.T) { + subId := "sub-unexpected-error" + + acc := &testutil.Accumulator{} + + testParser, _ := parsers.NewInfluxParser() + + sub := &stubSub{ + id: subId, + messages: make(chan *testMsg, 100), + } + fakeErrStr := "a fake error" + sub.receiver = testMessagesError(sub, errors.New("a fake error")) + + ps := &PubSub{ + Log: testutil.Logger{}, + parser: testParser, + stubSub: func() subscription { return sub }, + Project: "projectIDontMatterForTests", + Subscription: subId, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + RetryReceiveDelaySeconds: 1, + } + + if err := ps.Start(acc); err != nil { + t.Fatalf("test PubSub failed to start: %s", err) + } + defer ps.Stop() + + if ps.sub == nil { + t.Fatal("expected plugin subscription to be non-nil") + } + acc.WaitError(1) + assert.Regexp(t, fakeErrStr, acc.Errors[0]) +} + +func validateTestInfluxMetric(t *testing.T, m *testutil.Metric) { + assert.Equal(t, "cpu_load_short", m.Measurement) + assert.Equal(t, "server01", m.Tags["host"]) + assert.Equal(t, 23422.0, m.Fields["value"]) + assert.Equal(t, int64(1422568543702900257), m.Time.UnixNano()) +} diff --git a/plugins/inputs/cloud_pubsub/subscription_gcp.go b/plugins/inputs/cloud_pubsub/subscription_gcp.go new file mode 100644 index 000000000..f436d5219 --- /dev/null +++ b/plugins/inputs/cloud_pubsub/subscription_gcp.go @@ -0,0 +1,68 @@ +package cloud_pubsub + +import ( + "cloud.google.com/go/pubsub" + "context" + "time" +) + +type ( + subscription interface { + ID() string + Receive(ctx context.Context, f func(context.Context, message)) error + } + + message interface { + Ack() + Nack() + ID() string + Data() []byte + Attributes() map[string]string + PublishTime() time.Time + } + + gcpSubscription struct { + sub *pubsub.Subscription + } + + gcpMessage struct { + msg *pubsub.Message + } +) + +func (s *gcpSubscription) ID() string { + if s.sub == nil { + return "" + } + return s.sub.ID() +} + +func (s *gcpSubscription) Receive(ctx context.Context, f func(context.Context, message)) error { + return s.sub.Receive(ctx, func(cctx context.Context, m *pubsub.Message) { + f(cctx, &gcpMessage{m}) + }) +} + +func (env *gcpMessage) Ack() { + env.msg.Ack() +} + +func (env *gcpMessage) Nack() { + env.msg.Nack() +} + +func (env *gcpMessage) ID() string { + return env.msg.ID +} + +func (env *gcpMessage) Data() []byte { + return env.msg.Data +} + +func (env *gcpMessage) Attributes() map[string]string { + return env.msg.Attributes +} + +func (env *gcpMessage) PublishTime() time.Time { + return env.msg.PublishTime +} diff --git a/plugins/inputs/cloud_pubsub/subscription_stub.go b/plugins/inputs/cloud_pubsub/subscription_stub.go new file mode 100644 index 000000000..e061728ca --- /dev/null +++ b/plugins/inputs/cloud_pubsub/subscription_stub.go @@ -0,0 +1,119 @@ +package cloud_pubsub + +import ( + "context" + "sync" + "time" +) + +type stubSub struct { + id string + messages chan *testMsg + receiver receiveFunc +} + +func (s *stubSub) ID() string { + return s.id +} + +func (s *stubSub) Receive(ctx context.Context, f func(context.Context, message)) error { + return s.receiver(ctx, f) +} + +type receiveFunc func(ctx context.Context, f func(context.Context, message)) error + +func testMessagesError(s *stubSub, expectedErr error) receiveFunc { + return func(ctx context.Context, f func(context.Context, message)) error { + return expectedErr + } +} + +func testMessagesReceive(s *stubSub) receiveFunc { + 
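+    // The returned receiveFunc mimics a streaming pull: it feeds messages from
+    // the stub's channel into the callback until the context is canceled.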
return func(ctx context.Context, f func(context.Context, message)) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case m := <-s.messages: + f(ctx, m) + } + } + } +} + +type testMsg struct { + id string + value string + attributes map[string]string + publishTime time.Time + + tracker *testTracker +} + +func (tm *testMsg) Ack() { + tm.tracker.Ack() +} + +func (tm *testMsg) Nack() { + tm.tracker.Nack() +} + +func (tm *testMsg) ID() string { + return tm.id +} + +func (tm *testMsg) Data() []byte { + return []byte(tm.value) +} + +func (tm *testMsg) Attributes() map[string]string { + return tm.attributes +} + +func (tm *testMsg) PublishTime() time.Time { + return tm.publishTime +} + +type testTracker struct { + sync.Mutex + *sync.Cond + + numAcks int + numNacks int +} + +func (t *testTracker) WaitForAck(num int) { + t.Lock() + if t.Cond == nil { + t.Cond = sync.NewCond(&t.Mutex) + } + for t.numAcks < num { + t.Wait() + } + t.Unlock() +} + +func (t *testTracker) WaitForNack(num int) { + t.Lock() + if t.Cond == nil { + t.Cond = sync.NewCond(&t.Mutex) + } + for t.numNacks < num { + t.Wait() + } + t.Unlock() +} + +func (t *testTracker) Ack() { + t.Lock() + defer t.Unlock() + + t.numAcks++ +} + +func (t *testTracker) Nack() { + t.Lock() + defer t.Unlock() + + t.numNacks++ +} diff --git a/plugins/inputs/cloud_pubsub_push/README.md b/plugins/inputs/cloud_pubsub_push/README.md new file mode 100644 index 000000000..76725c997 --- /dev/null +++ b/plugins/inputs/cloud_pubsub_push/README.md @@ -0,0 +1,72 @@ +# Google Cloud PubSub Push Input Service Plugin + +The Google Cloud PubSub Push listener is a service input plugin that listens for messages sent via an HTTP POST from [Google Cloud PubSub][pubsub]. +The plugin expects messages in Google's Pub/Sub JSON Format ONLY. +The intent of the plugin is to allow Telegraf to serve as an endpoint of the Google Pub/Sub 'Push' service. +Google's PubSub service will **only** send over HTTPS/TLS so this plugin must be behind a valid proxy or must be configured to use TLS. + +Enable TLS by specifying the file names of a service TLS certificate and key. + +Enable mutually authenticated TLS and authorize client connections by signing certificate authority by including a list of allowed CA certificate file names in `tls_allowed_cacerts`. + + +### Configuration: + +This is a sample configuration for the plugin. + +```toml +[[inputs.cloud_pubsub_push]] + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Application secret to verify messages originate from Cloud Pub/Sub + # token = "" + + ## Path to listen to. + # path = "/" + + ## Maximum duration before timing out read of the request + # read_timeout = "10s" + ## Maximum duration before timing out write of the response. This should be set to a value + ## large enough that you can send at least 'metric_batch_size' number of messages within the + ## duration. + # write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. + ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) + # max_body_size = "500MB" + + ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag. + # add_meta = false + + ## Optional. Maximum messages to read from PubSub that have not been written + ## to an output. Defaults to 1000. + ## For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. 
+ ## + ## For example, if each message contains 10 metrics and the output + ## metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + +This plugin assumes you have already created a PUSH subscription for a given +PubSub topic. + +[pubsub]: https://cloud.google.com/pubsub +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push.go b/plugins/inputs/cloud_pubsub_push/pubsub_push.go new file mode 100644 index 000000000..d1c521349 --- /dev/null +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push.go @@ -0,0 +1,323 @@ +package cloud_pubsub_push + +import ( + "context" + "crypto/subtle" + "encoding/base64" + "encoding/json" + "io/ioutil" + "net" + "net/http" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +// defaultMaxBodySize is the default maximum request body size, in bytes. +// if the request body is over this size, we will return an HTTP 413 error. +// 500 MB +const defaultMaxBodySize = 500 * 1024 * 1024 +const defaultMaxUndeliveredMessages = 1000 + +type PubSubPush struct { + ServiceAddress string + Token string + Path string + ReadTimeout internal.Duration + WriteTimeout internal.Duration + MaxBodySize internal.Size + AddMeta bool + Log telegraf.Logger + + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + + tlsint.ServerConfig + parsers.Parser + + listener net.Listener + server *http.Server + acc telegraf.TrackingAccumulator + ctx context.Context + cancel context.CancelFunc + wg *sync.WaitGroup + mu *sync.Mutex + + undelivered map[telegraf.TrackingID]chan bool + sem chan struct{} +} + +// Message defines the structure of a Google Pub/Sub message. +type Message struct { + Atts map[string]string `json:"attributes"` + Data string `json:"data"` // Data is base64 encoded data +} + +// Payload is the received Google Pub/Sub data. (https://cloud.google.com/pubsub/docs/push) +type Payload struct { + Msg Message `json:"message"` + Subscription string `json:"subscription"` +} + +const sampleConfig = ` + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Application secret to verify messages originate from Cloud Pub/Sub + # token = "" + + ## Path to listen to. + # path = "/" + + ## Maximum duration before timing out read of the request + # read_timeout = "10s" + ## Maximum duration before timing out write of the response. This should be set to a value + ## large enough that you can send at least 'metric_batch_size' number of messages within the + ## duration. + # write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. 
+ ## 0 means to use the default of 524,288,00 bytes (500 mebibytes) + # max_body_size = "500MB" + + ## Whether to add the pubsub metadata, such as message attributes and subscription as a tag. + # add_meta = false + + ## Optional. Maximum messages to read from PubSub that have not been written + ## to an output. Defaults to 1000. + ## For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message contains 10 metrics and the output + ## metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +func (p *PubSubPush) SampleConfig() string { + return sampleConfig +} + +func (p *PubSubPush) Description() string { + return "Google Cloud Pub/Sub Push HTTP listener" +} + +func (p *PubSubPush) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (p *PubSubPush) SetParser(parser parsers.Parser) { + p.Parser = parser +} + +// Start starts the http listener service. +func (p *PubSubPush) Start(acc telegraf.Accumulator) error { + if p.MaxBodySize.Size == 0 { + p.MaxBodySize.Size = defaultMaxBodySize + } + + if p.ReadTimeout.Duration < time.Second { + p.ReadTimeout.Duration = time.Second * 10 + } + if p.WriteTimeout.Duration < time.Second { + p.WriteTimeout.Duration = time.Second * 10 + } + + tlsConf, err := p.ServerConfig.TLSConfig() + if err != nil { + return err + } + + p.server = &http.Server{ + Addr: p.ServiceAddress, + Handler: http.TimeoutHandler(p, p.WriteTimeout.Duration, "timed out processing metric"), + ReadTimeout: p.ReadTimeout.Duration, + TLSConfig: tlsConf, + } + + p.ctx, p.cancel = context.WithCancel(context.Background()) + p.wg = &sync.WaitGroup{} + p.acc = acc.WithTracking(p.MaxUndeliveredMessages) + p.sem = make(chan struct{}, p.MaxUndeliveredMessages) + p.undelivered = make(map[telegraf.TrackingID]chan bool) + p.mu = &sync.Mutex{} + + p.wg.Add(1) + go func() { + defer p.wg.Done() + p.receiveDelivered() + }() + + p.wg.Add(1) + go func() { + defer p.wg.Done() + if tlsConf != nil { + p.server.ListenAndServeTLS("", "") + } else { + p.server.ListenAndServe() + } + }() + + return nil +} + +// Stop cleans up all resources +func (p *PubSubPush) Stop() { + p.cancel() + p.server.Shutdown(p.ctx) + p.wg.Wait() +} + +func (p *PubSubPush) ServeHTTP(res http.ResponseWriter, req *http.Request) { + if req.URL.Path == p.Path { + p.AuthenticateIfSet(p.serveWrite, res, req) + } else { + p.AuthenticateIfSet(http.NotFound, res, req) + } +} + +func (p *PubSubPush) serveWrite(res http.ResponseWriter, req *http.Request) { + select { + case <-req.Context().Done(): + res.WriteHeader(http.StatusServiceUnavailable) + return + case <-p.ctx.Done(): + res.WriteHeader(http.StatusServiceUnavailable) + return + case p.sem <- struct{}{}: + break + } + + // Check that the content length is not too large for us to 
handle. + if req.ContentLength > p.MaxBodySize.Size { + res.WriteHeader(http.StatusRequestEntityTooLarge) + return + } + + if req.Method != http.MethodPost { + res.WriteHeader(http.StatusMethodNotAllowed) + return + } + + body := http.MaxBytesReader(res, req.Body, p.MaxBodySize.Size) + bytes, err := ioutil.ReadAll(body) + if err != nil { + res.WriteHeader(http.StatusRequestEntityTooLarge) + return + } + + var payload Payload + if err = json.Unmarshal(bytes, &payload); err != nil { + p.Log.Errorf("Error decoding payload %s", err.Error()) + res.WriteHeader(http.StatusBadRequest) + return + } + + sDec, err := base64.StdEncoding.DecodeString(payload.Msg.Data) + if err != nil { + p.Log.Errorf("Base64-decode failed %s", err.Error()) + res.WriteHeader(http.StatusBadRequest) + return + } + + metrics, err := p.Parse(sDec) + if err != nil { + p.Log.Debug(err.Error()) + res.WriteHeader(http.StatusBadRequest) + return + } + + if p.AddMeta { + for i := range metrics { + for k, v := range payload.Msg.Atts { + metrics[i].AddTag(k, v) + } + metrics[i].AddTag("subscription", payload.Subscription) + } + } + + ch := make(chan bool, 1) + p.mu.Lock() + p.undelivered[p.acc.AddTrackingMetricGroup(metrics)] = ch + p.mu.Unlock() + + select { + case <-req.Context().Done(): + res.WriteHeader(http.StatusServiceUnavailable) + return + case success := <-ch: + if success { + res.WriteHeader(http.StatusNoContent) + } else { + res.WriteHeader(http.StatusInternalServerError) + } + } +} + +func (p *PubSubPush) receiveDelivered() { + for { + select { + case <-p.ctx.Done(): + return + case info := <-p.acc.Delivered(): + <-p.sem + + p.mu.Lock() + ch, ok := p.undelivered[info.ID()] + if !ok { + p.mu.Unlock() + continue + } + + delete(p.undelivered, info.ID()) + p.mu.Unlock() + + if info.Delivered() { + ch <- true + } else { + ch <- false + p.Log.Debug("Metric group failed to process") + } + } + } +} + +func (p *PubSubPush) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { + if p.Token != "" { + if subtle.ConstantTimeCompare([]byte(req.FormValue("token")), []byte(p.Token)) != 1 { + http.Error(res, "Unauthorized.", http.StatusUnauthorized) + return + } + } + + handler(res, req) +} + +func init() { + inputs.Add("cloud_pubsub_push", func() telegraf.Input { + return &PubSubPush{ + ServiceAddress: ":8080", + Path: "/", + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + }) +} diff --git a/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go new file mode 100644 index 000000000..ae7601b20 --- /dev/null +++ b/plugins/inputs/cloud_pubsub_push/pubsub_push_test.go @@ -0,0 +1,226 @@ +package cloud_pubsub_push + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/models" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" +) + +func TestServeHTTP(t *testing.T) { + tests := []struct { + name string + method string + path string + body io.Reader + status int + maxsize int64 + expected string + fail bool + full bool + }{ + { + name: "bad method get", + method: "GET", + path: "/", + status: http.StatusMethodNotAllowed, + }, + { + name: "post not found", + method: "POST", + path: "/allthings", + status: http.StatusNotFound, + }, + { + name: 
"post large date", + method: "POST", + path: "/", + status: http.StatusRequestEntityTooLarge, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"dGVzdGluZ0dvb2dsZSxzZW5zb3I9Ym1lXzI4MCB0ZW1wX2M9MjMuOTUsaHVtaWRpdHk9NjIuODMgMTUzNjk1Mjk3NDU1MzUxMDIzMQ==","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + { + name: "post valid data", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusNoContent, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"dGVzdGluZ0dvb2dsZSxzZW5zb3I9Ym1lXzI4MCB0ZW1wX2M9MjMuOTUsaHVtaWRpdHk9NjIuODMgMTUzNjk1Mjk3NDU1MzUxMDIzMQ==","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + { + name: "fail write", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusServiceUnavailable, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"dGVzdGluZ0dvb2dsZSxzZW5zb3I9Ym1lXzI4MCB0ZW1wX2M9MjMuOTUsaHVtaWRpdHk9NjIuODMgMTUzNjk1Mjk3NDU1MzUxMDIzMQ==","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + fail: true, + }, + { + name: "full buffer", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusServiceUnavailable, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"dGVzdGluZ0dvb2dsZSxzZW5zb3I9Ym1lXzI4MCB0ZW1wX2M9MjMuOTUsaHVtaWRpdHk9NjIuODMgMTUzNjk1Mjk3NDU1MzUxMDIzMQ==","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + full: true, + }, + { + name: "post invalid body", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusBadRequest, + body: strings.NewReader(`invalid body`), + }, + { + name: "post invalid data", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusBadRequest, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"not base 64 encoded data","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + { + 
name: "post invalid data format", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusBadRequest, + body: strings.NewReader(`{"message":{"attributes":{"deviceId":"myPi","deviceNumId":"2808946627307959","deviceRegistryId":"my-registry","deviceRegistryLocation":"us-central1","projectId":"conference-demos","subFolder":""},"data":"bm90IHZhbGlkIGZvcm1hdHRlZCBkYXRh","messageId":"204004313210337","message_id":"204004313210337","publishTime":"2018-09-14T19:22:54.587Z","publish_time":"2018-09-14T19:22:54.587Z"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + { + name: "post invalid structured body", + method: "POST", + path: "/", + maxsize: 500 * 1024 * 1024, + status: http.StatusBadRequest, + body: strings.NewReader(`{"message":{"attributes":{"thing":1},"data":"bm90IHZhbGlkIGZvcm1hdHRlZCBkYXRh"},"subscription":"projects/conference-demos/subscriptions/my-subscription"}`), + }, + } + + for _, test := range tests { + wg := &sync.WaitGroup{} + req, err := http.NewRequest(test.method, test.path, test.body) + require.NoError(t, err) + + rr := httptest.NewRecorder() + pubPush := &PubSubPush{ + Log: testutil.Logger{}, + Path: "/", + MaxBodySize: internal.Size{ + Size: test.maxsize, + }, + sem: make(chan struct{}, 1), + undelivered: make(map[telegraf.TrackingID]chan bool), + mu: &sync.Mutex{}, + WriteTimeout: internal.Duration{Duration: time.Second * 1}, + } + + pubPush.ctx, pubPush.cancel = context.WithCancel(context.Background()) + + if test.full { + // fill buffer with fake message + pubPush.sem <- struct{}{} + } + + p, _ := parsers.NewParser(&parsers.Config{ + MetricName: "cloud_pubsub_push", + DataFormat: "influx", + }) + pubPush.SetParser(p) + + dst := make(chan telegraf.Metric, 1) + ro := models.NewRunningOutput("test", &testOutput{failWrite: test.fail}, &models.OutputConfig{}, 1, 1) + pubPush.acc = agent.NewAccumulator(&testMetricMaker{}, dst).WithTracking(1) + + wg.Add(1) + go func() { + defer wg.Done() + pubPush.receiveDelivered() + }() + + wg.Add(1) + go func(status int, d chan telegraf.Metric) { + defer wg.Done() + for m := range d { + ro.AddMetric(m) + ro.Write() + } + }(test.status, dst) + + ctx, cancel := context.WithTimeout(req.Context(), pubPush.WriteTimeout.Duration) + req = req.WithContext(ctx) + + pubPush.ServeHTTP(rr, req) + require.Equal(t, test.status, rr.Code, test.name) + + if test.expected != "" { + require.Equal(t, test.expected, rr.Body.String(), test.name) + } + + pubPush.cancel() + cancel() + close(dst) + wg.Wait() + } +} + +type testMetricMaker struct{} + +func (tm *testMetricMaker) Name() string { + return "TestPlugin" +} + +func (tm *testMetricMaker) LogName() string { + return tm.Name() +} + +func (tm *testMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { + return metric +} + +func (tm *testMetricMaker) Log() telegraf.Logger { + return models.NewLogger("test", "test", "") +} + +type testOutput struct { + // if true, mock a write failure + failWrite bool +} + +func (*testOutput) Connect() error { + return nil +} + +func (*testOutput) Close() error { + return nil +} + +func (*testOutput) Description() string { + return "" +} + +func (*testOutput) SampleConfig() string { + return "" +} + +func (t *testOutput) Write(metrics []telegraf.Metric) error { + if t.failWrite { + return fmt.Errorf("failed write") + } + return nil +} diff --git a/plugins/inputs/cloudwatch/README.md b/plugins/inputs/cloudwatch/README.md index 88a5b098f..3cd098f47 100644 --- a/plugins/inputs/cloudwatch/README.md +++ 
b/plugins/inputs/cloudwatch/README.md @@ -17,7 +17,7 @@ API endpoint. In the following order the plugin will attempt to authenticate. ```toml [[inputs.cloudwatch]] - ## Amazon Region (required) + ## Amazon Region region = "us-east-1" ## Amazon Credentials @@ -28,12 +28,18 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## 4) environment variables ## 5) shared credentials file ## 6) EC2 Instance Profile - #access_key = "" - #secret_key = "" - #token = "" - #role_arn = "" - #profile = "" - #shared_credential_file = "" + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # profile = "" + # shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at @@ -48,32 +54,46 @@ API endpoint. In the following order the plugin will attempt to authenticate. ## Collection Delay (required - must account for metrics availability via CloudWatch API) delay = "5m" - ## Override global run interval (optional - defaults to global interval) - ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid + ## Recommended: use metric 'interval' that is a multiple of 'period' to avoid ## gaps or overlap in pulled data interval = "5m" + ## Configure the TTL for the internal cache of metrics. + # cache_ttl = "1h" + ## Metric Statistic Namespace (required) namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is - ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a - ## maximum of 400. Optional - default value is 200. + ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a + ## maximum of 50. ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html - ratelimit = 200 + # ratelimit = 25 - ## Metrics to Pull (optional) + ## Timeout for http requests made by the cloudwatch client. + # timeout = "5s" + + ## Namespace-wide statistic filters. These allow fewer queries to be made to + ## cloudwatch. + # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] + # statistic_exclude = [] + + ## Metrics to Pull ## Defaults to all Metrics in Namespace if nothing is provided ## Refreshes Namespace available metrics every 1h - [[inputs.cloudwatch.metrics]] - names = ["Latency", "RequestCount"] - - ## Dimension filters for Metric. These are optional however all dimensions - ## defined for the metric names must be specified in order to retrieve - ## the metric statistics. - [[inputs.cloudwatch.metrics.dimensions]] - name = "LoadBalancerName" - value = "p-example" + #[[inputs.cloudwatch.metrics]] + # names = ["Latency", "RequestCount"] + # + # ## Statistic filters for Metric. These allow for retrieving specific + # ## statistics for an individual metric. + # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] + # # statistic_exclude = [] + # + # ## Dimension filters for Metric. All dimensions defined for the metric names + # ## must be specified in order to retrieve the metric statistics.
+ # [[inputs.cloudwatch.metrics.dimensions]] + # name = "LoadBalancerName" + # value = "p-example" ``` #### Requirements and Terminology @@ -91,17 +111,21 @@ wildcard dimension is ignored. Example: ``` -[[inputs.cloudwatch.metrics]] - names = ["Latency"] +[[inputs.cloudwatch]] + period = "1m" + interval = "5m" - ## Dimension filters for Metric (optional) - [[inputs.cloudwatch.metrics.dimensions]] - name = "LoadBalancerName" - value = "p-example" + [[inputs.cloudwatch.metrics]] + names = ["Latency"] - [[inputs.cloudwatch.metrics.dimensions]] - name = "AvailabilityZone" - value = "*" + ## Dimension filters for Metric (optional) + [[inputs.cloudwatch.metrics.dimensions]] + name = "LoadBalancerName" + value = "p-example" + + [[inputs.cloudwatch.metrics.dimensions]] + name = "AvailabilityZone" + value = "*" ``` If the following ELBs are available: @@ -118,9 +142,11 @@ Then 2 metrics will be output: If the `AvailabilityZone` wildcard dimension was omitted, then a single metric (name: `p-example`) would be exported containing the aggregate values of the ELB across availability zones. +To maximize efficiency and savings, consider making fewer requests by increasing `interval` but keeping `period` at the duration you would like metrics to be reported. The above example will request metrics from Cloudwatch every 5 minutes but will output five metrics timestamped one minute apart. + #### Restrictions and Limitations - CloudWatch metrics are not available instantly via the CloudWatch API. You should adjust your collection `delay` to account for this lag in metrics availability based on your [monitoring subscription level](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch-new.html) -- CloudWatch API usage incurs cost - see [GetMetricStatistics Pricing](https://aws.amazon.com/cloudwatch/pricing/) +- CloudWatch API usage incurs cost - see [GetMetricData Pricing](https://aws.amazon.com/cloudwatch/pricing/) ### Measurements & Fields: @@ -141,7 +167,6 @@ Tag Dimension names are represented in [snake case](https://en.wikipedia.org/wik - All measurements have the following tags: - region (CloudWatch Region) - - unit (CloudWatch Metric Unit) - {dimension-name} (Cloudwatch Dimension value - one for each metric dimension) ### Troubleshooting: @@ -155,12 +180,34 @@ aws cloudwatch list-metrics --namespace AWS/EC2 --region us-east-1 --metric-name If the expected metrics are not returned, you can try getting them manually for a short period of time: ``` -aws cloudwatch get-metric-statistics --namespace AWS/EC2 --region us-east-1 --period 300 --start-time 2018-07-01T00:00:00Z --end-time 2018-07-01T00:15:00Z --statistics Average --metric-name CPUCreditBalance --dimensions Name=InstanceId,Value=i-deadbeef +aws cloudwatch get-metric-data \ + --start-time 2018-07-01T00:00:00Z \ + --end-time 2018-07-01T00:15:00Z \ + --metric-data-queries '[ + { + "Id": "avgCPUCreditBalance", + "MetricStat": { + "Metric": { + "Namespace": "AWS/EC2", + "MetricName": "CPUCreditBalance", + "Dimensions": [ + { + "Name": "InstanceId", + "Value": "i-deadbeef" + } + ] + }, + "Period": 300, + "Stat": "Average" + }, + "Label": "avgCPUCreditBalance" + } +]' ``` ### Example Output: ``` $ ./telegraf --config telegraf.conf --input-filter cloudwatch --test -> cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1,unit=seconds latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000 +> 
cloudwatch_aws_elb,load_balancer_name=p-example,region=us-east-1 latency_average=0.004810798017284538,latency_maximum=0.1100282669067383,latency_minimum=0.0006084442138671875,latency_sample_count=4029,latency_sum=19.382705211639404 1459542420000000000 ``` diff --git a/plugins/inputs/cloudwatch/cloudwatch.go b/plugins/inputs/cloudwatch/cloudwatch.go index b4f91f745..9a728d989 100644 --- a/plugins/inputs/cloudwatch/cloudwatch.go +++ b/plugins/inputs/cloudwatch/cloudwatch.go @@ -2,63 +2,85 @@ package cloudwatch import ( "fmt" + "net" + "net/http" + "strconv" "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/influxdata/telegraf" + internalaws "github.com/influxdata/telegraf/config/aws" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" - internalaws "github.com/influxdata/telegraf/internal/config/aws" "github.com/influxdata/telegraf/internal/limiter" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/inputs" ) type ( + // CloudWatch contains the configuration and cache for the cloudwatch plugin. CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + CredentialPath string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` + StatisticExclude []string `toml:"statistic_exclude"` + StatisticInclude []string `toml:"statistic_include"` + Timeout internal.Duration `toml:"timeout"` - Period internal.Duration `toml:"period"` - Delay internal.Duration `toml:"delay"` - Namespace string `toml:"namespace"` - Metrics []*Metric `toml:"metrics"` - CacheTTL internal.Duration `toml:"cache_ttl"` - RateLimit int `toml:"ratelimit"` - client cloudwatchClient - metricCache *MetricCache + Period internal.Duration `toml:"period"` + Delay internal.Duration `toml:"delay"` + Namespace string `toml:"namespace"` + Metrics []*Metric `toml:"metrics"` + CacheTTL internal.Duration `toml:"cache_ttl"` + RateLimit int `toml:"ratelimit"` + + Log telegraf.Logger `toml:"-"` + + client cloudwatchClient + statFilter filter.Filter + metricCache *metricCache + queryDimensions map[string]*map[string]string + windowStart time.Time + windowEnd time.Time } + // Metric defines a simplified Cloudwatch metric. Metric struct { - MetricNames []string `toml:"names"` - Dimensions []*Dimension `toml:"dimensions"` + StatisticExclude *[]string `toml:"statistic_exclude"` + StatisticInclude *[]string `toml:"statistic_include"` + MetricNames []string `toml:"names"` + Dimensions []*Dimension `toml:"dimensions"` } + // Dimension defines a simplified Cloudwatch dimension (provides metric filtering). Dimension struct { Name string `toml:"name"` Value string `toml:"value"` } - MetricCache struct { - TTL time.Duration - Fetched time.Time - Metrics []*cloudwatch.Metric + // metricCache caches metrics, their filters, and generated queries. 
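+ // An entry is reused while its contents are non-nil and time.Since(built) < ttl (see isValid).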
+ metricCache struct { + ttl time.Duration + built time.Time + metrics []filteredMetric + queries []*cloudwatch.MetricDataQuery } cloudwatchClient interface { ListMetrics(*cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) - GetMetricStatistics(*cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) + GetMetricData(*cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) } ) +// SampleConfig returns the default configuration of the Cloudwatch input plugin. func (c *CloudWatch) SampleConfig() string { return ` ## Amazon Region @@ -72,12 +94,18 @@ func (c *CloudWatch) SampleConfig() string { ## 4) environment variables ## 5) shared credentials file ## 6) EC2 Instance Profile - #access_key = "" - #secret_key = "" - #token = "" - #role_arn = "" - #profile = "" - #shared_credential_file = "" + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # profile = "" + # shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all # metrics are made available to the 1 minute period. Some are collected at @@ -97,45 +125,177 @@ func (c *CloudWatch) SampleConfig() string { interval = "5m" ## Configure the TTL for the internal cache of metrics. - ## Defaults to 1 hr if not specified - #cache_ttl = "10m" + # cache_ttl = "1h" ## Metric Statistic Namespace (required) namespace = "AWS/ELB" ## Maximum requests per second. Note that the global default AWS rate limit is - ## 400 reqs/sec, so if you define multiple namespaces, these should add up to a - ## maximum of 400. Optional - default value is 200. + ## 50 reqs/sec, so if you define multiple namespaces, these should add up to a + ## maximum of 50. ## See http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_limits.html - ratelimit = 200 + # ratelimit = 25 - ## Metrics to Pull (optional) + ## Timeout for http requests made by the cloudwatch client. + # timeout = "5s" + + ## Namespace-wide statistic filters. These allow fewer queries to be made to + ## cloudwatch. + # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] + # statistic_exclude = [] + + ## Metrics to Pull ## Defaults to all Metrics in Namespace if nothing is provided ## Refreshes Namespace available metrics every 1h #[[inputs.cloudwatch.metrics]] # names = ["Latency", "RequestCount"] # - # ## Dimension filters for Metric. These are optional however all dimensions - # ## defined for the metric names must be specified in order to retrieve - # ## the metric statistics. + # ## Statistic filters for Metric. These allow for retrieving specific + # ## statistics for an individual metric. + # # statistic_include = [ "average", "sum", "minimum", "maximum", "sample_count" ] + # # statistic_exclude = [] + # + # ## Dimension filters for Metric. All dimensions defined for the metric names + # ## must be specified in order to retrieve the metric statistics. # [[inputs.cloudwatch.metrics.dimensions]] # name = "LoadBalancerName" # value = "p-example" ` } +// Description returns a one-sentence description on the Cloudwatch input plugin.
func (c *CloudWatch) Description() string { return "Pull Metric Statistics from Amazon CloudWatch" } -func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) { - var metrics []*cloudwatch.Metric +// Gather takes in an accumulator and adds the metrics that the Input +// gathers. This is called every "interval". +func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { + if c.statFilter == nil { + var err error + // Set config level filter (won't change throughout life of plugin). + c.statFilter, err = filter.NewIncludeExcludeFilter(c.StatisticInclude, c.StatisticExclude) + if err != nil { + return err + } + } + + if c.client == nil { + c.initializeCloudWatch() + } + + filteredMetrics, err := getFilteredMetrics(c) + if err != nil { + return err + } + + c.updateWindow(time.Now()) + + // Get all of the possible queries so we can send groups of 500. + queries, err := c.getDataQueries(filteredMetrics) + if err != nil { + return err + } + + if len(queries) == 0 { + return nil + } + + // Limit concurrency or we can easily exhaust user connection limit. + // See cloudwatch API request limits: + // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html + lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second) + defer lmtr.Stop() + wg := sync.WaitGroup{} + rLock := sync.Mutex{} + + results := []*cloudwatch.MetricDataResult{} + + // 500 is the maximum number of metric data queries a `GetMetricData` request can contain. + batchSize := 500 + var batches [][]*cloudwatch.MetricDataQuery + + for batchSize < len(queries) { + queries, batches = queries[batchSize:], append(batches, queries[0:batchSize:batchSize]) + } + batches = append(batches, queries) + + for i := range batches { + wg.Add(1) + <-lmtr.C + go func(inm []*cloudwatch.MetricDataQuery) { + defer wg.Done() + result, err := c.gatherMetrics(c.getDataInputs(inm)) + if err != nil { + acc.AddError(err) + return + } + + rLock.Lock() + results = append(results, result...) + rLock.Unlock() + }(batches[i]) + } + + wg.Wait() + + return c.aggregateMetrics(acc, results) +} + +func (c *CloudWatch) initializeCloudWatch() { + credentialConfig := &internalaws.CredentialConfig{ + Region: c.Region, + AccessKey: c.AccessKey, + SecretKey: c.SecretKey, + RoleARN: c.RoleARN, + Profile: c.Profile, + Filename: c.CredentialPath, + Token: c.Token, + EndpointURL: c.EndpointURL, + } + configProvider := credentialConfig.Credentials() + + cfg := &aws.Config{ + HTTPClient: &http.Client{ + // use values from DefaultTransport + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + }, + Timeout: c.Timeout.Duration, + }, + } + + loglevel := aws.LogOff + c.client = cloudwatch.New(configProvider, cfg.WithLogLevel(loglevel)) +} + +type filteredMetric struct { + metrics []*cloudwatch.Metric + statFilter filter.Filter +} + +// getFilteredMetrics returns metrics specified in the config file or metrics listed from Cloudwatch.
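+// Per-metric statistic_include/statistic_exclude settings fall back to the plugin-level
+// filters when unset, and the filtered result is cached until the metric cache TTL expires.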
+func getFilteredMetrics(c *CloudWatch) ([]filteredMetric, error) { + if c.metricCache != nil && c.metricCache.isValid() { + return c.metricCache.metrics, nil + } + + fMetrics := []filteredMetric{} // check for provided metric filter if c.Metrics != nil { - metrics = []*cloudwatch.Metric{} for _, m := range c.Metrics { - if !hasWilcard(m.Dimensions) { + metrics := []*cloudwatch.Metric{} + if !hasWildcard(m.Dimensions) { dimensions := make([]*cloudwatch.Dimension, len(m.Dimensions)) for k, d := range m.Dimensions { dimensions[k] = &cloudwatch.Dimension{ @@ -167,215 +327,283 @@ func SelectMetrics(c *CloudWatch) ([]*cloudwatch.Metric, error) { } } } + + if m.StatisticExclude == nil { + m.StatisticExclude = &c.StatisticExclude + } + if m.StatisticInclude == nil { + m.StatisticInclude = &c.StatisticInclude + } + statFilter, err := filter.NewIncludeExcludeFilter(*m.StatisticInclude, *m.StatisticExclude) + if err != nil { + return nil, err + } + + fMetrics = append(fMetrics, filteredMetric{ + metrics: metrics, + statFilter: statFilter, + }) } } else { - var err error - metrics, err = c.fetchNamespaceMetrics() + metrics, err := c.fetchNamespaceMetrics() if err != nil { return nil, err } + + fMetrics = []filteredMetric{{ + metrics: metrics, + statFilter: c.statFilter, + }} } - return metrics, nil + + c.metricCache = &metricCache{ + metrics: fMetrics, + built: time.Now(), + ttl: c.CacheTTL.Duration, + } + + return fMetrics, nil } -func (c *CloudWatch) Gather(acc telegraf.Accumulator) error { - if c.client == nil { - c.initializeCloudWatch() - } - - metrics, err := SelectMetrics(c) - if err != nil { - return err - } - - now := time.Now() - - // limit concurrency or we can easily exhaust user connection limit - // see cloudwatch API request limits: - // http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html - lmtr := limiter.NewRateLimiter(c.RateLimit, time.Second) - defer lmtr.Stop() - var wg sync.WaitGroup - wg.Add(len(metrics)) - for _, m := range metrics { - <-lmtr.C - go func(inm *cloudwatch.Metric) { - defer wg.Done() - acc.AddError(c.gatherMetric(acc, inm, now)) - }(m) - } - wg.Wait() - - return nil -} - -func init() { - inputs.Add("cloudwatch", func() telegraf.Input { - ttl, _ := time.ParseDuration("1hr") - return &CloudWatch{ - CacheTTL: internal.Duration{Duration: ttl}, - RateLimit: 200, - } - }) -} - -/* - * Initialize CloudWatch client - */ -func (c *CloudWatch) initializeCloudWatch() error { - credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.Filename, - Token: c.Token, - } - configProvider := credentialConfig.Credentials() - - c.client = cloudwatch.New(configProvider) - return nil -} - -/* - * Fetch available metrics for given CloudWatch Namespace - */ +// fetchNamespaceMetrics retrieves available metrics for a given CloudWatch namespace. 
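+// It pages through ListMetrics results, following NextToken until the last page is read.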
func (c *CloudWatch) fetchNamespaceMetrics() ([]*cloudwatch.Metric, error) { - if c.metricCache != nil && c.metricCache.IsValid() { - return c.metricCache.Metrics, nil - } - metrics := []*cloudwatch.Metric{} var token *string - for more := true; more; { - params := &cloudwatch.ListMetricsInput{ - Namespace: aws.String(c.Namespace), - Dimensions: []*cloudwatch.DimensionFilter{}, - NextToken: token, - MetricName: nil, - } + params := &cloudwatch.ListMetricsInput{ + Namespace: aws.String(c.Namespace), + Dimensions: []*cloudwatch.DimensionFilter{}, + NextToken: token, + MetricName: nil, + } + for { resp, err := c.client.ListMetrics(params) if err != nil { return nil, err } metrics = append(metrics, resp.Metrics...) + if resp.NextToken == nil { + break + } - token = resp.NextToken - more = token != nil - } - - c.metricCache = &MetricCache{ - Metrics: metrics, - Fetched: time.Now(), - TTL: c.CacheTTL.Duration, + params.NextToken = resp.NextToken } return metrics, nil } -/* - * Gather given Metric and emit any error - */ -func (c *CloudWatch) gatherMetric( - acc telegraf.Accumulator, - metric *cloudwatch.Metric, - now time.Time, -) error { - params := c.getStatisticsInput(metric, now) - resp, err := c.client.GetMetricStatistics(params) - if err != nil { - return err +func (c *CloudWatch) updateWindow(relativeTo time.Time) { + windowEnd := relativeTo.Add(-c.Delay.Duration) + + if c.windowEnd.IsZero() { + // this is the first run, no window info, so just get a single period + c.windowStart = windowEnd.Add(-c.Period.Duration) + } else { + // subsequent window, start where last window left off + c.windowStart = c.windowEnd } - for _, point := range resp.Datapoints { - tags := map[string]string{ - "region": c.Region, - "unit": snakeCase(*point.Unit), + c.windowEnd = windowEnd +} + +// getDataQueries gets all of the possible queries so we can maximize the request payload. 
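+// One MetricDataQuery is built per metric and selected statistic; the query ID (for
+// example "average_0_1") encodes the statistic plus the metric and filter indexes so
+// results can be mapped back to their dimensions through queryDimensions.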
+func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) ([]*cloudwatch.MetricDataQuery, error) { + if c.metricCache != nil && c.metricCache.queries != nil && c.metricCache.isValid() { + return c.metricCache.queries, nil + } + + c.queryDimensions = map[string]*map[string]string{} + + dataQueries := []*cloudwatch.MetricDataQuery{} + for i, filtered := range filteredMetrics { + for j, metric := range filtered.metrics { + id := strconv.Itoa(j) + "_" + strconv.Itoa(i) + dimension := ctod(metric.Dimensions) + if filtered.statFilter.Match("average") { + c.queryDimensions["average_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("average_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_average")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticAverage), + }, + }) + } + if filtered.statFilter.Match("maximum") { + c.queryDimensions["maximum_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("maximum_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_maximum")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticMaximum), + }, + }) + } + if filtered.statFilter.Match("minimum") { + c.queryDimensions["minimum_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("minimum_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_minimum")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticMinimum), + }, + }) + } + if filtered.statFilter.Match("sum") { + c.queryDimensions["sum_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("sum_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_sum")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticSum), + }, + }) + } + if filtered.statFilter.Match("sample_count") { + c.queryDimensions["sample_count_"+id] = dimension + dataQueries = append(dataQueries, &cloudwatch.MetricDataQuery{ + Id: aws.String("sample_count_" + id), + Label: aws.String(snakeCase(*metric.MetricName + "_sample_count")), + MetricStat: &cloudwatch.MetricStat{ + Metric: metric, + Period: aws.Int64(int64(c.Period.Duration.Seconds())), + Stat: aws.String(cloudwatch.StatisticSampleCount), + }, + }) + } + } + } + + if len(dataQueries) == 0 { + c.Log.Debug("no metrics found to collect") + return nil, nil + } + + if c.metricCache == nil { + c.metricCache = &metricCache{ + queries: dataQueries, + built: time.Now(), + ttl: c.CacheTTL.Duration, + } + } else { + c.metricCache.queries = dataQueries + } + + return dataQueries, nil +} + +// gatherMetrics gets metric data from Cloudwatch. +func (c *CloudWatch) gatherMetrics( + params *cloudwatch.GetMetricDataInput, +) ([]*cloudwatch.MetricDataResult, error) { + results := []*cloudwatch.MetricDataResult{} + + for { + resp, err := c.client.GetMetricData(params) + if err != nil { + return nil, fmt.Errorf("failed to get metric data: %v", err) } - for _, d := range metric.Dimensions { - tags[snakeCase(*d.Name)] = *d.Value + results = append(results, resp.MetricDataResults...) 
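+ // Keep requesting pages until the response no longer includes a NextToken.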
+ if resp.NextToken == nil { + break } + params.NextToken = resp.NextToken + } - // record field for each statistic - fields := map[string]interface{}{} + return results, nil +} - if point.Average != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticAverage)] = *point.Average - } - if point.Maximum != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticMaximum)] = *point.Maximum - } - if point.Minimum != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticMinimum)] = *point.Minimum - } - if point.SampleCount != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticSampleCount)] = *point.SampleCount - } - if point.Sum != nil { - fields[formatField(*metric.MetricName, cloudwatch.StatisticSum)] = *point.Sum - } +func (c *CloudWatch) aggregateMetrics( + acc telegraf.Accumulator, + metricDataResults []*cloudwatch.MetricDataResult, +) error { + var ( + grouper = metric.NewSeriesGrouper() + namespace = sanitizeMeasurement(c.Namespace) + ) - acc.AddFields(formatMeasurement(c.Namespace), fields, tags, *point.Timestamp) + for _, result := range metricDataResults { + tags := map[string]string{} + + if dimensions, ok := c.queryDimensions[*result.Id]; ok { + tags = *dimensions + } + tags["region"] = c.Region + + for i := range result.Values { + grouper.Add(namespace, tags, *result.Timestamps[i], *result.Label, *result.Values[i]) + } + } + + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) } return nil } -/* - * Formatting helpers - */ -func formatField(metricName string, statistic string) string { - return fmt.Sprintf("%s_%s", snakeCase(metricName), snakeCase(statistic)) +func init() { + inputs.Add("cloudwatch", func() telegraf.Input { + return &CloudWatch{ + CacheTTL: internal.Duration{Duration: time.Hour}, + RateLimit: 25, + Timeout: internal.Duration{Duration: time.Second * 5}, + } + }) } -func formatMeasurement(namespace string) string { +func sanitizeMeasurement(namespace string) string { namespace = strings.Replace(namespace, "/", "_", -1) namespace = snakeCase(namespace) - return fmt.Sprintf("cloudwatch_%s", namespace) + return "cloudwatch_" + namespace } func snakeCase(s string) string { s = internal.SnakeCase(s) + s = strings.Replace(s, " ", "_", -1) s = strings.Replace(s, "__", "_", -1) return s } -/* - * Map Metric to *cloudwatch.GetMetricStatisticsInput for given timeframe - */ -func (c *CloudWatch) getStatisticsInput(metric *cloudwatch.Metric, now time.Time) *cloudwatch.GetMetricStatisticsInput { - end := now.Add(-c.Delay.Duration) +type dimension struct { + name string + value string +} - input := &cloudwatch.GetMetricStatisticsInput{ - StartTime: aws.Time(end.Add(-c.Period.Duration)), - EndTime: aws.Time(end), - MetricName: metric.MetricName, - Namespace: metric.Namespace, - Period: aws.Int64(int64(c.Period.Duration.Seconds())), - Dimensions: metric.Dimensions, - Statistics: []*string{ - aws.String(cloudwatch.StatisticAverage), - aws.String(cloudwatch.StatisticMaximum), - aws.String(cloudwatch.StatisticMinimum), - aws.String(cloudwatch.StatisticSum), - aws.String(cloudwatch.StatisticSampleCount)}, +// ctod converts cloudwatch dimensions to regular dimensions. 
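+// Dimension names are converted to snake_case so they can be used directly as tag keys.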
+func ctod(cDimensions []*cloudwatch.Dimension) *map[string]string { + dimensions := map[string]string{} + for i := range cDimensions { + dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value } - return input + return &dimensions } -/* - * Check Metric Cache validity - */ -func (c *MetricCache) IsValid() bool { - return c.Metrics != nil && time.Since(c.Fetched) < c.TTL +func (c *CloudWatch) getDataInputs(dataQueries []*cloudwatch.MetricDataQuery) *cloudwatch.GetMetricDataInput { + return &cloudwatch.GetMetricDataInput{ + StartTime: aws.Time(c.windowStart), + EndTime: aws.Time(c.windowEnd), + MetricDataQueries: dataQueries, + } } -func hasWilcard(dimensions []*Dimension) bool { +// isValid checks the validity of the metric cache. +func (f *metricCache) isValid() bool { + return f.metrics != nil && time.Since(f.built) < f.ttl +} + +func hasWildcard(dimensions []*Dimension) bool { for _, d := range dimensions { if d.Value == "" || d.Value == "*" { return true diff --git a/plugins/inputs/cloudwatch/cloudwatch_test.go b/plugins/inputs/cloudwatch/cloudwatch_test.go index c52b3a353..f28473a57 100644 --- a/plugins/inputs/cloudwatch/cloudwatch_test.go +++ b/plugins/inputs/cloudwatch/cloudwatch_test.go @@ -6,46 +6,98 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" ) type mockGatherCloudWatchClient struct{} func (m *mockGatherCloudWatchClient) ListMetrics(params *cloudwatch.ListMetricsInput) (*cloudwatch.ListMetricsOutput, error) { - metric := &cloudwatch.Metric{ - Namespace: params.Namespace, - MetricName: aws.String("Latency"), - Dimensions: []*cloudwatch.Dimension{ - &cloudwatch.Dimension{ - Name: aws.String("LoadBalancerName"), - Value: aws.String("p-example"), + return &cloudwatch.ListMetricsOutput{ + Metrics: []*cloudwatch.Metric{ + { + Namespace: params.Namespace, + MetricName: aws.String("Latency"), + Dimensions: []*cloudwatch.Dimension{ + { + Name: aws.String("LoadBalancerName"), + Value: aws.String("p-example"), + }, + }, }, }, - } - - result := &cloudwatch.ListMetricsOutput{ - Metrics: []*cloudwatch.Metric{metric}, - } - return result, nil + }, nil } -func (m *mockGatherCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) { - dataPoint := &cloudwatch.Datapoint{ - Timestamp: params.EndTime, - Minimum: aws.Float64(0.1), - Maximum: aws.Float64(0.3), - Average: aws.Float64(0.2), - Sum: aws.Float64(123), - SampleCount: aws.Float64(100), - Unit: aws.String("Seconds"), - } - result := &cloudwatch.GetMetricStatisticsOutput{ - Label: aws.String("Latency"), - Datapoints: []*cloudwatch.Datapoint{dataPoint}, - } - return result, nil +func (m *mockGatherCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { + return &cloudwatch.GetMetricDataOutput{ + MetricDataResults: []*cloudwatch.MetricDataResult{ + { + Id: aws.String("minimum_0_0"), + Label: aws.String("latency_minimum"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + params.EndTime, + }, + Values: []*float64{ + aws.Float64(0.1), + }, + }, + { + Id: aws.String("maximum_0_0"), + Label: aws.String("latency_maximum"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + 
params.EndTime, + }, + Values: []*float64{ + aws.Float64(0.3), + }, + }, + { + Id: aws.String("average_0_0"), + Label: aws.String("latency_average"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + params.EndTime, + }, + Values: []*float64{ + aws.Float64(0.2), + }, + }, + { + Id: aws.String("sum_0_0"), + Label: aws.String("latency_sum"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + params.EndTime, + }, + Values: []*float64{ + aws.Float64(123), + }, + }, + { + Id: aws.String("sample_count_0_0"), + Label: aws.String("latency_sample_count"), + StatusCode: aws.String("completed"), + Timestamps: []*time.Time{ + params.EndTime, + }, + Values: []*float64{ + aws.Float64(100), + }, + }, + }, + }, nil +} + +func TestSnakeCase(t *testing.T) { + assert.Equal(t, "cluster_name", snakeCase("Cluster Name")) + assert.Equal(t, "broker_id", snakeCase("Broker ID")) } func TestGather(t *testing.T) { @@ -64,7 +116,7 @@ func TestGather(t *testing.T) { var acc testutil.Accumulator c.client = &mockGatherCloudWatchClient{} - acc.GatherError(c.Gather) + assert.NoError(t, acc.GatherError(c.Gather)) fields := map[string]interface{}{} fields["latency_minimum"] = 0.1 @@ -74,13 +126,11 @@ func TestGather(t *testing.T) { fields["latency_sample_count"] = 100.0 tags := map[string]string{} - tags["unit"] = "seconds" tags["region"] = "us-east-1" tags["load_balancer_name"] = "p-example" assert.True(t, acc.HasMeasurement("cloudwatch_aws_elb")) acc.AssertContainsTaggedFields(t, "cloudwatch_aws_elb", fields, tags) - } type mockSelectMetricsCloudWatchClient struct{} @@ -100,7 +150,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), Dimensions: []*cloudwatch.Dimension{ - &cloudwatch.Dimension{ + { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), }, @@ -112,11 +162,11 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM Namespace: aws.String("AWS/ELB"), MetricName: aws.String(m), Dimensions: []*cloudwatch.Dimension{ - &cloudwatch.Dimension{ + { Name: aws.String("LoadBalancerName"), Value: aws.String(lb), }, - &cloudwatch.Dimension{ + { Name: aws.String("AvailabilityZone"), Value: aws.String(az), }, @@ -132,7 +182,7 @@ func (m *mockSelectMetricsCloudWatchClient) ListMetrics(params *cloudwatch.ListM return result, nil } -func (m *mockSelectMetricsCloudWatchClient) GetMetricStatistics(params *cloudwatch.GetMetricStatisticsInput) (*cloudwatch.GetMetricStatisticsOutput, error) { +func (m *mockSelectMetricsCloudWatchClient) GetMetricData(params *cloudwatch.GetMetricDataInput) (*cloudwatch.GetMetricDataOutput, error) { return nil, nil } @@ -148,14 +198,14 @@ func TestSelectMetrics(t *testing.T) { Period: internalDuration, RateLimit: 200, Metrics: []*Metric{ - &Metric{ + { MetricNames: []string{"Latency", "RequestCount"}, Dimensions: []*Dimension{ - &Dimension{ + { Name: "LoadBalancerName", Value: "*", }, - &Dimension{ + { Name: "AvailabilityZone", Value: "*", }, @@ -164,10 +214,10 @@ func TestSelectMetrics(t *testing.T) { }, } c.client = &mockSelectMetricsCloudWatchClient{} - metrics, err := SelectMetrics(c) + filtered, err := getFilteredMetrics(c) // We've asked for 2 (out of 4) metrics, over all 3 load balancers in all 2 // AZs. We should get 12 metrics. 
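+ // That is 2 metric names * 3 load balancers * 2 availability zones = 12 metrics.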
- assert.Equal(t, 12, len(metrics)) + assert.Equal(t, 12, len(filtered[0].metrics)) assert.Nil(t, err) } @@ -197,23 +247,99 @@ func TestGenerateStatisticsInputParams(t *testing.T) { now := time.Now() - params := c.getStatisticsInput(m, now) + c.updateWindow(now) + + statFilter, _ := filter.NewIncludeExcludeFilter(nil, nil) + queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) + params := c.getDataInputs(queries) assert.EqualValues(t, *params.EndTime, now.Add(-c.Delay.Duration)) assert.EqualValues(t, *params.StartTime, now.Add(-c.Period.Duration).Add(-c.Delay.Duration)) - assert.Len(t, params.Dimensions, 1) - assert.Len(t, params.Statistics, 5) - assert.EqualValues(t, *params.Period, 60) + require.Len(t, params.MetricDataQueries, 5) + assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) +} + +func TestGenerateStatisticsInputParamsFiltered(t *testing.T) { + d := &cloudwatch.Dimension{ + Name: aws.String("LoadBalancerName"), + Value: aws.String("p-example"), + } + + m := &cloudwatch.Metric{ + MetricName: aws.String("Latency"), + Dimensions: []*cloudwatch.Dimension{d}, + } + + duration, _ := time.ParseDuration("1m") + internalDuration := internal.Duration{ + Duration: duration, + } + + c := &CloudWatch{ + Namespace: "AWS/ELB", + Delay: internalDuration, + Period: internalDuration, + } + + c.initializeCloudWatch() + + now := time.Now() + + c.updateWindow(now) + + statFilter, _ := filter.NewIncludeExcludeFilter([]string{"average", "sample_count"}, nil) + queries, _ := c.getDataQueries([]filteredMetric{{metrics: []*cloudwatch.Metric{m}, statFilter: statFilter}}) + params := c.getDataInputs(queries) + + assert.EqualValues(t, *params.EndTime, now.Add(-c.Delay.Duration)) + assert.EqualValues(t, *params.StartTime, now.Add(-c.Period.Duration).Add(-c.Delay.Duration)) + require.Len(t, params.MetricDataQueries, 2) + assert.Len(t, params.MetricDataQueries[0].MetricStat.Metric.Dimensions, 1) + assert.EqualValues(t, *params.MetricDataQueries[0].MetricStat.Period, 60) } func TestMetricsCacheTimeout(t *testing.T) { - cache := &MetricCache{ - Metrics: []*cloudwatch.Metric{}, - Fetched: time.Now(), - TTL: time.Minute, + cache := &metricCache{ + metrics: []filteredMetric{}, + built: time.Now(), + ttl: time.Minute, } - assert.True(t, cache.IsValid()) - cache.Fetched = time.Now().Add(-time.Minute) - assert.False(t, cache.IsValid()) + assert.True(t, cache.isValid()) + cache.built = time.Now().Add(-time.Minute) + assert.False(t, cache.isValid()) +} + +func TestUpdateWindow(t *testing.T) { + duration, _ := time.ParseDuration("1m") + internalDuration := internal.Duration{ + Duration: duration, + } + + c := &CloudWatch{ + Namespace: "AWS/ELB", + Delay: internalDuration, + Period: internalDuration, + } + + now := time.Now() + + assert.True(t, c.windowEnd.IsZero()) + assert.True(t, c.windowStart.IsZero()) + + c.updateWindow(now) + + newStartTime := c.windowEnd + + // initial window just has a single period + assert.EqualValues(t, c.windowEnd, now.Add(-c.Delay.Duration)) + assert.EqualValues(t, c.windowStart, now.Add(-c.Delay.Duration).Add(-c.Period.Duration)) + + now = time.Now() + c.updateWindow(now) + + // subsequent window uses previous end time as start time + assert.EqualValues(t, c.windowEnd, now.Add(-c.Delay.Duration)) + assert.EqualValues(t, c.windowStart, newStartTime) } diff --git a/plugins/inputs/conntrack/README.md b/plugins/inputs/conntrack/README.md index 
0eae4b3c3..813bc4861 100644 --- a/plugins/inputs/conntrack/README.md +++ b/plugins/inputs/conntrack/README.md @@ -34,7 +34,7 @@ For more information on conntrack-tools, see the "nf_conntrack_count","nf_conntrack_max"] ## Directories to search within for the conntrack files above. - ## Missing directrories will be ignored. + ## Missing directories will be ignored. dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] ``` diff --git a/plugins/inputs/conntrack/conntrack.go b/plugins/inputs/conntrack/conntrack.go index 4df01a31f..bf6c021c8 100644 --- a/plugins/inputs/conntrack/conntrack.go +++ b/plugins/inputs/conntrack/conntrack.go @@ -61,7 +61,7 @@ var sampleConfig = ` "nf_conntrack_count","nf_conntrack_max"] ## Directories to search within for the conntrack files above. - ## Missing directrories will be ignored. + ## Missing directories will be ignored. dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"] ` diff --git a/plugins/inputs/consul/README.md b/plugins/inputs/consul/README.md index f7dcdf362..8e1ecc094 100644 --- a/plugins/inputs/consul/README.md +++ b/plugins/inputs/consul/README.md @@ -12,7 +12,7 @@ report those stats already using StatsD protocol if needed. # Gather health check statuses from services registered in Consul [[inputs.consul]] ## Consul server address - # address = "localhost" + # address = "localhost:8500" ## URI scheme for the Consul server, one of "http", "https" # scheme = "http" @@ -24,8 +24,8 @@ report those stats already using StatsD protocol if needed. # username = "" # password = "" - ## Data centre to query the health checks from - # datacentre = "" + ## Data center to query the health checks from + # datacenter = "" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -44,7 +44,7 @@ report those stats already using StatsD protocol if needed. 
- consul_health_checks - tags: - - node (node that check/service is registred on) + - node (node that check/service is registered on) - service_name - check_id - fields: diff --git a/plugins/inputs/consul/consul.go b/plugins/inputs/consul/consul.go index 8649184dd..964eb9394 100644 --- a/plugins/inputs/consul/consul.go +++ b/plugins/inputs/consul/consul.go @@ -16,7 +16,8 @@ type Consul struct { Token string Username string Password string - Datacentre string + Datacentre string // deprecated in 1.10; use Datacenter + Datacenter string tls.ClientConfig TagDelimiter string @@ -26,7 +27,7 @@ type Consul struct { var sampleConfig = ` ## Consul server address - # address = "localhost" + # address = "localhost:8500" ## URI scheme for the Consul server, one of "http", "https" # scheme = "http" @@ -38,8 +39,8 @@ var sampleConfig = ` # username = "" # password = "" - ## Data centre to query the health checks from - # datacentre = "" + ## Data center to query the health checks from + # datacenter = "" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -77,6 +78,10 @@ func (c *Consul) createAPIClient() (*api.Client, error) { config.Datacenter = c.Datacentre } + if c.Datacenter != "" { + config.Datacenter = c.Datacenter + } + if c.Token != "" { config.Token = c.Token } @@ -121,12 +126,12 @@ func (c *Consul) GatherHealthCheck(acc telegraf.Accumulator, checks []*api.Healt for _, checkTag := range check.ServiceTags { if c.TagDelimiter != "" { splittedTag := strings.SplitN(checkTag, c.TagDelimiter, 2) - if len(splittedTag) == 1 { + if len(splittedTag) == 1 && checkTag != "" { tags[checkTag] = checkTag - } else if len(splittedTag) == 2 { + } else if len(splittedTag) == 2 && splittedTag[1] != "" { tags[splittedTag[0]] = splittedTag[1] } - } else { + } else if checkTag != "" { tags[checkTag] = checkTag } } diff --git a/plugins/inputs/consul/consul_test.go b/plugins/inputs/consul/consul_test.go index e3a7f2fdc..da345ce89 100644 --- a/plugins/inputs/consul/consul_test.go +++ b/plugins/inputs/consul/consul_test.go @@ -8,7 +8,7 @@ import ( ) var sampleChecks = []*api.HealthCheck{ - &api.HealthCheck{ + { Node: "localhost", CheckID: "foo.health123", Name: "foo.health", diff --git a/plugins/inputs/couchbase/README.md b/plugins/inputs/couchbase/README.md index 91e197b43..6db7d3db9 100644 --- a/plugins/inputs/couchbase/README.md +++ b/plugins/inputs/couchbase/README.md @@ -12,7 +12,7 @@ ## http://admin:secret@couchbase-0.example.com:8091/ ## ## If no servers are specified, then localhost is used as the host. - ## If no protocol is specifed, HTTP is used. + ## If no protocol is specified, HTTP is used. ## If no port is specified, 8091 is used. 
servers = ["http://localhost:8091"] ``` @@ -48,16 +48,6 @@ Fields: ## Example output ``` -$ telegraf --config telegraf.conf --input-filter couchbase --test -* Plugin: couchbase, Collection 1 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.187:8091 memory_free=22927384576,memory_total=64424656896 1458381183695864929 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.10.65:8091 memory_free=23520161792,memory_total=64424656896 1458381183695972112 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.13.105:8091 memory_free=23531704320,memory_total=64424656896 1458381183695995259 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.13.173:8091 memory_free=23628767232,memory_total=64424656896 1458381183696010870 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.15.120:8091 memory_free=23616692224,memory_total=64424656896 1458381183696027406 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.8.127:8091 memory_free=23431770112,memory_total=64424656896 1458381183696041040 -> couchbase_node,cluster=https://couchbase-0.example.com/,hostname=172.16.8.148:8091 memory_free=23811371008,memory_total=64424656896 1458381183696059060 -> couchbase_bucket,bucket=default,cluster=https://couchbase-0.example.com/ data_used=25743360,disk_fetches=0,disk_used=31744886,item_count=0,mem_used=77729224,ops_per_sec=0,quota_percent_used=10.58976636614118 1458381183696210074 -> couchbase_bucket,bucket=demoncat,cluster=https://couchbase-0.example.com/ data_used=38157584951,disk_fetches=0,disk_used=62730302441,item_count=14662532,mem_used=24015304256,ops_per_sec=1207.753207753208,quota_percent_used=79.87855353525707 1458381183696242695 -> couchbase_bucket,bucket=blastro-df,cluster=https://couchbase-0.example.com/ data_used=212552491622,disk_fetches=0,disk_used=413323157621,item_count=944655680,mem_used=202421103760,ops_per_sec=1692.176692176692,quota_percent_used=68.9442170551845 1458381183696272206 +couchbase_node,cluster=http://localhost:8091/,hostname=172.17.0.2:8091 memory_free=7705575424,memory_total=16558182400 1547829754000000000 +couchbase_bucket,bucket=beer-sample,cluster=http://localhost:8091/ quota_percent_used=27.09285736083984,ops_per_sec=0,disk_fetches=0,item_count=7303,disk_used=21662946,data_used=9325087,mem_used=28408920 1547829754000000000 ``` diff --git a/plugins/inputs/couchbase/couchbase.go b/plugins/inputs/couchbase/couchbase.go index f773f5d5b..de7f0bec0 100644 --- a/plugins/inputs/couchbase/couchbase.go +++ b/plugins/inputs/couchbase/couchbase.go @@ -86,7 +86,7 @@ func (r *Couchbase) gatherServer(addr string, acc telegraf.Accumulator, pool *co } for bucketName := range pool.BucketMap { - tags := map[string]string{"cluster": addr, "bucket": bucketName} + tags := map[string]string{"cluster": regexpURI.ReplaceAllString(addr, "${1}"), "bucket": bucketName} bs := pool.BucketMap[bucketName].BasicStats fields := make(map[string]interface{}) fields["quota_percent_used"] = bs["quotaPercentUsed"] diff --git a/plugins/inputs/couchdb/README.md b/plugins/inputs/couchdb/README.md index 686914583..3a7f127db 100644 --- a/plugins/inputs/couchdb/README.md +++ b/plugins/inputs/couchdb/README.md @@ -1,14 +1,18 @@ # CouchDB Input Plugin ---- -The CouchDB plugin gathers metrics of CouchDB using [_stats](http://docs.couchdb.org/en/1.6.1/api/server/common.html?highlight=stats#get--_stats) endpoint. +The CouchDB plugin gathers metrics of CouchDB using [_stats] endpoint. 
-### Configuration: +### Configuration -``` -# Sample Config: +```toml [[inputs.couchdb]] - hosts = ["http://localhost:5984/_stats"] + ## Works with CouchDB stats endpoints out of the box + ## Multiple Hosts from which to read CouchDB stats: + hosts = ["http://localhost:8086/_stats"] + + ## Use HTTP Basic Authentication. + # basic_username = "telegraf" + # basic_password = "p@ssw0rd" ``` ### Measurements & Fields: @@ -62,194 +66,14 @@ httpd statistics: ### Example output: +**Post Couchdb 2.0** ``` -➜ telegraf git:(master) ✗ ./telegraf --config ./config.conf --input-filter couchdb --test -* Plugin: couchdb, - Collection 1 -> couchdb,server=http://localhost:5984/_stats couchdb_auth_cache_hits_current=0, -couchdb_auth_cache_hits_max=0, -couchdb_auth_cache_hits_mean=0, -couchdb_auth_cache_hits_min=0, -couchdb_auth_cache_hits_stddev=0, -couchdb_auth_cache_hits_sum=0, -couchdb_auth_cache_misses_current=0, -couchdb_auth_cache_misses_max=0, -couchdb_auth_cache_misses_mean=0, -couchdb_auth_cache_misses_min=0, -couchdb_auth_cache_misses_stddev=0, -couchdb_auth_cache_misses_sum=0, -couchdb_database_reads_current=0, -couchdb_database_reads_max=0, -couchdb_database_reads_mean=0, -couchdb_database_reads_min=0, -couchdb_database_reads_stddev=0, -couchdb_database_reads_sum=0, -couchdb_database_writes_current=1102, -couchdb_database_writes_max=131, -couchdb_database_writes_mean=0.116, -couchdb_database_writes_min=0, -couchdb_database_writes_stddev=3.536, -couchdb_database_writes_sum=1102, -couchdb_open_databases_current=1, -couchdb_open_databases_max=1, -couchdb_open_databases_mean=0, -couchdb_open_databases_min=0, -couchdb_open_databases_stddev=0.01, -couchdb_open_databases_sum=1, -couchdb_open_os_files_current=2, -couchdb_open_os_files_max=2, -couchdb_open_os_files_mean=0, -couchdb_open_os_files_min=0, -couchdb_open_os_files_stddev=0.02, -couchdb_open_os_files_sum=2, -couchdb_request_time_current=242.21, -couchdb_request_time_max=102, -couchdb_request_time_mean=5.767, -couchdb_request_time_min=1, -couchdb_request_time_stddev=17.369, -couchdb_request_time_sum=242.21, -httpd_bulk_requests_current=0, -httpd_bulk_requests_max=0, -httpd_bulk_requests_mean=0, -httpd_bulk_requests_min=0, -httpd_bulk_requests_stddev=0, -httpd_bulk_requests_sum=0, -httpd_clients_requesting_changes_current=0, -httpd_clients_requesting_changes_max=0, -httpd_clients_requesting_changes_mean=0, -httpd_clients_requesting_changes_min=0, -httpd_clients_requesting_changes_stddev=0, -httpd_clients_requesting_changes_sum=0, -httpd_request_methods_copy_current=0, -httpd_request_methods_copy_max=0, -httpd_request_methods_copy_mean=0, -httpd_request_methods_copy_min=0, -httpd_request_methods_copy_stddev=0, -httpd_request_methods_copy_sum=0, -httpd_request_methods_delete_current=0, -httpd_request_methods_delete_max=0, -httpd_request_methods_delete_mean=0, -httpd_request_methods_delete_min=0, -httpd_request_methods_delete_stddev=0, -httpd_request_methods_delete_sum=0, -httpd_request_methods_get_current=31, -httpd_request_methods_get_max=1, -httpd_request_methods_get_mean=0.003, -httpd_request_methods_get_min=0, -httpd_request_methods_get_stddev=0.057, -httpd_request_methods_get_sum=31, -httpd_request_methods_head_current=0, -httpd_request_methods_head_max=0, -httpd_request_methods_head_mean=0, -httpd_request_methods_head_min=0, -httpd_request_methods_head_stddev=0, -httpd_request_methods_head_sum=0, -httpd_request_methods_post_current=1102, -httpd_request_methods_post_max=131, -httpd_request_methods_post_mean=0.116, 
-httpd_request_methods_post_min=0, -httpd_request_methods_post_stddev=3.536, -httpd_request_methods_post_sum=1102, -httpd_request_methods_put_current=1, -httpd_request_methods_put_max=1, -httpd_request_methods_put_mean=0, -httpd_request_methods_put_min=0, -httpd_request_methods_put_stddev=0.01, -httpd_request_methods_put_sum=1, -httpd_requests_current=1133, -httpd_requests_max=130, -httpd_requests_mean=0.118, -httpd_requests_min=0, -httpd_requests_stddev=3.512, -httpd_requests_sum=1133, -httpd_status_codes_200_current=31, -httpd_status_codes_200_max=1, -httpd_status_codes_200_mean=0.003, -httpd_status_codes_200_min=0, -httpd_status_codes_200_stddev=0.057, -httpd_status_codes_200_sum=31, -httpd_status_codes_201_current=1103, -httpd_status_codes_201_max=130, -httpd_status_codes_201_mean=0.116, -httpd_status_codes_201_min=0, -httpd_status_codes_201_stddev=3.532, -httpd_status_codes_201_sum=1103, -httpd_status_codes_202_current=0, -httpd_status_codes_202_max=0, -httpd_status_codes_202_mean=0, -httpd_status_codes_202_min=0, -httpd_status_codes_202_stddev=0, -httpd_status_codes_202_sum=0, -httpd_status_codes_301_current=0, -httpd_status_codes_301_max=0, -httpd_status_codes_301_mean=0, -httpd_status_codes_301_min=0, -httpd_status_codes_301_stddev=0, -httpd_status_codes_301_sum=0, -httpd_status_codes_304_current=0, -httpd_status_codes_304_max=0, -httpd_status_codes_304_mean=0, -httpd_status_codes_304_min=0, -httpd_status_codes_304_stddev=0, -httpd_status_codes_304_sum=0, -httpd_status_codes_400_current=0, -httpd_status_codes_400_max=0, -httpd_status_codes_400_mean=0, -httpd_status_codes_400_min=0, -httpd_status_codes_400_stddev=0, -httpd_status_codes_400_sum=0, -httpd_status_codes_401_current=0, -httpd_status_codes_401_max=0, -httpd_status_codes_401_mean=0, -httpd_status_codes_401_min=0, -httpd_status_codes_401_stddev=0, -httpd_status_codes_401_sum=0, -httpd_status_codes_403_current=0, -httpd_status_codes_403_max=0, -httpd_status_codes_403_mean=0, -httpd_status_codes_403_min=0, -httpd_status_codes_403_stddev=0, -httpd_status_codes_403_sum=0, -httpd_status_codes_404_current=0, -httpd_status_codes_404_max=0, -httpd_status_codes_404_mean=0, -httpd_status_codes_404_min=0, -httpd_status_codes_404_stddev=0, -httpd_status_codes_404_sum=0, -httpd_status_codes_405_current=0, -httpd_status_codes_405_max=0, -httpd_status_codes_405_mean=0, -httpd_status_codes_405_min=0, -httpd_status_codes_405_stddev=0, -httpd_status_codes_405_sum=0, -httpd_status_codes_409_current=0, -httpd_status_codes_409_max=0, -httpd_status_codes_409_mean=0, -httpd_status_codes_409_min=0, -httpd_status_codes_409_stddev=0, -httpd_status_codes_409_sum=0, -httpd_status_codes_412_current=0, -httpd_status_codes_412_max=0, -httpd_status_codes_412_mean=0, -httpd_status_codes_412_min=0, -httpd_status_codes_412_stddev=0, -httpd_status_codes_412_sum=0, -httpd_status_codes_500_current=0, -httpd_status_codes_500_max=0, -httpd_status_codes_500_mean=0, -httpd_status_codes_500_min=0, -httpd_status_codes_500_stddev=0, -httpd_status_codes_500_sum=0, -httpd_temporary_view_reads_current=0, -httpd_temporary_view_reads_max=0, -httpd_temporary_view_reads_mean=0, -httpd_temporary_view_reads_min=0, -httpd_temporary_view_reads_stddev=0, -httpd_temporary_view_reads_sum=0, -httpd_view_reads_current=0, -httpd_view_reads_max=0, -httpd_view_reads_mean=0, -httpd_view_reads_min=0, -httpd_view_reads_stddev=0, -httpd_view_reads_sum=0 1454692257621938169 +couchdb,server=http://couchdb22:5984/_node/_local/_stats 
couchdb_auth_cache_hits_value=0,httpd_request_methods_delete_value=0,couchdb_auth_cache_misses_value=0,httpd_request_methods_get_value=42,httpd_status_codes_304_value=0,httpd_status_codes_400_value=0,httpd_request_methods_head_value=0,httpd_status_codes_201_value=0,couchdb_database_reads_value=0,httpd_request_methods_copy_value=0,couchdb_request_time_max=0,httpd_status_codes_200_value=42,httpd_status_codes_301_value=0,couchdb_open_os_files_value=2,httpd_request_methods_put_value=0,httpd_request_methods_post_value=0,httpd_status_codes_202_value=0,httpd_status_codes_403_value=0,httpd_status_codes_409_value=0,couchdb_database_writes_value=0,couchdb_request_time_min=0,httpd_status_codes_412_value=0,httpd_status_codes_500_value=0,httpd_status_codes_401_value=0,httpd_status_codes_404_value=0,httpd_status_codes_405_value=0,couchdb_open_databases_value=0 1536707179000000000 ``` + +**Pre Couchdb 2.0** +``` +couchdb,server=http://couchdb16:5984/_stats couchdb_request_time_sum=96,httpd_status_codes_200_sum=37,httpd_status_codes_200_min=0,httpd_requests_mean=0.005,httpd_requests_min=0,couchdb_request_time_stddev=3.833,couchdb_request_time_min=1,httpd_request_methods_get_stddev=0.073,httpd_request_methods_get_min=0,httpd_status_codes_200_mean=0.005,httpd_status_codes_200_max=1,httpd_requests_sum=37,couchdb_request_time_current=96,httpd_request_methods_get_sum=37,httpd_request_methods_get_mean=0.005,httpd_request_methods_get_max=1,httpd_status_codes_200_stddev=0.073,couchdb_request_time_mean=2.595,couchdb_request_time_max=25,httpd_request_methods_get_current=37,httpd_status_codes_200_current=37,httpd_requests_current=37,httpd_requests_stddev=0.073,httpd_requests_max=1 1536707179000000000 +``` + +[_stats]: http://docs.couchdb.org/en/1.6.1/api/server/common.html?highlight=stats#get--_stats diff --git a/plugins/inputs/couchdb/couchdb.go b/plugins/inputs/couchdb/couchdb.go index da6ba67dc..1b542d042 100644 --- a/plugins/inputs/couchdb/couchdb.go +++ b/plugins/inputs/couchdb/couchdb.go @@ -3,44 +3,52 @@ package couchdb import ( "encoding/json" "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" "net/http" - "reflect" "sync" "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) -// Schema: -type metaData struct { - Description string `json:"description"` - Current float64 `json:"current"` - Sum float64 `json:"sum"` - Mean float64 `json:"mean"` - Stddev float64 `json:"stddev"` - Min float64 `json:"min"` - Max float64 `json:"max"` -} +type ( + metaData struct { + Current *float64 `json:"current"` + Sum *float64 `json:"sum"` + Mean *float64 `json:"mean"` + Stddev *float64 `json:"stddev"` + Min *float64 `json:"min"` + Max *float64 `json:"max"` + Value *float64 `json:"value"` + } -type Stats struct { - Couchdb struct { - AuthCacheMisses metaData `json:"auth_cache_misses"` - DatabaseWrites metaData `json:"database_writes"` - OpenDatabases metaData `json:"open_databases"` - AuthCacheHits metaData `json:"auth_cache_hits"` - RequestTime metaData `json:"request_time"` - DatabaseReads metaData `json:"database_reads"` - OpenOsFiles metaData `json:"open_os_files"` - } `json:"couchdb"` - HttpdRequestMethods struct { + oldValue struct { + Value metaData `json:"value"` + metaData + } + + couchdb struct { + AuthCacheHits metaData `json:"auth_cache_hits"` + AuthCacheMisses metaData `json:"auth_cache_misses"` + DatabaseWrites metaData `json:"database_writes"` + DatabaseReads metaData `json:"database_reads"` + OpenDatabases metaData 
`json:"open_databases"` + OpenOsFiles metaData `json:"open_os_files"` + RequestTime oldValue `json:"request_time"` + HttpdRequestMethods httpdRequestMethods `json:"httpd_request_methods"` + HttpdStatusCodes httpdStatusCodes `json:"httpd_status_codes"` + } + + httpdRequestMethods struct { Put metaData `json:"PUT"` Get metaData `json:"GET"` Copy metaData `json:"COPY"` Delete metaData `json:"DELETE"` Post metaData `json:"POST"` Head metaData `json:"HEAD"` - } `json:"httpd_request_methods"` - HttpdStatusCodes struct { + } + + httpdStatusCodes struct { Status200 metaData `json:"200"` Status201 metaData `json:"201"` Status202 metaData `json:"202"` @@ -54,19 +62,31 @@ type Stats struct { Status409 metaData `json:"409"` Status412 metaData `json:"412"` Status500 metaData `json:"500"` - } `json:"httpd_status_codes"` - Httpd struct { - ClientsRequestingChanges metaData `json:"clients_requesting_changes"` - TemporaryViewReads metaData `json:"temporary_view_reads"` - Requests metaData `json:"requests"` - BulkRequests metaData `json:"bulk_requests"` - ViewReads metaData `json:"view_reads"` - } `json:"httpd"` -} + } -type CouchDB struct { - HOSTs []string `toml:"hosts"` -} + httpd struct { + BulkRequests metaData `json:"bulk_requests"` + Requests metaData `json:"requests"` + TemporaryViewReads metaData `json:"temporary_view_reads"` + ViewReads metaData `json:"view_reads"` + ClientsRequestingChanges metaData `json:"clients_requesting_changes"` + } + + Stats struct { + Couchdb couchdb `json:"couchdb"` + HttpdRequestMethods httpdRequestMethods `json:"httpd_request_methods"` + HttpdStatusCodes httpdStatusCodes `json:"httpd_status_codes"` + Httpd httpd `json:"httpd"` + } + + CouchDB struct { + Hosts []string `toml:"hosts"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + + client *http.Client + } +) func (*CouchDB) Description() string { return "Read CouchDB Stats from one or more servers" @@ -75,14 +95,18 @@ func (*CouchDB) Description() string { func (*CouchDB) SampleConfig() string { return ` ## Works with CouchDB stats endpoints out of the box - ## Multiple HOSTs from which to read CouchDB stats: + ## Multiple Hosts from which to read CouchDB stats: hosts = ["http://localhost:8086/_stats"] + + ## Use HTTP Basic Authentication. 
+ # basic_username = "telegraf" + # basic_password = "p@ssw0rd" ` } func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error { var wg sync.WaitGroup - for _, u := range c.HOSTs { + for _, u := range c.Hosts { wg.Add(1) go func(host string) { defer wg.Done() @@ -97,67 +121,135 @@ func (c *CouchDB) Gather(accumulator telegraf.Accumulator) error { return nil } -var tr = &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), -} - -var client = &http.Client{ - Transport: tr, - Timeout: time.Duration(4 * time.Second), -} - func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host string) error { + if c.client == nil { + c.client = &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), + }, + Timeout: time.Duration(4 * time.Second), + } + } - response, error := client.Get(host) + req, err := http.NewRequest("GET", host, nil) + if err != nil { + return err + } + + if c.BasicUsername != "" || c.BasicPassword != "" { + req.SetBasicAuth(c.BasicUsername, c.BasicPassword) + } + + response, error := c.client.Do(req) if error != nil { return error } defer response.Body.Close() - var stats Stats + if response.StatusCode != 200 { + return fmt.Errorf("Failed to get stats from couchdb: HTTP responded %d", response.StatusCode) + } + + stats := Stats{} decoder := json.NewDecoder(response.Body) decoder.Decode(&stats) fields := map[string]interface{}{} + // for couchdb 2.0 API changes + requestTime := metaData{ + Current: stats.Couchdb.RequestTime.Current, + Sum: stats.Couchdb.RequestTime.Sum, + Mean: stats.Couchdb.RequestTime.Mean, + Stddev: stats.Couchdb.RequestTime.Stddev, + Min: stats.Couchdb.RequestTime.Min, + Max: stats.Couchdb.RequestTime.Max, + } + + httpdRequestMethodsPut := stats.HttpdRequestMethods.Put + httpdRequestMethodsGet := stats.HttpdRequestMethods.Get + httpdRequestMethodsCopy := stats.HttpdRequestMethods.Copy + httpdRequestMethodsDelete := stats.HttpdRequestMethods.Delete + httpdRequestMethodsPost := stats.HttpdRequestMethods.Post + httpdRequestMethodsHead := stats.HttpdRequestMethods.Head + + httpdStatusCodesStatus200 := stats.HttpdStatusCodes.Status200 + httpdStatusCodesStatus201 := stats.HttpdStatusCodes.Status201 + httpdStatusCodesStatus202 := stats.HttpdStatusCodes.Status202 + httpdStatusCodesStatus301 := stats.HttpdStatusCodes.Status301 + httpdStatusCodesStatus304 := stats.HttpdStatusCodes.Status304 + httpdStatusCodesStatus400 := stats.HttpdStatusCodes.Status400 + httpdStatusCodesStatus401 := stats.HttpdStatusCodes.Status401 + httpdStatusCodesStatus403 := stats.HttpdStatusCodes.Status403 + httpdStatusCodesStatus404 := stats.HttpdStatusCodes.Status404 + httpdStatusCodesStatus405 := stats.HttpdStatusCodes.Status405 + httpdStatusCodesStatus409 := stats.HttpdStatusCodes.Status409 + httpdStatusCodesStatus412 := stats.HttpdStatusCodes.Status412 + httpdStatusCodesStatus500 := stats.HttpdStatusCodes.Status500 + // check if couchdb2.0 is used + if stats.Couchdb.HttpdRequestMethods.Get.Value != nil { + requestTime = stats.Couchdb.RequestTime.Value + + httpdRequestMethodsPut = stats.Couchdb.HttpdRequestMethods.Put + httpdRequestMethodsGet = stats.Couchdb.HttpdRequestMethods.Get + httpdRequestMethodsCopy = stats.Couchdb.HttpdRequestMethods.Copy + httpdRequestMethodsDelete = stats.Couchdb.HttpdRequestMethods.Delete + httpdRequestMethodsPost = stats.Couchdb.HttpdRequestMethods.Post + httpdRequestMethodsHead = stats.Couchdb.HttpdRequestMethods.Head + + httpdStatusCodesStatus200 = 
stats.Couchdb.HttpdStatusCodes.Status200 + httpdStatusCodesStatus201 = stats.Couchdb.HttpdStatusCodes.Status201 + httpdStatusCodesStatus202 = stats.Couchdb.HttpdStatusCodes.Status202 + httpdStatusCodesStatus301 = stats.Couchdb.HttpdStatusCodes.Status301 + httpdStatusCodesStatus304 = stats.Couchdb.HttpdStatusCodes.Status304 + httpdStatusCodesStatus400 = stats.Couchdb.HttpdStatusCodes.Status400 + httpdStatusCodesStatus401 = stats.Couchdb.HttpdStatusCodes.Status401 + httpdStatusCodesStatus403 = stats.Couchdb.HttpdStatusCodes.Status403 + httpdStatusCodesStatus404 = stats.Couchdb.HttpdStatusCodes.Status404 + httpdStatusCodesStatus405 = stats.Couchdb.HttpdStatusCodes.Status405 + httpdStatusCodesStatus409 = stats.Couchdb.HttpdStatusCodes.Status409 + httpdStatusCodesStatus412 = stats.Couchdb.HttpdStatusCodes.Status412 + httpdStatusCodesStatus500 = stats.Couchdb.HttpdStatusCodes.Status500 + } + // CouchDB meta stats: - c.MapCopy(fields, c.generateFields("couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses)) - c.MapCopy(fields, c.generateFields("couchdb_database_writes", stats.Couchdb.DatabaseWrites)) - c.MapCopy(fields, c.generateFields("couchdb_open_databases", stats.Couchdb.OpenDatabases)) - c.MapCopy(fields, c.generateFields("couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits)) - c.MapCopy(fields, c.generateFields("couchdb_request_time", stats.Couchdb.RequestTime)) - c.MapCopy(fields, c.generateFields("couchdb_database_reads", stats.Couchdb.DatabaseReads)) - c.MapCopy(fields, c.generateFields("couchdb_open_os_files", stats.Couchdb.OpenOsFiles)) + c.generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses) + c.generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites) + c.generateFields(fields, "couchdb_open_databases", stats.Couchdb.OpenDatabases) + c.generateFields(fields, "couchdb_auth_cache_hits", stats.Couchdb.AuthCacheHits) + c.generateFields(fields, "couchdb_request_time", requestTime) + c.generateFields(fields, "couchdb_database_reads", stats.Couchdb.DatabaseReads) + c.generateFields(fields, "couchdb_open_os_files", stats.Couchdb.OpenOsFiles) // http request methods stats: - c.MapCopy(fields, c.generateFields("httpd_request_methods_put", stats.HttpdRequestMethods.Put)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_get", stats.HttpdRequestMethods.Get)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_copy", stats.HttpdRequestMethods.Copy)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_delete", stats.HttpdRequestMethods.Delete)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_post", stats.HttpdRequestMethods.Post)) - c.MapCopy(fields, c.generateFields("httpd_request_methods_head", stats.HttpdRequestMethods.Head)) + c.generateFields(fields, "httpd_request_methods_put", httpdRequestMethodsPut) + c.generateFields(fields, "httpd_request_methods_get", httpdRequestMethodsGet) + c.generateFields(fields, "httpd_request_methods_copy", httpdRequestMethodsCopy) + c.generateFields(fields, "httpd_request_methods_delete", httpdRequestMethodsDelete) + c.generateFields(fields, "httpd_request_methods_post", httpdRequestMethodsPost) + c.generateFields(fields, "httpd_request_methods_head", httpdRequestMethodsHead) // status code stats: - c.MapCopy(fields, c.generateFields("httpd_status_codes_200", stats.HttpdStatusCodes.Status200)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_201", stats.HttpdStatusCodes.Status201)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_202", 
stats.HttpdStatusCodes.Status202)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_301", stats.HttpdStatusCodes.Status301)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_304", stats.HttpdStatusCodes.Status304)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_400", stats.HttpdStatusCodes.Status400)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_401", stats.HttpdStatusCodes.Status401)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_403", stats.HttpdStatusCodes.Status403)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_404", stats.HttpdStatusCodes.Status404)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_405", stats.HttpdStatusCodes.Status405)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_409", stats.HttpdStatusCodes.Status409)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_412", stats.HttpdStatusCodes.Status412)) - c.MapCopy(fields, c.generateFields("httpd_status_codes_500", stats.HttpdStatusCodes.Status500)) + c.generateFields(fields, "httpd_status_codes_200", httpdStatusCodesStatus200) + c.generateFields(fields, "httpd_status_codes_201", httpdStatusCodesStatus201) + c.generateFields(fields, "httpd_status_codes_202", httpdStatusCodesStatus202) + c.generateFields(fields, "httpd_status_codes_301", httpdStatusCodesStatus301) + c.generateFields(fields, "httpd_status_codes_304", httpdStatusCodesStatus304) + c.generateFields(fields, "httpd_status_codes_400", httpdStatusCodesStatus400) + c.generateFields(fields, "httpd_status_codes_401", httpdStatusCodesStatus401) + c.generateFields(fields, "httpd_status_codes_403", httpdStatusCodesStatus403) + c.generateFields(fields, "httpd_status_codes_404", httpdStatusCodesStatus404) + c.generateFields(fields, "httpd_status_codes_405", httpdStatusCodesStatus405) + c.generateFields(fields, "httpd_status_codes_409", httpdStatusCodesStatus409) + c.generateFields(fields, "httpd_status_codes_412", httpdStatusCodesStatus412) + c.generateFields(fields, "httpd_status_codes_500", httpdStatusCodesStatus500) // httpd stats: - c.MapCopy(fields, c.generateFields("httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges)) - c.MapCopy(fields, c.generateFields("httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads)) - c.MapCopy(fields, c.generateFields("httpd_requests", stats.Httpd.Requests)) - c.MapCopy(fields, c.generateFields("httpd_bulk_requests", stats.Httpd.BulkRequests)) - c.MapCopy(fields, c.generateFields("httpd_view_reads", stats.Httpd.ViewReads)) + c.generateFields(fields, "httpd_clients_requesting_changes", stats.Httpd.ClientsRequestingChanges) + c.generateFields(fields, "httpd_temporary_view_reads", stats.Httpd.TemporaryViewReads) + c.generateFields(fields, "httpd_requests", stats.Httpd.Requests) + c.generateFields(fields, "httpd_bulk_requests", stats.Httpd.BulkRequests) + c.generateFields(fields, "httpd_view_reads", stats.Httpd.ViewReads) tags := map[string]string{ "server": host, @@ -166,34 +258,39 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri return nil } -func (*CouchDB) MapCopy(dst, src interface{}) { - dv, sv := reflect.ValueOf(dst), reflect.ValueOf(src) - for _, k := range sv.MapKeys() { - dv.SetMapIndex(k, sv.MapIndex(k)) +func (c *CouchDB) generateFields(fields map[string]interface{}, prefix string, obj metaData) { + if obj.Value != nil { + fields[prefix+"_value"] = *obj.Value } -} - -func (*CouchDB) safeCheck(value interface{}) interface{} { - if value == nil { - return 0.0 + if obj.Current != nil { + 
fields[prefix+"_current"] = *obj.Current } - return value -} - -func (c *CouchDB) generateFields(prefix string, obj metaData) map[string]interface{} { - fields := map[string]interface{}{ - prefix + "_current": c.safeCheck(obj.Current), - prefix + "_sum": c.safeCheck(obj.Sum), - prefix + "_mean": c.safeCheck(obj.Mean), - prefix + "_stddev": c.safeCheck(obj.Stddev), - prefix + "_min": c.safeCheck(obj.Min), - prefix + "_max": c.safeCheck(obj.Max), + if obj.Sum != nil { + fields[prefix+"_sum"] = *obj.Sum + } + if obj.Mean != nil { + fields[prefix+"_mean"] = *obj.Mean + } + if obj.Stddev != nil { + fields[prefix+"_stddev"] = *obj.Stddev + } + if obj.Min != nil { + fields[prefix+"_min"] = *obj.Min + } + if obj.Max != nil { + fields[prefix+"_max"] = *obj.Max } - return fields } func init() { inputs.Add("couchdb", func() telegraf.Input { - return &CouchDB{} + return &CouchDB{ + client: &http.Client{ + Transport: &http.Transport{ + ResponseHeaderTimeout: time.Duration(3 * time.Second), + }, + Timeout: time.Duration(4 * time.Second), + }, + } }) } diff --git a/plugins/inputs/couchdb/couchdb_test.go b/plugins/inputs/couchdb/couchdb_test.go index 4c0370852..933d6cc8d 100644 --- a/plugins/inputs/couchdb/couchdb_test.go +++ b/plugins/inputs/couchdb/couchdb_test.go @@ -1,12 +1,13 @@ package couchdb_test import ( - "github.com/influxdata/telegraf/plugins/inputs/couchdb" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" "net/http" "net/http/httptest" "testing" + + "github.com/influxdata/telegraf/plugins/inputs/couchdb" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) func TestBasic(t *testing.T) { @@ -312,7 +313,7 @@ func TestBasic(t *testing.T) { defer fakeServer.Close() plugin := &couchdb.CouchDB{ - HOSTs: []string{fakeServer.URL + "/_stats"}, + Hosts: []string{fakeServer.URL + "/_stats"}, } var acc testutil.Accumulator diff --git a/plugins/inputs/couchdb/dev/telegraf.conf b/plugins/inputs/couchdb/dev/telegraf.conf new file mode 100644 index 000000000..30366e922 --- /dev/null +++ b/plugins/inputs/couchdb/dev/telegraf.conf @@ -0,0 +1,9 @@ +[agent] + interval="1s" + flush_interval="1s" + +[[inputs.couchdb]] + hosts = ["http://couchdb16:5984/_stats", "http://couchdb22:5984/_node/_local/_stats"] + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/cpu/README.md b/plugins/inputs/cpu/README.md new file mode 100644 index 000000000..bc86ae898 --- /dev/null +++ b/plugins/inputs/cpu/README.md @@ -0,0 +1,67 @@ +# CPU Input Plugin + +The `cpu` plugin gather metrics on the system CPUs. + +#### Configuration +```toml +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics. + collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states. + report_active = false +``` + +### Metrics + +On Linux, consult `man proc` for details on the meanings of these values. 
+ +- cpu + - tags: + - cpu (CPU ID or `cpu-total`) + - fields: + - time_user (float) + - time_system (float) + - time_idle (float) + - time_active (float) + - time_nice (float) + - time_iowait (float) + - time_irq (float) + - time_softirq (float) + - time_steal (float) + - time_guest (float) + - time_guest_nice (float) + - usage_user (float, percent) + - usage_system (float, percent) + - usage_idle (float, percent) + - usage_active (float) + - usage_nice (float, percent) + - usage_iowait (float, percent) + - usage_irq (float, percent) + - usage_softirq (float, percent) + - usage_steal (float, percent) + - usage_guest (float, percent) + - usage_guest_nice (float, percent) + +### Troubleshooting + +On Linux systems the `/proc/stat` file is used to gather CPU times. +Percentages are based on the last 2 samples. + +### Example Output + +``` +cpu,cpu=cpu0,host=loaner time_active=202224.15999999992,time_guest=30250.35,time_guest_nice=0,time_idle=1527035.04,time_iowait=1352,time_irq=0,time_nice=169.28,time_softirq=6281.4,time_steal=0,time_system=40097.14,time_user=154324.34 1568760922000000000 +cpu,cpu=cpu0,host=loaner usage_active=31.249999981810106,usage_guest=2.083333333080696,usage_guest_nice=0,usage_idle=68.7500000181899,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.166666666161392,usage_user=25.000000002273737 1568760922000000000 +cpu,cpu=cpu1,host=loaner time_active=201890.02000000002,time_guest=30508.41,time_guest_nice=0,time_idle=264641.18,time_iowait=210.44,time_irq=0,time_nice=181.75,time_softirq=4537.88,time_steal=0,time_system=39480.7,time_user=157479.25 1568760922000000000 +cpu,cpu=cpu1,host=loaner usage_active=12.500000010610771,usage_guest=2.0833333328280585,usage_guest_nice=0,usage_idle=87.49999998938922,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=2.0833333332070145,usage_steal=0,usage_system=4.166666665656117,usage_user=4.166666666414029 1568760922000000000 +cpu,cpu=cpu2,host=loaner time_active=201382.78999999998,time_guest=30325.8,time_guest_nice=0,time_idle=264686.63,time_iowait=202.77,time_irq=0,time_nice=162.81,time_softirq=3378.34,time_steal=0,time_system=39270.59,time_user=158368.28 1568760922000000000 +cpu,cpu=cpu2,host=loaner usage_active=15.999999993480742,usage_guest=1.9999999999126885,usage_guest_nice=0,usage_idle=84.00000000651926,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=2.0000000002764864,usage_steal=0,usage_system=3.999999999825377,usage_user=7.999999998923158 1568760922000000000 +cpu,cpu=cpu3,host=loaner time_active=198953.51000000007,time_guest=30344.43,time_guest_nice=0,time_idle=265504.09,time_iowait=187.64,time_irq=0,time_nice=197.47,time_softirq=2301.47,time_steal=0,time_system=39313.73,time_user=156953.2 1568760922000000000 +cpu,cpu=cpu3,host=loaner usage_active=10.41666667424579,usage_guest=0,usage_guest_nice=0,usage_idle=89.58333332575421,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.166666666666667,usage_user=6.249999998484175 1568760922000000000 +cpu,cpu=cpu-total,host=loaner time_active=804450.5299999998,time_guest=121429,time_guest_nice=0,time_idle=2321866.96,time_iowait=1952.86,time_irq=0,time_nice=711.32,time_softirq=16499.1,time_steal=0,time_system=158162.17,time_user=627125.08 1568760922000000000 +cpu,cpu=cpu-total,host=loaner 
usage_active=17.616580305880305,usage_guest=1.036269430422946,usage_guest_nice=0,usage_idle=82.3834196941197,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=1.0362694300459534,usage_steal=0,usage_system=4.145077721691784,usage_user=11.398963731636465 1568760922000000000 +``` diff --git a/plugins/inputs/system/cpu.go b/plugins/inputs/cpu/cpu.go similarity index 95% rename from plugins/inputs/system/cpu.go rename to plugins/inputs/cpu/cpu.go index 99fa451b3..e073309e4 100644 --- a/plugins/inputs/system/cpu.go +++ b/plugins/inputs/cpu/cpu.go @@ -1,4 +1,4 @@ -package system +package cpu import ( "fmt" @@ -6,11 +6,12 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/shirou/gopsutil/cpu" ) type CPUStats struct { - ps PS + ps system.PS lastStats map[string]cpu.TimesStat PerCPU bool `toml:"percpu"` @@ -19,7 +20,7 @@ type CPUStats struct { ReportActive bool `toml:"report_active"` } -func NewCPUStats(ps PS) *CPUStats { +func NewCPUStats(ps system.PS) *CPUStats { return &CPUStats{ ps: ps, CollectCPUTime: true, @@ -146,7 +147,7 @@ func init() { return &CPUStats{ PerCPU: true, TotalCPU: true, - ps: newSystemPS(), + ps: system.NewSystemPS(), } }) } diff --git a/plugins/inputs/system/cpu_test.go b/plugins/inputs/cpu/cpu_test.go similarity index 94% rename from plugins/inputs/system/cpu_test.go rename to plugins/inputs/cpu/cpu_test.go index 43825fca7..bf356ec7b 100644 --- a/plugins/inputs/system/cpu_test.go +++ b/plugins/inputs/cpu/cpu_test.go @@ -1,9 +1,10 @@ -package system +package cpu import ( "fmt" "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/cpu" "github.com/stretchr/testify/assert" @@ -11,7 +12,7 @@ import ( ) func TestCPUStats(t *testing.T) { - var mps MockPS + var mps system.MockPS defer mps.AssertExpectations(t) var acc testutil.Accumulator @@ -54,7 +55,7 @@ func TestCPUStats(t *testing.T) { err := cs.Gather(&acc) require.NoError(t, err) - // Computed values are checked with delta > 0 because of floating point arithmatic + // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 8.8, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_system", 8.2, 0, cputags) @@ -68,7 +69,7 @@ func TestCPUStats(t *testing.T) { assertContainsTaggedFloat(t, &acc, "cpu", "time_guest", 3.1, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_guest_nice", 0.324, 0, cputags) - mps2 := MockPS{} + mps2 := system.MockPS{} mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) cs.ps = &mps2 @@ -101,7 +102,7 @@ func TestCPUStats(t *testing.T) { assertContainsTaggedFloat(t, &acc, "cpu", "usage_guest_nice", 2.2, 0.0005, cputags) } -// Asserts that a given accumulator contains a measurment of type float64 with +// Asserts that a given accumulator contains a measurement of type float64 with // specific tags within a certain distance of a given expected value. Asserts a failure // if the measurement is of the wrong type, or if no matching measurements are found // @@ -112,7 +113,7 @@ func TestCPUStats(t *testing.T) { // expectedValue float64 : Value to search for within the measurement // delta float64 : Maximum acceptable distance of an accumulated value // from the expectedValue parameter. 
Useful when -// floating-point arithmatic imprecision makes looking +// floating-point arithmetic imprecision makes looking // for an exact match impractical // tags map[string]string : Tag set the found measurement must have. Set to nil to // ignore the tag set. @@ -153,8 +154,8 @@ func assertContainsTaggedFloat( // TestCPUCountChange tests that no errors are encountered if the number of // CPUs increases as reported with LXC. func TestCPUCountIncrease(t *testing.T) { - var mps MockPS - var mps2 MockPS + var mps system.MockPS + var mps2 system.MockPS var acc testutil.Accumulator var err error @@ -162,7 +163,7 @@ func TestCPUCountIncrease(t *testing.T) { mps.On("CPUTimes").Return( []cpu.TimesStat{ - cpu.TimesStat{ + { CPU: "cpu0", }, }, nil) @@ -172,10 +173,10 @@ func TestCPUCountIncrease(t *testing.T) { mps2.On("CPUTimes").Return( []cpu.TimesStat{ - cpu.TimesStat{ + { CPU: "cpu0", }, - cpu.TimesStat{ + { CPU: "cpu1", }, }, nil) @@ -188,7 +189,7 @@ func TestCPUCountIncrease(t *testing.T) { // TestCPUTimesDecrease tests that telegraf continue to works after // CPU times decrease, which seems to occur when Linux system is suspended. func TestCPUTimesDecrease(t *testing.T) { - var mps MockPS + var mps system.MockPS defer mps.AssertExpectations(t) var acc testutil.Accumulator @@ -224,13 +225,13 @@ func TestCPUTimesDecrease(t *testing.T) { err := cs.Gather(&acc) require.NoError(t, err) - // Computed values are checked with delta > 0 because of floating point arithmatic + // Computed values are checked with delta > 0 because of floating point arithmetic // imprecision assertContainsTaggedFloat(t, &acc, "cpu", "time_user", 18, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_idle", 80, 0, cputags) assertContainsTaggedFloat(t, &acc, "cpu", "time_iowait", 2, 0, cputags) - mps2 := MockPS{} + mps2 := system.MockPS{} mps2.On("CPUTimes").Return([]cpu.TimesStat{cts2}, nil) cs.ps = &mps2 @@ -238,7 +239,7 @@ func TestCPUTimesDecrease(t *testing.T) { err = cs.Gather(&acc) require.Error(t, err) - mps3 := MockPS{} + mps3 := system.MockPS{} mps3.On("CPUTimes").Return([]cpu.TimesStat{cts3}, nil) cs.ps = &mps3 diff --git a/plugins/inputs/dcos/client_test.go b/plugins/inputs/dcos/client_test.go index 1b563c63f..7d154a43e 100644 --- a/plugins/inputs/dcos/client_test.go +++ b/plugins/inputs/dcos/client_test.go @@ -115,8 +115,8 @@ func TestGetSummary(t *testing.T) { expectedValue: &Summary{ Cluster: "a", Slaves: []Slave{ - Slave{ID: "a"}, - Slave{ID: "b"}, + {ID: "a"}, + {ID: "b"}, }, }, expectedError: nil, diff --git a/plugins/inputs/dcos/dcos_test.go b/plugins/inputs/dcos/dcos_test.go index 6a76f7b64..3914fa577 100644 --- a/plugins/inputs/dcos/dcos_test.go +++ b/plugins/inputs/dcos/dcos_test.go @@ -385,8 +385,8 @@ func TestGatherFilterNode(t *testing.T) { return &Summary{ Cluster: "a", Slaves: []Slave{ - Slave{ID: "x"}, - Slave{ID: "y"}, + {ID: "x"}, + {ID: "y"}, }, }, nil }, diff --git a/plugins/inputs/system/DISK_README.md b/plugins/inputs/disk/README.md similarity index 76% rename from plugins/inputs/system/DISK_README.md rename to plugins/inputs/disk/README.md index fa84264be..b0a8ac05a 100644 --- a/plugins/inputs/system/DISK_README.md +++ b/plugins/inputs/disk/README.md @@ -9,14 +9,13 @@ https://en.wikipedia.org/wiki/Df_(Unix) for more details. ### Configuration: ```toml -# Read metrics about disk usage by mount point [[inputs.disk]] ## By default stats will be gathered for all mount points. ## Set mount_points will restrict the stats to only the specified mount points. 
# mount_points = ["/"] ## Ignore mount points by filesystem type. - ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] ``` #### Docker container @@ -49,6 +48,22 @@ docker run -v /:/hostfs:ro -e HOST_MOUNT_PREFIX=/hostfs -e HOST_PROC=/hostfs/pro - inodes_total (integer, files) - inodes_used (integer, files) +### Troubleshooting + +On Linux, the list of disks is taken from the `/proc/self/mounts` file and a +[statfs] call is made on the second column. If any expected filesystems are +missing ensure that the `telegraf` user can read these files: +``` +$ sudo -u telegraf cat /proc/self/mounts | grep sda2 +/dev/sda2 /home ext4 rw,relatime,data=ordered 0 0 +$ sudo -u telegraf stat /home +``` + +It may be desired to use POSIX ACLs to provide additional access: +``` +sudo setfacl -R -m u:telegraf:X /var/lib/docker/volumes/ +``` + ### Example Output: ``` @@ -58,4 +73,4 @@ disk,fstype=autofs,mode=rw,path=/net free=0i,inodes_free=0i,inodes_total=0i,inod disk,fstype=autofs,mode=rw,path=/home free=0i,inodes_free=0i,inodes_total=0i,inodes_used=0i,total=0i,used=0i,used_percent=0 1453832006274169688 ``` - +[statfs]: http://man7.org/linux/man-pages/man2/statfs.2.html diff --git a/plugins/inputs/system/disk.go b/plugins/inputs/disk/disk.go similarity index 88% rename from plugins/inputs/system/disk.go rename to plugins/inputs/disk/disk.go index 172261560..b2c7e5400 100644 --- a/plugins/inputs/system/disk.go +++ b/plugins/inputs/disk/disk.go @@ -1,4 +1,4 @@ -package system +package disk import ( "fmt" @@ -6,15 +6,16 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type DiskStats struct { - ps PS + ps system.PS // Legacy support - Mountpoints []string + Mountpoints []string `toml:"mountpoints"` - MountPoints []string + MountPoints []string `toml:"mount_points"` IgnoreFS []string `toml:"ignore_fs"` } @@ -28,7 +29,7 @@ var diskSampleConfig = ` # mount_points = ["/"] ## Ignore mount points by filesystem type. 
- ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] + ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] ` func (_ *DiskStats) SampleConfig() string { @@ -105,7 +106,7 @@ func parseOptions(opts string) MountOptions { } func init() { - ps := newSystemPS() + ps := system.NewSystemPS() inputs.Add("disk", func() telegraf.Input { return &DiskStats{ps: ps} }) diff --git a/plugins/inputs/system/disk_test.go b/plugins/inputs/disk/disk_test.go similarity index 96% rename from plugins/inputs/system/disk_test.go rename to plugins/inputs/disk/disk_test.go index 938ca1b06..aeb2ae92b 100644 --- a/plugins/inputs/system/disk_test.go +++ b/plugins/inputs/disk/disk_test.go @@ -1,9 +1,10 @@ -package system +package disk import ( "os" "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/disk" "github.com/stretchr/testify/assert" @@ -17,7 +18,7 @@ type MockFileInfo struct { func TestDiskUsage(t *testing.T) { mck := &mock.Mock{} - mps := MockPSDisk{&systemPS{&mockDiskUsage{mck}}, mck} + mps := system.MockPSDisk{SystemPS: &system.SystemPS{PSDiskDeps: &system.MockDiskUsage{Mock: mck}}, Mock: mck} defer mps.AssertExpectations(t) var acc testutil.Accumulator @@ -137,7 +138,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, }, usageStats: []*disk.UsageStat{ - &disk.UsageStat{ + { Path: "/", Total: 42, }, @@ -169,7 +170,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, }, usageStats: []*disk.UsageStat{ - &disk.UsageStat{ + { Path: "/hostfs/var", Total: 42, }, @@ -202,7 +203,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { }, }, usageStats: []*disk.UsageStat{ - &disk.UsageStat{ + { Path: "/hostfs", Total: 42, }, @@ -229,7 +230,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mck := &mock.Mock{} - mps := MockPSDisk{&systemPS{&mockDiskUsage{mck}}, mck} + mps := system.MockPSDisk{SystemPS: &system.SystemPS{PSDiskDeps: &system.MockDiskUsage{Mock: mck}}, Mock: mck} defer mps.AssertExpectations(t) var acc testutil.Accumulator @@ -252,7 +253,7 @@ func TestDiskUsageHostMountPrefix(t *testing.T) { } func TestDiskStats(t *testing.T) { - var mps MockPS + var mps system.MockPS defer mps.AssertExpectations(t) var acc testutil.Accumulator var err error diff --git a/plugins/inputs/system/DISKIO_README.md b/plugins/inputs/diskio/README.md similarity index 73% rename from plugins/inputs/system/DISKIO_README.md rename to plugins/inputs/diskio/README.md index 3cec5cf55..11e68d696 100644 --- a/plugins/inputs/system/DISKIO_README.md +++ b/plugins/inputs/diskio/README.md @@ -19,6 +19,8 @@ The diskio input plugin gathers metrics about disk traffic and timing. ## Currently only Linux is supported via udev properties. You can view ## available properties for a device by running: ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. 
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] # ## Using the same metadata source as device_tags, you can also customize the @@ -62,6 +64,8 @@ docker run --privileged -v /:/hostfs:ro -v /run/udev:/run/udev:ro -e HOST_PROC=/ - io_time (integer, counter, milliseconds) - weighted_io_time (integer, counter, milliseconds) - iops_in_progress (integer, gauge) + - merged_reads (integer, counter) + - merged_writes (integer, counter) On linux these values correspond to the values in [`/proc/diskstats`](https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats) @@ -103,6 +107,13 @@ This value counts the number of I/O requests that have been issued to the device driver but have not yet completed. It does not include I/O requests that are in the queue but not yet issued to the device driver. +#### `merged_reads` & `merged_writes`: + +Reads and writes which are adjacent to each other may be merged for +efficiency. Thus two 4K reads may become one 8K read before it is +ultimately handed to the disk, and so it will be counted (and queued) +as only one I/O. These fields lets you know how often this was done. + ### Sample Queries: #### Calculate percent IO utilization per disk and host: @@ -113,17 +124,14 @@ SELECT non_negative_derivative(last("io_time"),1ms) FROM "diskio" WHERE time > n #### Calculate average queue depth: `iops_in_progress` will give you an instantaneous value. This will give you the average between polling intervals. ``` -SELECT non_negative_derivative(last("weighted_io_time",1ms)) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) +SELECT non_negative_derivative(last("weighted_io_time"),1ms) from "diskio" WHERE time > now() - 30m GROUP BY "host","name",time(60s) ``` ### Example Output: ``` -diskio,name=sda weighted_io_time=8411917i,read_time=7446444i,write_time=971489i,io_time=866197i,write_bytes=5397686272i,iops_in_progress=0i,reads=2970519i,writes=361139i,read_bytes=119528903168i 1502467254359000000 -diskio,name=sda1 reads=2149i,read_bytes=10753536i,write_bytes=20697088i,write_time=346i,weighted_io_time=505i,writes=2110i,read_time=161i,io_time=208i,iops_in_progress=0i 1502467254359000000 -diskio,name=sda2 reads=2968279i,writes=359029i,write_bytes=5376989184i,iops_in_progress=0i,weighted_io_time=8411250i,read_bytes=119517334528i,read_time=7446249i,write_time=971143i,io_time=866010i 1502467254359000000 -diskio,name=sdb writes=99391856i,write_time=466700894i,io_time=630259874i,weighted_io_time=4245949844i,reads=2750773828i,read_bytes=80667939499008i,write_bytes=6329347096576i,read_time=3783042534i,iops_in_progress=2i 1502467254359000000 -diskio,name=centos/root read_time=7472461i,write_time=950014i,iops_in_progress=0i,weighted_io_time=8424447i,writes=298543i,read_bytes=119510105088i,io_time=837421i,reads=2971769i,write_bytes=5192795648i 1502467254359000000 -diskio,name=centos/var_log reads=1065i,writes=69711i,read_time=1083i,write_time=35376i,read_bytes=6828032i,write_bytes=184193536i,io_time=29699i,iops_in_progress=0i,weighted_io_time=36460i 1502467254359000000 -diskio,name=postgresql/pgsql write_time=478267417i,io_time=631098730i,iops_in_progress=2i,weighted_io_time=4263637564i,reads=2750777151i,writes=110044361i,read_bytes=80667939288064i,write_bytes=6329347096576i,read_time=3784499336i 1502467254359000000 +diskio,name=sda1 merged_reads=0i,reads=2353i,writes=10i,write_bytes=2117632i,write_time=49i,io_time=1271i,weighted_io_time=1350i,read_bytes=31350272i,read_time=1303i,iops_in_progress=0i,merged_writes=0i 1578326400000000000 +diskio,name=centos/var_log 
reads=1063077i,writes=591025i,read_bytes=139325491712i,write_bytes=144233131520i,read_time=650221i,write_time=24368817i,io_time=852490i,weighted_io_time=25037394i,iops_in_progress=1i,merged_reads=0i,merged_writes=0i 1578326400000000000 +diskio,name=sda write_time=49i,io_time=1317i,weighted_io_time=1404i,reads=2495i,read_time=1357i,write_bytes=2117632i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,writes=10i,read_bytes=38956544i 1578326400000000000 + ``` diff --git a/plugins/inputs/system/diskio.go b/plugins/inputs/diskio/diskio.go similarity index 75% rename from plugins/inputs/system/diskio.go rename to plugins/inputs/diskio/diskio.go index 21e70d5eb..9c1e20ebd 100644 --- a/plugins/inputs/system/diskio.go +++ b/plugins/inputs/diskio/diskio.go @@ -1,14 +1,14 @@ -package system +package diskio import ( "fmt" - "log" "regexp" "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) var ( @@ -16,13 +16,15 @@ var ( ) type DiskIO struct { - ps PS + ps system.PS Devices []string DeviceTags []string NameTemplates []string SkipSerialNumber bool + Log telegraf.Logger + infoCache map[string]diskInfoCache deviceFilter filter.Filter initialized bool @@ -45,6 +47,8 @@ var diskIOsampleConfig = ` ## Currently only Linux is supported via udev properties. You can view ## available properties for a device by running: ## 'udevadm info -q property -n /dev/sda' + ## Note: Most, but not all, udev properties can be accessed this way. Properties + ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] # ## Using the same metadata source as device_tags, you can also customize the @@ -72,7 +76,7 @@ func (s *DiskIO) init() error { if hasMeta(device) { filter, err := filter.Compile(s.Devices) if err != nil { - return fmt.Errorf("error compiling device pattern: %v", err) + return fmt.Errorf("error compiling device pattern: %s", err.Error()) } s.deviceFilter = filter } @@ -96,19 +100,36 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { diskio, err := s.ps.DiskIO(devices) if err != nil { - return fmt.Errorf("error getting disk io info: %s", err) + return fmt.Errorf("error getting disk io info: %s", err.Error()) } for _, io := range diskio { - if s.deviceFilter != nil && !s.deviceFilter.Match(io.Name) { - continue + + match := false + if s.deviceFilter != nil && s.deviceFilter.Match(io.Name) { + match = true } tags := map[string]string{} - tags["name"] = s.diskName(io.Name) + var devLinks []string + tags["name"], devLinks = s.diskName(io.Name) + + if s.deviceFilter != nil && !match { + for _, devLink := range devLinks { + if s.deviceFilter.Match(devLink) { + match = true + break + } + } + if !match { + continue + } + } + for t, v := range s.diskTags(io.Name) { tags[t] = v } + if !s.SkipSerialNumber { if len(io.SerialNumber) != 0 { tags["serial"] = io.SerialNumber @@ -127,6 +148,8 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { "io_time": io.IoTime, "weighted_io_time": io.WeightedIO, "iops_in_progress": io.IopsInProgress, + "merged_reads": io.MergedReadCount, + "merged_writes": io.MergedWriteCount, } acc.AddCounter("diskio", fields, tags) } @@ -134,15 +157,20 @@ func (s *DiskIO) Gather(acc telegraf.Accumulator) error { return nil } -func (s *DiskIO) diskName(devName string) string { - if len(s.NameTemplates) == 0 { - return devName +func (s *DiskIO) diskName(devName string) (string, []string) { + 
di, err := s.diskInfo(devName) + devLinks := strings.Split(di["DEVLINKS"], " ") + for i, devLink := range devLinks { + devLinks[i] = strings.TrimPrefix(devLink, "/dev/") + } + + if len(s.NameTemplates) == 0 { + return devName, devLinks } - di, err := s.diskInfo(devName) if err != nil { - log.Printf("W! Error gathering disk info: %s", err) - return devName + s.Log.Warnf("Error gathering disk info: %s", err) + return devName, devLinks } for _, nt := range s.NameTemplates { @@ -160,11 +188,11 @@ func (s *DiskIO) diskName(devName string) string { }) if !miss { - return name + return name, devLinks } } - return devName + return devName, devLinks } func (s *DiskIO) diskTags(devName string) map[string]string { @@ -174,7 +202,7 @@ func (s *DiskIO) diskTags(devName string) map[string]string { di, err := s.diskInfo(devName) if err != nil { - log.Printf("W! Error gathering disk info: %s", err) + s.Log.Warnf("Error gathering disk info: %s", err) return nil } @@ -189,7 +217,7 @@ func (s *DiskIO) diskTags(devName string) map[string]string { } func init() { - ps := newSystemPS() + ps := system.NewSystemPS() inputs.Add("diskio", func() telegraf.Input { return &DiskIO{ps: ps, SkipSerialNumber: true} }) diff --git a/plugins/inputs/system/diskio_linux.go b/plugins/inputs/diskio/diskio_linux.go similarity index 61% rename from plugins/inputs/system/diskio_linux.go rename to plugins/inputs/diskio/diskio_linux.go index b15f74383..f2499ca17 100644 --- a/plugins/inputs/system/diskio_linux.go +++ b/plugins/inputs/diskio/diskio_linux.go @@ -1,7 +1,8 @@ -package system +package diskio import ( "bufio" + "bytes" "fmt" "os" "strings" @@ -10,6 +11,7 @@ import ( ) type diskInfoCache struct { + modifiedAt int64 // Unix Nano timestamp of the last modification of the device. This value is used to invalidate the cache udevDataPath string values map[string]string } @@ -30,17 +32,19 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { s.infoCache = map[string]diskInfoCache{} } ic, ok := s.infoCache[devName] - if ok { + + if ok && stat.Mtim.Nano() == ic.modifiedAt { return ic.values, nil } - major := stat.Rdev >> 8 & 0xff - minor := stat.Rdev & 0xff + major := unix.Major(uint64(stat.Rdev)) + minor := unix.Minor(uint64(stat.Rdev)) udevDataPath := fmt.Sprintf("%s/b%d:%d", udevPath, major, minor) di := map[string]string{} s.infoCache[devName] = diskInfoCache{ + modifiedAt: stat.Mtim.Nano(), udevDataPath: udevDataPath, values: di, } @@ -52,9 +56,21 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { defer f.Close() scnr := bufio.NewScanner(f) + var devlinks bytes.Buffer for scnr.Scan() { l := scnr.Text() - if len(l) < 4 || l[:2] != "E:" { + if len(l) < 4 { + continue + } + if l[:2] == "S:" { + if devlinks.Len() > 0 { + devlinks.WriteString(" ") + } + devlinks.WriteString("/dev/") + devlinks.WriteString(l[2:]) + continue + } + if l[:2] != "E:" { continue } kv := strings.SplitN(l[2:], "=", 2) @@ -64,5 +80,9 @@ func (s *DiskIO) diskInfo(devName string) (map[string]string, error) { di[kv[0]] = kv[1] } + if devlinks.Len() > 0 { + di["DEVLINKS"] = devlinks.String() + } + return di, nil } diff --git a/plugins/inputs/system/diskio_linux_test.go b/plugins/inputs/diskio/diskio_linux_test.go similarity index 88% rename from plugins/inputs/system/diskio_linux_test.go rename to plugins/inputs/diskio/diskio_linux_test.go index 96aed211b..1cb031bdc 100644 --- a/plugins/inputs/system/diskio_linux_test.go +++ b/plugins/inputs/diskio/diskio_linux_test.go @@ -1,6 +1,6 @@ // +build linux -package system 
+package diskio import ( "io/ioutil" @@ -14,6 +14,8 @@ import ( var nullDiskInfo = []byte(` E:MY_PARAM_1=myval1 E:MY_PARAM_2=myval2 +S:foo/bar/devlink +S:foo/bar/devlink1 `) // setupNullDisk sets up fake udev info as if /dev/null were a disk. @@ -47,6 +49,7 @@ func TestDiskInfo(t *testing.T) { require.NoError(t, err) assert.Equal(t, "myval1", di["MY_PARAM_1"]) assert.Equal(t, "myval2", di["MY_PARAM_2"]) + assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // test that data is cached err = clean() @@ -56,6 +59,7 @@ func TestDiskInfo(t *testing.T) { require.NoError(t, err) assert.Equal(t, "myval1", di["MY_PARAM_1"]) assert.Equal(t, "myval2", di["MY_PARAM_2"]) + assert.Equal(t, "/dev/foo/bar/devlink /dev/foo/bar/devlink1", di["DEVLINKS"]) // unfortunately we can't adjust mtime on /dev/null to test cache invalidation } @@ -84,7 +88,8 @@ func TestDiskIOStats_diskName(t *testing.T) { s := DiskIO{ NameTemplates: tc.templates, } - assert.Equal(t, tc.expected, s.diskName("null"), "Templates: %#v", tc.templates) + name, _ := s.diskName("null") + assert.Equal(t, tc.expected, name, "Templates: %#v", tc.templates) } } diff --git a/plugins/inputs/system/diskio_other.go b/plugins/inputs/diskio/diskio_other.go similarity index 90% rename from plugins/inputs/system/diskio_other.go rename to plugins/inputs/diskio/diskio_other.go index 0a3abb686..07fb8c3b8 100644 --- a/plugins/inputs/system/diskio_other.go +++ b/plugins/inputs/diskio/diskio_other.go @@ -1,6 +1,6 @@ // +build !linux -package system +package diskio type diskInfoCache struct{} diff --git a/plugins/inputs/system/diskio_test.go b/plugins/inputs/diskio/diskio_test.go similarity index 78% rename from plugins/inputs/system/diskio_test.go rename to plugins/inputs/diskio/diskio_test.go index d8b908c3e..3ad203de0 100644 --- a/plugins/inputs/system/diskio_test.go +++ b/plugins/inputs/diskio/diskio_test.go @@ -1,8 +1,9 @@ -package system +package diskio import ( "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/disk" "github.com/stretchr/testify/require" @@ -29,23 +30,25 @@ func TestDiskIO(t *testing.T) { name: "minimal", result: Result{ stats: map[string]disk.IOCountersStat{ - "sda": disk.IOCountersStat{ - ReadCount: 888, - WriteCount: 5341, - ReadBytes: 100000, - WriteBytes: 200000, - ReadTime: 7123, - WriteTime: 9087, - Name: "sda", - IoTime: 123552, - SerialNumber: "ab-123-ad", + "sda": { + ReadCount: 888, + WriteCount: 5341, + ReadBytes: 100000, + WriteBytes: 200000, + ReadTime: 7123, + WriteTime: 9087, + MergedReadCount: 11, + MergedWriteCount: 12, + Name: "sda", + IoTime: 123552, + SerialNumber: "ab-123-ad", }, }, err: nil, }, err: nil, metrics: []Metric{ - Metric{ + { tags: map[string]string{ "name": "sda", "serial": "ab-123-ad", @@ -60,6 +63,8 @@ func TestDiskIO(t *testing.T) { "io_time": uint64(123552), "weighted_io_time": uint64(0), "iops_in_progress": uint64(0), + "merged_reads": uint64(11), + "merged_writes": uint64(12), }, }, }, @@ -69,11 +74,11 @@ func TestDiskIO(t *testing.T) { devices: []string{"sd*"}, result: Result{ stats: map[string]disk.IOCountersStat{ - "sda": disk.IOCountersStat{ + "sda": { Name: "sda", ReadCount: 42, }, - "vda": disk.IOCountersStat{ + "vda": { Name: "vda", ReadCount: 42, }, @@ -82,7 +87,7 @@ func TestDiskIO(t *testing.T) { }, err: nil, metrics: []Metric{ - Metric{ + { tags: map[string]string{ "name": "sda", "serial": "unknown", @@ -96,12 +101,13 @@ func TestDiskIO(t *testing.T) { } for _, tt := range 
tests { t.Run(tt.name, func(t *testing.T) { - var mps MockPS + var mps system.MockPS mps.On("DiskIO").Return(tt.result.stats, tt.result.err) var acc testutil.Accumulator diskio := &DiskIO{ + Log: testutil.Logger{}, ps: &mps, Devices: tt.devices, } diff --git a/plugins/inputs/disque/README.md b/plugins/inputs/disque/README.md new file mode 100644 index 000000000..0df757061 --- /dev/null +++ b/plugins/inputs/disque/README.md @@ -0,0 +1,38 @@ +# Disque Input + +[Disque](https://github.com/antirez/disque) is an ongoing experiment to build a distributed, in-memory, message broker. + + +### Configuration: + +```toml +[[inputs.disque]] + ## An array of URI to gather stats about. Specify an ip or hostname + ## with optional port and password. + ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. + ## If no servers are specified, then localhost is used as the host. + servers = ["localhost"] +``` + +### Metrics + + +- disque + - disque_host + - uptime_in_seconds + - connected_clients + - blocked_clients + - used_memory + - used_memory_rss + - used_memory_peak + - total_connections_received + - total_commands_processed + - instantaneous_ops_per_sec + - latest_fork_usec + - mem_fragmentation_ratio + - used_cpu_sys + - used_cpu_user + - used_cpu_sys_children + - used_cpu_user_children + - registered_jobs + - registered_queues diff --git a/plugins/inputs/dns_query/README.md b/plugins/inputs/dns_query/README.md index 766d9811f..dc8ddd903 100644 --- a/plugins/inputs/dns_query/README.md +++ b/plugins/inputs/dns_query/README.md @@ -16,7 +16,7 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi # domains = ["."] ## Query record type. - ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. + ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. # record_type = "A" ## Dns server port. 
@@ -34,12 +34,40 @@ The DNS plugin gathers dns query times in miliseconds - like [Dig](https://en.wi - domain - record_type - result + - rcode - fields: - query_time_ms (float) - result_code (int, success = 0, timeout = 1, error = 2) + - rcode_value (int) + + +### Rcode Descriptions +|rcode_value|rcode|Description| +|---|-----------|-----------------------------------| +|0 | NoError | No Error | +|1 | FormErr | Format Error | +|2 | ServFail | Server Failure | +|3 | NXDomain | Non-Existent Domain | +|4 | NotImp | Not Implemented | +|5 | Refused | Query Refused | +|6 | YXDomain | Name Exists when it should not | +|7 | YXRRSet | RR Set Exists when it should not | +|8 | NXRRSet | RR Set that should exist does not | +|9 | NotAuth | Server Not Authoritative for zone | +|10 | NotZone | Name not contained in zone | +|16 | BADSIG | TSIG Signature Failure | +|16 | BADVERS | Bad OPT Version | +|17 | BADKEY | Key not recognized | +|18 | BADTIME | Signature out of time window | +|19 | BADMODE | Bad TKEY Mode | +|20 | BADNAME | Duplicate key name | +|21 | BADALG | Algorithm not supported | +|22 | BADTRUNC | Bad Truncation | +|23 | BADCOOKIE | Bad/missing Server Cookie | + ### Example Output: ``` -dns_query,domain=mjasion.pl,record_type=A,server=8.8.8.8 query_time_ms=67.189842 1456082743585760680 +dns_query,domain=google.com,rcode=NOERROR,record_type=A,result=success,server=127.0.0.1 rcode_value=0i,result_code=0i,query_time_ms=0.13746 1550020750001000000 ``` diff --git a/plugins/inputs/dns_query/dns_query.go b/plugins/inputs/dns_query/dns_query.go index 98fcc09c2..c56572770 100644 --- a/plugins/inputs/dns_query/dns_query.go +++ b/plugins/inputs/dns_query/dns_query.go @@ -1,10 +1,10 @@ package dns_query import ( - "errors" "fmt" "net" "strconv" + "sync" "time" "github.com/miekg/dns" @@ -52,7 +52,7 @@ var sampleConfig = ` # domains = ["."] ## Query record type. - ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. + ## Possible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. # record_type = "A" ## Dns server port. 
@@ -70,32 +70,43 @@ func (d *DnsQuery) Description() string { return "Query given DNS server and gives statistics" } func (d *DnsQuery) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup d.setDefaultValues() for _, domain := range d.Domains { for _, server := range d.Servers { - fields := make(map[string]interface{}, 2) - tags := map[string]string{ - "server": server, - "domain": domain, - "record_type": d.RecordType, - } + wg.Add(1) + go func(domain, server string) { + fields := make(map[string]interface{}, 2) + tags := map[string]string{ + "server": server, + "domain": domain, + "record_type": d.RecordType, + } - dnsQueryTime, err := d.getDnsQueryTime(domain, server) - if err == nil { - setResult(Success, fields, tags) - fields["query_time_ms"] = dnsQueryTime - } else if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { - setResult(Timeout, fields, tags) - } else if err != nil { - setResult(Error, fields, tags) - acc.AddError(err) - } + dnsQueryTime, rcode, err := d.getDnsQueryTime(domain, server) + if rcode >= 0 { + tags["rcode"] = dns.RcodeToString[rcode] + fields["rcode_value"] = rcode + } + if err == nil { + setResult(Success, fields, tags) + fields["query_time_ms"] = dnsQueryTime + } else if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { + setResult(Timeout, fields, tags) + } else if err != nil { + setResult(Error, fields, tags) + acc.AddError(err) + } - acc.AddFields("dns_query", fields, tags) + acc.AddFields("dns_query", fields, tags) + + wg.Done() + }(domain, server) } } + wg.Wait() return nil } @@ -122,7 +133,7 @@ func (d *DnsQuery) setDefaultValues() { } } -func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error) { +func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, int, error) { dnsQueryTime := float64(0) c := new(dns.Client) @@ -132,25 +143,25 @@ func (d *DnsQuery) getDnsQueryTime(domain string, server string) (float64, error m := new(dns.Msg) recordType, err := d.parseRecordType() if err != nil { - return dnsQueryTime, err + return dnsQueryTime, -1, err } m.SetQuestion(dns.Fqdn(domain), recordType) m.RecursionDesired = true r, rtt, err := c.Exchange(m, net.JoinHostPort(server, strconv.Itoa(d.Port))) if err != nil { - return dnsQueryTime, err + return dnsQueryTime, -1, err } if r.Rcode != dns.RcodeSuccess { - return dnsQueryTime, errors.New(fmt.Sprintf("Invalid answer name %s after %s query for %s\n", domain, d.RecordType, domain)) + return dnsQueryTime, r.Rcode, fmt.Errorf("Invalid answer (%s) from %s after %s query for %s", dns.RcodeToString[r.Rcode], server, d.RecordType, domain) } dnsQueryTime = float64(rtt.Nanoseconds()) / 1e6 - return dnsQueryTime, nil + return dnsQueryTime, r.Rcode, nil } func (d *DnsQuery) parseRecordType() (uint16, error) { var recordType uint16 - var error error + var err error switch d.RecordType { case "A": @@ -176,10 +187,10 @@ func (d *DnsQuery) parseRecordType() (uint16, error) { case "TXT": recordType = dns.TypeTXT default: - error = errors.New(fmt.Sprintf("Record type %s not recognized", d.RecordType)) + err = fmt.Errorf("Record type %s not recognized", d.RecordType) } - return recordType, error + return recordType, err } func setResult(result ResultType, fields map[string]interface{}, tags map[string]string) { diff --git a/plugins/inputs/dns_query/dns_query_test.go b/plugins/inputs/dns_query/dns_query_test.go index 3f70153e4..5a1379764 100644 --- a/plugins/inputs/dns_query/dns_query_test.go +++ b/plugins/inputs/dns_query/dns_query_test.go @@ -117,21 +117,17 @@ 
func TestGatheringTimeout(t *testing.T) {
 var acc testutil.Accumulator
 dnsConfig.Port = 60054
 dnsConfig.Timeout = 1
- var err error
 channel := make(chan error, 1)
 go func() {
 channel <- acc.GatherError(dnsConfig.Gather)
 }()
 select {
- case res := <-channel:
- err = res
+ case err := <-channel:
+ assert.NoError(t, err)
 case <-time.After(time.Second * 2):
- err = nil
+ assert.Fail(t, "DNS query did not timeout")
 }
-
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "i/o timeout")
 }

 func TestSettingDefaultValues(t *testing.T) {
diff --git a/plugins/inputs/docker/README.md b/plugins/inputs/docker/README.md
index a95fb61e9..95394c94e 100644
--- a/plugins/inputs/docker/README.md
+++ b/plugins/inputs/docker/README.md
@@ -26,12 +26,17 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/)
 ## Deprecated (1.4.0), use container_name_include
 container_names = []
+ ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars
+ source_tag = false
+
 ## Containers to include and exclude. Collect all if empty. Globs accepted.
 container_name_include = []
 container_name_exclude = []
 ## Container states to include and exclude. Globs accepted.
 ## When empty only containers in the "running" state will be captured.
+ ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
+ ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"]
 # container_state_include = []
 # container_state_exclude = []
@@ -66,6 +71,42 @@ to gather stats from the [Engine API](https://docs.docker.com/engine/api/v1.24/)
 When using the `"ENV"` endpoint, the connection is configured using the [cli Docker environment variables](https://godoc.org/github.com/moby/moby/client#NewEnvClient).
+#### Security
+
+Giving telegraf access to the Docker daemon expands the [attack surface](https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface) and could result in an attacker gaining root access to a machine. This is especially relevant if the telegraf configuration can be changed by untrusted users.
+
+#### Docker Daemon Permissions
+
+Typically, telegraf must be given permission to access the docker daemon unix
+socket when using the default endpoint. This can be done by adding the
+`telegraf` unix user (created when installing a Telegraf package) to the
+`docker` unix group with the following command:
+
+```
+sudo usermod -aG docker telegraf
+```
+
+If telegraf is run within a container, the unix socket will need to be exposed
+within the telegraf container. This can be done in the docker CLI by adding the
+option `-v /var/run/docker.sock:/var/run/docker.sock` or by adding the following
+lines to the telegraf container definition in a docker compose file:
+
+```
+volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+```
+
+#### source tag
+
+Selecting a container's measurements can be tricky if you have many containers with the same name.
+To alleviate this issue you can set the option below to `true`:
+
+```toml
+source_tag = true
+```
+
+This will cause all measurements to have the `source` tag set to the first 12 characters of the container ID. The first 12 characters are the default hostname for containers that have no explicit hostname set, as defined by Docker.
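+A minimal sketch of a plugin section that combines the source tag with state
+filtering (the endpoint and option names are taken from the examples above;
+the chosen values are only illustrative):
+
+```toml
+[[inputs.docker]]
+  endpoint = "unix:///var/run/docker.sock"
+
+  ## tag every measurement with the first 12 characters of the container id
+  source_tag = true
+
+  ## illustrative values only: also collect containers that have exited
+  container_state_include = ["running", "exited"]
+```
+
+With `source_tag = true`, series from same-named containers can then be told apart by the value of their `source` tag.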
+ #### Kubernetes Labels Kubernetes may add many labels to your containers, if they are not needed you @@ -74,18 +115,14 @@ may prefer to exclude them: docker_label_exclude = ["annotation.kubernetes*"] ``` - ### Metrics: -Every effort was made to preserve the names based on the JSON response from the -docker API. - - docker - tags: - unit - engine_host - server_version - - fields: + + fields: - n_used_file_descriptors - n_cpus - n_containers @@ -96,29 +133,49 @@ docker API. - n_goroutines - n_listener_events - memory_total - - pool_blocksize + - pool_blocksize (requires devicemapper storage driver) (deprecated see: `docker_devicemapper`) -- docker_data +The `docker_data` and `docker_metadata` measurements are available only for +some storage drivers such as devicemapper. + ++ docker_data (deprecated see: `docker_devicemapper`) - tags: - unit - engine_host - server_version - - fields: + + fields: - available - total - used -- docker_metadata +- docker_metadata (deprecated see: `docker_devicemapper`) - tags: - unit - engine_host - server_version - - fields: + + fields: - available - total - used -- docker_container_mem +The above measurements for the devicemapper storage driver can now be found in the new `docker_devicemapper` measurement + +- docker_devicemapper + - tags: + - engine_host + - server_version + - pool_name + + fields: + - pool_blocksize_bytes + - data_space_used_bytes + - data_space_total_bytes + - data_space_available_bytes + - metadata_space_used_bytes + - metadata_space_total_bytes + - metadata_space_available_bytes + - thin_pool_minimum_free_space_bytes + ++ docker_container_mem - tags: - engine_host - server_version @@ -126,8 +183,8 @@ docker API. - container_name - container_status - container_version - - fields: - - total_pgmafault + + fields: + - total_pgmajfault - cache - mapped_file - total_inactive_file @@ -171,7 +228,7 @@ docker API. - container_status - container_version - cpu - - fields: + + fields: - throttling_periods - throttling_throttled_periods - throttling_throttled_time @@ -182,7 +239,7 @@ docker API. - usage_percent - container_id -- docker_container_net ++ docker_container_net - tags: - engine_host - server_version @@ -191,7 +248,7 @@ docker API. - container_status - container_version - network - - fields: + + fields: - rx_dropped - rx_bytes - rx_errors @@ -224,7 +281,11 @@ docker API. - io_serviced_recursive_write - container_id -- docker_container_health +The `docker_container_health` measurements report on a containers +[HEALTHCHECK](https://docs.docker.com/engine/reference/builder/#healthcheck) +status if configured. + +- docker_container_health (container must use the HEALTHCHECK) - tags: - engine_host - server_version @@ -245,11 +306,13 @@ docker API. 
- container_status - container_version - fields: + - container_id - oomkilled (boolean) - pid (integer) - exitcode (integer) - started_at (integer) - finished_at (integer) + - uptime_ns (integer) - docker_swarm - tags: diff --git a/plugins/inputs/docker/client.go b/plugins/inputs/docker/client.go index b66ad009d..3ea24ea74 100644 --- a/plugins/inputs/docker/client.go +++ b/plugins/inputs/docker/client.go @@ -8,11 +8,10 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" docker "github.com/docker/docker/client" - "github.com/docker/go-connections/sockets" ) var ( - version = "1.24" + version = "1.21" // 1.24 is when server first started returning its version defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"} ) @@ -27,7 +26,7 @@ type Client interface { } func NewEnvClient() (Client, error) { - client, err := docker.NewEnvClient() + client, err := docker.NewClientWithOpts(docker.FromEnv) if err != nil { return nil, err } @@ -35,21 +34,20 @@ func NewEnvClient() (Client, error) { } func NewClient(host string, tlsConfig *tls.Config) (Client, error) { - proto, addr, _, err := docker.ParseHost(host) - if err != nil { - return nil, err - } - transport := &http.Transport{ TLSClientConfig: tlsConfig, } - sockets.ConfigureTransport(transport, proto, addr) httpClient := &http.Client{Transport: transport} - client, err := docker.NewClient(host, version, httpClient, defaultHeaders) + client, err := docker.NewClientWithOpts( + docker.WithHTTPHeaders(defaultHeaders), + docker.WithHTTPClient(httpClient), + docker.WithVersion(version), + docker.WithHost(host)) if err != nil { return nil, err } + return &SocketClient{client}, nil } diff --git a/plugins/inputs/docker/dev/telegraf.conf b/plugins/inputs/docker/dev/telegraf.conf new file mode 100644 index 000000000..06bbb46ae --- /dev/null +++ b/plugins/inputs/docker/dev/telegraf.conf @@ -0,0 +1,13 @@ +[agent] + interval="1s" + flush_interval="1s" + +[[inputs.docker]] + endpoint = "unix:///var/run/docker.sock" + timeout = "5s" + perdevice = true + total = false + container_names = [] + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/docker/docker.go b/plugins/inputs/docker/docker.go index aa1de7479..915d3a3e3 100644 --- a/plugins/inputs/docker/docker.go +++ b/plugins/inputs/docker/docker.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "io" - "log" "net/http" "regexp" "strconv" @@ -20,6 +19,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/docker" tlsint "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) @@ -44,6 +44,10 @@ type Docker struct { ContainerStateInclude []string `toml:"container_state_include"` ContainerStateExclude []string `toml:"container_state_exclude"` + IncludeSourceTag bool `toml:"source_tag"` + + Log telegraf.Logger + tlsint.ClientConfig newEnvClient func() (Client, error) @@ -51,7 +55,7 @@ type Docker struct { client Client httpClient *http.Client - engine_host string + engineHost string serverVersion string filtersCreated bool labelFilter filter.Filter @@ -73,6 +77,7 @@ const ( var ( sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} + now = time.Now ) var sampleConfig = ` @@ -87,6 +92,9 @@ var sampleConfig = ` ## Only collect metrics for these containers, collect all if empty 
container_names = [] + ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars + source_tag = false + ## Containers to include and exclude. Globs accepted. ## Note that an empty array for both will include all containers container_name_include = [] @@ -94,6 +102,8 @@ var sampleConfig = ` ## Container states to include and exclude. Globs accepted. ## When empty only containers in the "running" state will be captured. + ## example: container_state_include = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] + ## example: container_state_exclude = ["created", "restarting", "running", "removing", "paused", "exited", "dead"] # container_state_include = [] # container_state_exclude = [] @@ -103,8 +113,10 @@ var sampleConfig = ` ## Whether to report for each container per-device blkio (8:0, 8:1...) and ## network (eth0, eth1, ...) stats or not perdevice = true + ## Whether to report for each container total blkio and network stats or not total = false + ## Which environment variables should we use as a tag ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] @@ -121,26 +133,18 @@ var sampleConfig = ` # insecure_skip_verify = false ` +// SampleConfig returns the default Docker TOML configuration. +func (d *Docker) SampleConfig() string { return sampleConfig } + +// Description the metrics returned. func (d *Docker) Description() string { return "Read metrics about docker containers" } -func (d *Docker) SampleConfig() string { return sampleConfig } - +// Gather metrics from the docker server. func (d *Docker) Gather(acc telegraf.Accumulator) error { if d.client == nil { - var c Client - var err error - if d.Endpoint == "ENV" { - c, err = d.newEnvClient() - } else { - tlsConfig, err := d.ClientConfig.TLSConfig() - if err != nil { - return err - } - - c, err = d.newClient(d.Endpoint, tlsConfig) - } + c, err := d.getNewClient() if err != nil { return err } @@ -195,7 +199,11 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() + containers, err := d.client.ContainerList(ctx, opts) + if err == context.DeadlineExceeded { + return errListTimeout + } if err != nil { return err } @@ -206,10 +214,8 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { for _, container := range containers { go func(c types.Container) { defer wg.Done() - err := d.gatherContainer(c, acc) - if err != nil { - acc.AddError(fmt.Errorf("E! Error gathering container %s stats: %s\n", - c.Names, err.Error())) + if err := d.gatherContainer(c, acc); err != nil { + acc.AddError(err) } }(container) } @@ -219,16 +225,18 @@ func (d *Docker) Gather(acc telegraf.Accumulator) error { } func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() + services, err := d.client.ServiceList(ctx, types.ServiceListOptions{}) + if err == context.DeadlineExceeded { + return errServiceTimeout + } if err != nil { return err } if len(services) > 0 { - tasks, err := d.client.TaskList(ctx, types.TaskListOptions{}) if err != nil { return err @@ -274,7 +282,7 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error { fields["tasks_running"] = running[service.ID] fields["tasks_desired"] = tasksNoShutdown[service.ID] } else { - log.Printf("E! 
Unknow Replicas Mode") + d.Log.Error("Unknown replica mode") } // Add metrics acc.AddFields("docker_swarm", @@ -292,19 +300,24 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { dataFields := make(map[string]interface{}) metadataFields := make(map[string]interface{}) now := time.Now() + // Get info from docker daemon ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() + info, err := d.client.Info(ctx) + if err == context.DeadlineExceeded { + return errInfoTimeout + } if err != nil { return err } - d.engine_host = info.Name + d.engineHost = info.Name d.serverVersion = info.ServerVersion tags := map[string]string{ - "engine_host": d.engine_host, + "engine_host": d.engineHost, "server_version": d.serverVersion, } @@ -319,21 +332,50 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { "n_goroutines": info.NGoroutines, "n_listener_events": info.NEventsListener, } + // Add metrics acc.AddFields("docker", fields, tags, now) acc.AddFields("docker", map[string]interface{}{"memory_total": info.MemTotal}, tags, now) + // Get storage metrics tags["unit"] = "bytes" + + var ( + // "docker_devicemapper" measurement fields + poolName string + deviceMapperFields = map[string]interface{}{} + ) + for _, rawData := range info.DriverStatus { + name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1)) + if name == "pool_name" { + poolName = rawData[1] + continue + } + // Try to convert string to int (bytes) value, err := parseSize(rawData[1]) if err != nil { continue } - name := strings.ToLower(strings.Replace(rawData[0], " ", "_", -1)) + + switch name { + case "pool_blocksize", + "base_device_size", + "data_space_used", + "data_space_total", + "data_space_available", + "metadata_space_used", + "metadata_space_total", + "metadata_space_available", + "thin_pool_minimum_free_space": + deviceMapperFields[name+"_bytes"] = value + } + + // Legacy devicemapper measurements if name == "pool_blocksize" { // pool blocksize acc.AddFields("docker", @@ -350,67 +392,99 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error { metadataFields[fieldName] = value } } + if len(dataFields) > 0 { acc.AddFields("docker_data", dataFields, tags, now) } + if len(metadataFields) > 0 { acc.AddFields("docker_metadata", metadataFields, tags, now) } + + if len(deviceMapperFields) > 0 { + tags := map[string]string{ + "engine_host": d.engineHost, + "server_version": d.serverVersion, + } + + if poolName != "" { + tags["pool_name"] = poolName + } + + acc.AddFields("docker_devicemapper", deviceMapperFields, tags, now) + } + return nil } +func hostnameFromID(id string) string { + if len(id) > 12 { + return id[0:12] + } + return id +} + func (d *Docker) gatherContainer( container types.Container, acc telegraf.Accumulator, ) error { var v *types.StatsJSON + // Parse container name - cname := "unknown" - if len(container.Names) > 0 { - // Not sure what to do with other names, just take the first. 
- cname = strings.TrimPrefix(container.Names[0], "/") + var cname string + for _, name := range container.Names { + trimmedName := strings.TrimPrefix(name, "/") + match := d.containerFilter.Match(trimmedName) + if match { + cname = trimmedName + break + } } - // the image name sometimes has a version part, or a private repo - // ie, rabbitmq:3-management or docker.someco.net:4443/rabbitmq:3-management - imageName := "" - imageVersion := "unknown" - i := strings.LastIndex(container.Image, ":") // index of last ':' character - if i > -1 { - imageVersion = container.Image[i+1:] - imageName = container.Image[:i] - } else { - imageName = container.Image + if cname == "" { + return nil } + imageName, imageVersion := docker.ParseImage(container.Image) + tags := map[string]string{ - "engine_host": d.engine_host, + "engine_host": d.engineHost, "server_version": d.serverVersion, "container_name": cname, "container_image": imageName, "container_version": imageVersion, } - if !d.containerFilter.Match(cname) { - return nil + if d.IncludeSourceTag { + tags["source"] = hostnameFromID(container.ID) } ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) defer cancel() + r, err := d.client.ContainerStats(ctx, container.ID, false) - if err != nil { - return fmt.Errorf("Error getting docker stats: %s", err.Error()) + if err == context.DeadlineExceeded { + return errStatsTimeout } + if err != nil { + return fmt.Errorf("error getting docker stats: %v", err) + } + defer r.Body.Close() dec := json.NewDecoder(r.Body) if err = dec.Decode(&v); err != nil { if err == io.EOF { return nil } - return fmt.Errorf("Error decoding: %s", err.Error()) + return fmt.Errorf("error decoding: %v", err) } daemonOSType := r.OSType + // use common (printed at `docker ps`) name for container + if v.Name != "" { + tags["container_name"] = strings.TrimPrefix(v.Name, "/") + } + // Add labels to tags for k, label := range container.Labels { if d.labelFilter.Match(k) { @@ -418,55 +492,85 @@ func (d *Docker) gatherContainer( } } + return d.gatherContainerInspect(container, acc, tags, daemonOSType, v) +} + +func (d *Docker) gatherContainerInspect( + container types.Container, + acc telegraf.Accumulator, + tags map[string]string, + daemonOSType string, + v *types.StatsJSON, +) error { + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + defer cancel() + info, err := d.client.ContainerInspect(ctx, container.ID) + if err == context.DeadlineExceeded { + return errInspectTimeout + } if err != nil { - return fmt.Errorf("Error inspecting docker container: %s", err.Error()) + return fmt.Errorf("error inspecting docker container: %v", err) } // Add whitelisted environment variables to tags if len(d.TagEnvironment) > 0 { for _, envvar := range info.Config.Env { for _, configvar := range d.TagEnvironment { - dock_env := strings.SplitN(envvar, "=", 2) + dockEnv := strings.SplitN(envvar, "=", 2) //check for presence of tag in whitelist - if len(dock_env) == 2 && len(strings.TrimSpace(dock_env[1])) != 0 && configvar == dock_env[0] { - tags[dock_env[0]] = dock_env[1] + if len(dockEnv) == 2 && len(strings.TrimSpace(dockEnv[1])) != 0 && configvar == dockEnv[0] { + tags[dockEnv[0]] = dockEnv[1] } } } } + if info.State != nil { tags["container_status"] = info.State.Status statefields := map[string]interface{}{ - "oomkilled": info.State.OOMKilled, - "pid": info.State.Pid, - "exitcode": info.State.ExitCode, + "oomkilled": info.State.OOMKilled, + "pid": info.State.Pid, + "exitcode": info.State.ExitCode, + 
"container_id": container.ID, } - container_time, err := time.Parse(time.RFC3339, info.State.StartedAt) - if err == nil && !container_time.IsZero() { - statefields["started_at"] = container_time.UnixNano() + + finished, err := time.Parse(time.RFC3339, info.State.FinishedAt) + if err == nil && !finished.IsZero() { + statefields["finished_at"] = finished.UnixNano() + } else { + // set finished to now for use in uptime + finished = now() } - container_time, err = time.Parse(time.RFC3339, info.State.FinishedAt) - if err == nil && !container_time.IsZero() { - statefields["finished_at"] = container_time.UnixNano() + + started, err := time.Parse(time.RFC3339, info.State.StartedAt) + if err == nil && !started.IsZero() { + statefields["started_at"] = started.UnixNano() + + uptime := finished.Sub(started) + if finished.Before(started) { + uptime = now().Sub(started) + } + statefields["uptime_ns"] = uptime.Nanoseconds() + } + + acc.AddFields("docker_container_status", statefields, tags, now()) + + if info.State.Health != nil { + healthfields := map[string]interface{}{ + "health_status": info.State.Health.Status, + "failing_streak": info.ContainerJSONBase.State.Health.FailingStreak, + } + acc.AddFields("docker_container_health", healthfields, tags, now()) } - acc.AddFields("docker_container_status", statefields, tags, time.Now()) } - if info.State.Health != nil { - healthfields := map[string]interface{}{ - "health_status": info.State.Health.Status, - "failing_streak": info.ContainerJSONBase.State.Health.FailingStreak, - } - acc.AddFields("docker_container_health", healthfields, tags, time.Now()) - } - - gatherContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType) + parseContainerStats(v, acc, tags, container.ID, d.PerDevice, d.Total, daemonOSType) return nil } -func gatherContainerStats( +func parseContainerStats( stat *types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, @@ -527,12 +631,12 @@ func gatherContainerStats( if daemonOSType != "windows" { memfields["limit"] = stat.MemoryStats.Limit - memfields["usage"] = stat.MemoryStats.Usage memfields["max_usage"] = stat.MemoryStats.MaxUsage - mem := calculateMemUsageUnixNoCache(stat.MemoryStats) + mem := CalculateMemUsageUnixNoCache(stat.MemoryStats) memLimit := float64(stat.MemoryStats.Limit) - memfields["usage_percent"] = calculateMemPercentUnixNoCache(memLimit, mem) + memfields["usage"] = uint64(mem) + memfields["usage_percent"] = CalculateMemPercentUnixNoCache(memLimit, mem) } else { memfields["commit_bytes"] = stat.MemoryStats.Commit memfields["commit_peak_bytes"] = stat.MemoryStats.CommitPeak @@ -555,7 +659,7 @@ func gatherContainerStats( if daemonOSType != "windows" { previousCPU := stat.PreCPUStats.CPUUsage.TotalUsage previousSystem := stat.PreCPUStats.SystemUsage - cpuPercent := calculateCPUPercentUnix(previousCPU, previousSystem, stat) + cpuPercent := CalculateCPUPercentUnix(previousCPU, previousSystem, stat) cpufields["usage_percent"] = cpuPercent } else { cpuPercent := calculateCPUPercentWindows(stat) @@ -772,7 +876,7 @@ func sliceContains(in string, sl []string) bool { func parseSize(sizeStr string) (int64, error) { matches := sizeRegex.FindStringSubmatch(sizeStr) if len(matches) != 4 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + return -1, fmt.Errorf("invalid size: %s", sizeStr) } size, err := strconv.ParseFloat(matches[1], 64) @@ -824,6 +928,19 @@ func (d *Docker) createContainerStateFilters() error { return nil } +func (d *Docker) getNewClient() (Client, error) { + if d.Endpoint == "ENV" 
{ + return d.newEnvClient() + } + + tlsConfig, err := d.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + return d.newClient(d.Endpoint, tlsConfig) +} + func init() { inputs.Add("docker", func() telegraf.Input { return &Docker{ diff --git a/plugins/inputs/docker/docker_test.go b/plugins/inputs/docker/docker_test.go index d51c61c00..a331479d1 100644 --- a/plugins/inputs/docker/docker_test.go +++ b/plugins/inputs/docker/docker_test.go @@ -3,13 +3,16 @@ package docker import ( "context" "crypto/tls" + "io/ioutil" "sort" + "strings" "testing" - - "github.com/influxdata/telegraf/testutil" + "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -77,11 +80,11 @@ var baseClient = MockClient{ ContainerListF: func(context.Context, types.ContainerListOptions) ([]types.Container, error) { return containerList, nil }, - ContainerStatsF: func(context.Context, string, bool) (types.ContainerStats, error) { - return containerStats(), nil + ContainerStatsF: func(c context.Context, s string, b bool) (types.ContainerStats, error) { + return containerStats(s), nil }, ContainerInspectF: func(context.Context, string) (types.ContainerJSON, error) { - return containerInspect, nil + return containerInspect(), nil }, ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) { return ServiceList, nil @@ -107,7 +110,7 @@ func TestDockerGatherContainerStats(t *testing.T) { "container_image": "redis/image", } - gatherContainerStats(stats, &acc, tags, "123456789", true, true, "linux") + parseContainerStats(stats, &acc, tags, "123456789", true, true, "linux") // test docker_container_net measurement netfields := map[string]interface{}{ @@ -250,6 +253,7 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { var acc testutil.Accumulator d := Docker{ + Log: testutil.Logger{}, newClient: func(string, *tls.Config) (Client, error) { return &MockClient{ InfoF: func(ctx context.Context) (types.Info, error) { @@ -262,7 +266,7 @@ func TestDocker_WindowsMemoryContainerStats(t *testing.T) { return containerStatsWindows(), nil }, ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) { - return containerInspect, nil + return containerInspect(), nil }, ServiceListF: func(context.Context, types.ServiceListOptions) ([]swarm.Service, error) { return ServiceList, nil @@ -290,11 +294,9 @@ func TestContainerLabels(t *testing.T) { }{ { name: "Nil filters matches all", - container: types.Container{ - Labels: map[string]string{ - "a": "x", - }, - }, + container: genContainerLabeled(map[string]string{ + "a": "x", + }), include: nil, exclude: nil, expected: map[string]string{ @@ -303,11 +305,9 @@ func TestContainerLabels(t *testing.T) { }, { name: "Empty filters matches all", - container: types.Container{ - Labels: map[string]string{ - "a": "x", - }, - }, + container: genContainerLabeled(map[string]string{ + "a": "x", + }), include: []string{}, exclude: []string{}, expected: map[string]string{ @@ -316,12 +316,10 @@ func TestContainerLabels(t *testing.T) { }, { name: "Must match include", - container: types.Container{ - Labels: map[string]string{ - "a": "x", - "b": "y", - }, - }, + container: genContainerLabeled(map[string]string{ + "a": "x", + "b": "y", + }), include: []string{"a"}, exclude: []string{}, expected: map[string]string{ @@ -330,12 +328,10 @@ func TestContainerLabels(t *testing.T) { }, { 
name: "Must not match exclude", - container: types.Container{ - Labels: map[string]string{ - "a": "x", - "b": "y", - }, - }, + container: genContainerLabeled(map[string]string{ + "a": "x", + "b": "y", + }), include: []string{}, exclude: []string{"b"}, expected: map[string]string{ @@ -344,13 +340,11 @@ func TestContainerLabels(t *testing.T) { }, { name: "Include Glob", - container: types.Container{ - Labels: map[string]string{ - "aa": "x", - "ab": "y", - "bb": "z", - }, - }, + container: genContainerLabeled(map[string]string{ + "aa": "x", + "ab": "y", + "bb": "z", + }), include: []string{"a*"}, exclude: []string{}, expected: map[string]string{ @@ -360,13 +354,11 @@ func TestContainerLabels(t *testing.T) { }, { name: "Exclude Glob", - container: types.Container{ - Labels: map[string]string{ - "aa": "x", - "ab": "y", - "bb": "z", - }, - }, + container: genContainerLabeled(map[string]string{ + "aa": "x", + "ab": "y", + "bb": "z", + }), include: []string{}, exclude: []string{"a*"}, expected: map[string]string{ @@ -375,13 +367,11 @@ func TestContainerLabels(t *testing.T) { }, { name: "Excluded Includes", - container: types.Container{ - Labels: map[string]string{ - "aa": "x", - "ab": "y", - "bb": "z", - }, - }, + container: genContainerLabeled(map[string]string{ + "aa": "x", + "ab": "y", + "bb": "z", + }), include: []string{"a*"}, exclude: []string{"*b"}, expected: map[string]string{ @@ -402,6 +392,7 @@ func TestContainerLabels(t *testing.T) { } d := Docker{ + Log: testutil.Logger{}, newClient: newClientFunc, LabelInclude: tt.include, LabelExclude: tt.exclude, @@ -425,6 +416,12 @@ func TestContainerLabels(t *testing.T) { } } +func genContainerLabeled(labels map[string]string) types.Container { + c := containerList[0] + c.Labels = labels + return c +} + func TestContainerNames(t *testing.T) { var tests = []struct { name string @@ -434,112 +431,67 @@ func TestContainerNames(t *testing.T) { expected []string }{ { - name: "Nil filters matches all", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Nil filters matches all", include: nil, exclude: nil, - expected: []string{"etcd", "etcd2"}, + expected: []string{"etcd", "etcd2", "acme", "acme-test", "foo"}, }, { - name: "Empty filters matches all", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Empty filters matches all", include: []string{}, exclude: []string{}, - expected: []string{"etcd", "etcd2"}, + expected: []string{"etcd", "etcd2", "acme", "acme-test", "foo"}, }, { - name: "Match all containers", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Match all containers", include: []string{"*"}, exclude: []string{}, - expected: []string{"etcd", "etcd2"}, + expected: []string{"etcd", "etcd2", "acme", "acme-test", "foo"}, }, { - name: "Include prefix match", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Include prefix match", include: []string{"etc*"}, exclude: []string{}, expected: []string{"etcd", "etcd2"}, }, { - name: "Exact match", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Exact match", include: []string{"etcd"}, exclude: []string{}, expected: []string{"etcd"}, }, { - name: "Star matches zero length", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Star matches zero length", include: []string{"etcd2*"}, exclude: []string{}, expected: []string{"etcd2"}, }, { - name: "Exclude matches all", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Exclude matches all", include: []string{}, exclude: []string{"etc*"}, - 
expected: []string{}, + expected: []string{"acme", "acme-test", "foo"}, }, { - name: "Exclude single", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Exclude single", include: []string{}, exclude: []string{"etcd"}, - expected: []string{"etcd2"}, + expected: []string{"etcd2", "acme", "acme-test", "foo"}, }, { - name: "Exclude all", - containers: [][]string{ - {"/etcd"}, - {"/etcd2"}, - }, + name: "Exclude all", include: []string{"*"}, exclude: []string{"*"}, expected: []string{}, }, { - name: "Exclude item matching include", - containers: [][]string{ - {"acme"}, - {"foo"}, - {"acme-test"}, - }, + name: "Exclude item matching include", include: []string{"acme*"}, exclude: []string{"*test*"}, expected: []string{"acme"}, }, { - name: "Exclude item no wildcards", - containers: [][]string{ - {"acme"}, - {"acme-test"}, - }, + name: "Exclude item no wildcards", include: []string{"acme*"}, exclude: []string{"test"}, expected: []string{"acme", "acme-test"}, @@ -552,18 +504,17 @@ func TestContainerNames(t *testing.T) { newClientFunc := func(host string, tlsConfig *tls.Config) (Client, error) { client := baseClient client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { - var containers []types.Container - for _, names := range tt.containers { - containers = append(containers, types.Container{ - Names: names, - }) - } - return containers, nil + return containerList, nil } + client.ContainerStatsF = func(c context.Context, s string, b bool) (types.ContainerStats, error) { + return containerStats(s), nil + } + return &client, nil } d := Docker{ + Log: testutil.Logger{}, newClient: newClientFunc, ContainerInclude: tt.include, ContainerExclude: tt.exclude, @@ -591,9 +542,212 @@ func TestContainerNames(t *testing.T) { } } +func FilterMetrics(metrics []telegraf.Metric, f func(telegraf.Metric) bool) []telegraf.Metric { + results := []telegraf.Metric{} + for _, m := range metrics { + if f(m) { + results = append(results, m) + } + } + return results +} + +func TestContainerStatus(t *testing.T) { + var tests = []struct { + name string + now func() time.Time + inspect types.ContainerJSON + expected []telegraf.Metric + }{ + { + name: "finished_at is zero value", + now: func() time.Time { + return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) + }, + inspect: containerInspect(), + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_container_status", + map[string]string{ + "container_name": "etcd", + "container_image": "quay.io/coreos/etcd", + "container_version": "v2.2.2", + "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": "running", + "source": "e2173b9478a6", + }, + map[string]interface{}{ + "oomkilled": false, + "pid": 1234, + "exitcode": 0, + "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + "started_at": time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC).UnixNano(), + "uptime_ns": int64(3 * time.Minute), + }, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + }, + { + name: "finished_at is non-zero value", + now: func() time.Time { + return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) + }, + inspect: func() types.ContainerJSON { + i := containerInspect() + i.ContainerJSONBase.State.FinishedAt = "2018-06-14T05:53:53.266176036Z" + return i + }(), + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_container_status", + map[string]string{ + "container_name": 
"etcd", + "container_image": "quay.io/coreos/etcd", + "container_version": "v2.2.2", + "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": "running", + "source": "e2173b9478a6", + }, + map[string]interface{}{ + "oomkilled": false, + "pid": 1234, + "exitcode": 0, + "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + "started_at": time.Date(2018, 6, 14, 5, 48, 53, 266176036, time.UTC).UnixNano(), + "finished_at": time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC).UnixNano(), + "uptime_ns": int64(5 * time.Minute), + }, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + }, + { + name: "started_at is zero value", + now: func() time.Time { + return time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC) + }, + inspect: func() types.ContainerJSON { + i := containerInspect() + i.ContainerJSONBase.State.StartedAt = "" + i.ContainerJSONBase.State.FinishedAt = "2018-06-14T05:53:53.266176036Z" + return i + }(), + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_container_status", + map[string]string{ + "container_name": "etcd", + "container_image": "quay.io/coreos/etcd", + "container_version": "v2.2.2", + "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": "running", + "source": "e2173b9478a6", + }, + map[string]interface{}{ + "oomkilled": false, + "pid": 1234, + "exitcode": 0, + "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + "finished_at": time.Date(2018, 6, 14, 5, 53, 53, 266176036, time.UTC).UnixNano(), + }, + time.Date(2018, 6, 14, 5, 51, 53, 266176036, time.UTC), + ), + }, + }, + { + name: "container has been restarted", + now: func() time.Time { + return time.Date(2019, 1, 1, 0, 0, 3, 0, time.UTC) + }, + inspect: func() types.ContainerJSON { + i := containerInspect() + i.ContainerJSONBase.State.StartedAt = "2019-01-01T00:00:02Z" + i.ContainerJSONBase.State.FinishedAt = "2019-01-01T00:00:01Z" + return i + }(), + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_container_status", + map[string]string{ + "container_name": "etcd", + "container_image": "quay.io/coreos/etcd", + "container_version": "v2.2.2", + "engine_host": "absol", + "label1": "test_value_1", + "label2": "test_value_2", + "server_version": "17.09.0-ce", + "container_status": "running", + "source": "e2173b9478a6", + }, + map[string]interface{}{ + "oomkilled": false, + "pid": 1234, + "exitcode": 0, + "container_id": "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", + "started_at": time.Date(2019, 1, 1, 0, 0, 2, 0, time.UTC).UnixNano(), + "finished_at": time.Date(2019, 1, 1, 0, 0, 1, 0, time.UTC).UnixNano(), + "uptime_ns": int64(1 * time.Second), + }, + time.Date(2019, 1, 1, 0, 0, 3, 0, time.UTC), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ( + acc testutil.Accumulator + newClientFunc = func(string, *tls.Config) (Client, error) { + client := baseClient + client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { + return containerList[:1], nil + } + client.ContainerInspectF = func(c context.Context, s string) (types.ContainerJSON, error) { + return tt.inspect, nil + } + + return &client, nil + } + d = Docker{ + Log: testutil.Logger{}, + newClient: newClientFunc, + IncludeSourceTag: true, + } + ) + + // mock time + if tt.now != nil { + now = 
tt.now + } + defer func() { + now = time.Now + }() + + err := d.Gather(&acc) + require.NoError(t, err) + + actual := FilterMetrics(acc.GetTelegrafMetrics(), func(m telegraf.Metric) bool { + return m.Name() == "docker_container_status" + }) + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} + func TestDockerGatherInfo(t *testing.T) { var acc testutil.Accumulator d := Docker{ + Log: testutil.Logger{}, newClient: newClient, TagEnvironment: []string{"ENVVAR1", "ENVVAR2", "ENVVAR3", "ENVVAR5", "ENVVAR6", "ENVVAR7", "ENVVAR8", "ENVVAR9"}, @@ -621,6 +775,29 @@ func TestDockerGatherInfo(t *testing.T) { }, ) + acc.AssertContainsTaggedFields(t, + "docker", + map[string]interface{}{ + "memory_total": int64(3840757760), + }, + map[string]string{ + "engine_host": "absol", + "server_version": "17.09.0-ce", + }, + ) + + acc.AssertContainsTaggedFields(t, + "docker", + map[string]interface{}{ + "pool_blocksize": int64(65540), + }, + map[string]string{ + "engine_host": "absol", + "server_version": "17.09.0-ce", + "unit": "bytes", + }, + ) + acc.AssertContainsTaggedFields(t, "docker_data", map[string]interface{}{ @@ -629,11 +806,46 @@ func TestDockerGatherInfo(t *testing.T) { "available": int64(36530000000), }, map[string]string{ - "unit": "bytes", "engine_host": "absol", "server_version": "17.09.0-ce", + "unit": "bytes", }, ) + + acc.AssertContainsTaggedFields(t, + "docker_metadata", + map[string]interface{}{ + "used": int64(20970000), + "total": int64(2146999999), + "available": int64(2126999999), + }, + map[string]string{ + "engine_host": "absol", + "server_version": "17.09.0-ce", + "unit": "bytes", + }, + ) + + acc.AssertContainsTaggedFields(t, + "docker_devicemapper", + map[string]interface{}{ + "base_device_size_bytes": int64(10740000000), + "pool_blocksize_bytes": int64(65540), + "data_space_used_bytes": int64(17300000000), + "data_space_total_bytes": int64(107400000000), + "data_space_available_bytes": int64(36530000000), + "metadata_space_used_bytes": int64(20970000), + "metadata_space_total_bytes": int64(2146999999), + "metadata_space_available_bytes": int64(2126999999), + "thin_pool_minimum_free_space_bytes": int64(10740000000), + }, + map[string]string{ + "engine_host": "absol", + "server_version": "17.09.0-ce", + "pool_name": "docker-8:1-1182287-pool", + }, + ) + acc.AssertContainsTaggedFields(t, "docker_container_cpu", map[string]interface{}{ @@ -685,6 +897,7 @@ func TestDockerGatherInfo(t *testing.T) { func TestDockerGatherSwarmInfo(t *testing.T) { var acc testutil.Accumulator d := Docker{ + Log: testutil.Logger{}, newClient: newClient, } @@ -731,35 +944,35 @@ func TestContainerStateFilter(t *testing.T) { { name: "default", expected: map[string][]string{ - "status": []string{"running"}, + "status": {"running"}, }, }, { name: "include running", include: []string{"running"}, expected: map[string][]string{ - "status": []string{"running"}, + "status": {"running"}, }, }, { name: "include glob", include: []string{"r*"}, expected: map[string][]string{ - "status": []string{"restarting", "running", "removing"}, + "status": {"restarting", "running", "removing"}, }, }, { name: "include all", include: []string{"*"}, expected: map[string][]string{ - "status": []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"}, + "status": {"created", "restarting", "running", "removing", "paused", "exited", "dead"}, }, }, { name: "exclude all", exclude: []string{"*"}, expected: map[string][]string{ - "status": []string{}, + "status": {}, }, }, { @@ -767,7 +980,7 @@ func 
TestContainerStateFilter(t *testing.T) { include: []string{"*"}, exclude: []string{"exited"}, expected: map[string][]string{ - "status": []string{"created", "restarting", "running", "removing", "paused", "dead"}, + "status": {"created", "restarting", "running", "removing", "paused", "dead"}, }, }, } @@ -792,6 +1005,7 @@ func TestContainerStateFilter(t *testing.T) { } d := Docker{ + Log: testutil.Logger{}, newClient: newClientFunc, ContainerStateInclude: tt.include, ContainerStateExclude: tt.exclude, @@ -802,3 +1016,104 @@ func TestContainerStateFilter(t *testing.T) { }) } } + +func TestContainerName(t *testing.T) { + tests := []struct { + name string + clientFunc func(host string, tlsConfig *tls.Config) (Client, error) + expected string + }{ + { + name: "container stats name is preferred", + clientFunc: func(host string, tlsConfig *tls.Config) (Client, error) { + client := baseClient + client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { + var containers []types.Container + containers = append(containers, types.Container{ + Names: []string{"/logspout/foo"}, + }) + return containers, nil + } + client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + return types.ContainerStats{ + Body: ioutil.NopCloser(strings.NewReader(`{"name": "logspout"}`)), + }, nil + } + return &client, nil + }, + expected: "logspout", + }, + { + name: "container stats without name uses container list name", + clientFunc: func(host string, tlsConfig *tls.Config) (Client, error) { + client := baseClient + client.ContainerListF = func(context.Context, types.ContainerListOptions) ([]types.Container, error) { + var containers []types.Container + containers = append(containers, types.Container{ + Names: []string{"/logspout"}, + }) + return containers, nil + } + client.ContainerStatsF = func(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + return types.ContainerStats{ + Body: ioutil.NopCloser(strings.NewReader(`{}`)), + }, nil + } + return &client, nil + }, + expected: "logspout", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := Docker{ + Log: testutil.Logger{}, + newClient: tt.clientFunc, + } + var acc testutil.Accumulator + err := d.Gather(&acc) + require.NoError(t, err) + + for _, metric := range acc.Metrics { + // This tag is set on all container measurements + if metric.Measurement == "docker_container_mem" { + require.Equal(t, tt.expected, metric.Tags["container_name"]) + } + } + }) + } +} + +func TestHostnameFromID(t *testing.T) { + tests := []struct { + name string + id string + expect string + }{ + { + name: "Real ID", + id: "565e3a55f5843cfdd4aa5659a1a75e4e78d47f73c3c483f782fe4a26fc8caa07", + expect: "565e3a55f584", + }, + { + name: "Short ID", + id: "shortid123", + expect: "shortid123", + }, + { + name: "No ID", + id: "", + expect: "shortid123", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + output := hostnameFromID(test.id) + if test.expect != output { + t.Logf("Container ID for hostname is wrong. 
Want: %s, Got: %s", output, test.expect) + } + }) + } + +} diff --git a/plugins/inputs/docker/docker_testdata.go b/plugins/inputs/docker/docker_testdata.go index 1168048a2..d50b80b9a 100644 --- a/plugins/inputs/docker/docker_testdata.go +++ b/plugins/inputs/docker/docker_testdata.go @@ -1,6 +1,7 @@ package docker import ( + "fmt" "io/ioutil" "strings" "time" @@ -46,7 +47,7 @@ var info = types.Info{ HTTPSProxy: "", Labels: []string{}, MemoryLimit: false, - DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}}, + DriverStatus: [][2]string{{"Pool Name", "docker-8:1-1182287-pool"}, {"Base Device Size", "10.74 GB"}, {"Pool Blocksize", "65.54 kB"}, {"Backing Filesystem", "extfs"}, {"Data file", "/dev/loop0"}, {"Metadata file", "/dev/loop1"}, {"Data Space Used", "17.3 GB"}, {"Data Space Total", "107.4 GB"}, {"Data Space Available", "36.53 GB"}, {"Metadata Space Used", "20.97 MB"}, {"Metadata Space Total", "2.147 GB"}, {"Metadata Space Available", "2.127 GB"}, {"Udev Sync Supported", "true"}, {"Deferred Removal Enabled", "false"}, {"Data loop file", "/var/lib/docker/devicemapper/devicemapper/data"}, {"Metadata loop file", "/var/lib/docker/devicemapper/devicemapper/metadata"}, {"Library Version", "1.02.115 (2016-01-25)"}, {"Thin Pool Minimum Free Space", "10.74GB"}}, NFd: 19, HTTPProxy: "", Driver: "devicemapper", @@ -59,7 +60,7 @@ var info = types.Info{ } var containerList = []types.Container{ - types.Container{ + { ID: "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb", Names: []string{"/etcd"}, Image: "quay.io/coreos/etcd:v2.2.2", @@ -67,22 +68,22 @@ var containerList = []types.Container{ Created: 1455941930, Status: "Up 4 hours", Ports: []types.Port{ - types.Port{ + { PrivatePort: 7001, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 4001, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 2380, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 2379, PublicPort: 2379, Type: "tcp", @@ -96,7 +97,7 @@ var containerList = []types.Container{ SizeRw: 0, SizeRootFs: 0, }, - types.Container{ + { ID: "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173", Names: []string{"/etcd2"}, Image: "quay.io:4443/coreos/etcd:v2.2.2", @@ -104,22 +105,22 @@ var containerList = []types.Container{ Created: 1455941933, Status: "Up 4 hours", Ports: []types.Port{ - types.Port{ + { PrivatePort: 7002, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 4002, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 2381, PublicPort: 0, Type: "tcp", }, - types.Port{ + { PrivatePort: 2382, PublicPort: 2382, Type: "tcp", @@ -133,11 +134,23 @@ var containerList = []types.Container{ SizeRw: 0, SizeRootFs: 0, }, + { + ID: "e8a713dd90604f5a257b97c15945e047ab60ed5b2c4397c5a6b5bf40e1bd2791", + Names: []string{"/acme"}, + }, + { + ID: "9bc6faf9ba8106fae32e8faafd38a1dd6f6d262bec172398cc10bc03c0d6841a", + Names: []string{"/acme-test"}, + }, 
+ { + ID: "d4ccced494a1d5fe8ebdb0a86335a0dab069319912221e5838a132ab18a8bc84", + Names: []string{"/foo"}, + }, } var two = uint64(2) var ServiceList = []swarm.Service{ - swarm.Service{ + { ID: "qolkls9g5iasdiuihcyz9rnx2", Spec: swarm.ServiceSpec{ Annotations: swarm.Annotations{ @@ -150,7 +163,7 @@ var ServiceList = []swarm.Service{ }, }, }, - swarm.Service{ + { ID: "qolkls9g5iasdiuihcyz9rn3", Spec: swarm.ServiceSpec{ Annotations: swarm.Annotations{ @@ -164,7 +177,7 @@ var ServiceList = []swarm.Service{ } var TaskList = []swarm.Task{ - swarm.Task{ + { ID: "kwh0lv7hwwbh", ServiceID: "qolkls9g5iasdiuihcyz9rnx2", NodeID: "0cl4jturcyd1ks3fwpd010kor", @@ -173,7 +186,7 @@ var TaskList = []swarm.Task{ }, DesiredState: "running", }, - swarm.Task{ + { ID: "u78m5ojbivc3", ServiceID: "qolkls9g5iasdiuihcyz9rnx2", NodeID: "0cl4jturcyd1ks3fwpd010kor", @@ -182,7 +195,7 @@ var TaskList = []swarm.Task{ }, DesiredState: "running", }, - swarm.Task{ + { ID: "1n1uilkhr98l", ServiceID: "qolkls9g5iasdiuihcyz9rn3", NodeID: "0cl4jturcyd1ks3fwpd010kor", @@ -194,13 +207,13 @@ var TaskList = []swarm.Task{ } var NodeList = []swarm.Node{ - swarm.Node{ + { ID: "0cl4jturcyd1ks3fwpd010kor", Status: swarm.NodeStatus{ State: "ready", }, }, - swarm.Node{ + { ID: "0cl4jturcyd1ks3fwpd010kor", Status: swarm.NodeStatus{ State: "ready", @@ -208,10 +221,25 @@ var NodeList = []swarm.Node{ }, } -func containerStats() types.ContainerStats { +func containerStats(s string) types.ContainerStats { var stat types.ContainerStats - jsonStat := ` + var name string + switch s { + case "e2173b9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296b7dfb": + name = "etcd" + case "b7dfbb9478a6ae55e237d4d74f8bbb753f0817192b5081334dc78476296e2173": + name = "etcd2" + case "e8a713dd90604f5a257b97c15945e047ab60ed5b2c4397c5a6b5bf40e1bd2791": + name = "/acme" + case "9bc6faf9ba8106fae32e8faafd38a1dd6f6d262bec172398cc10bc03c0d6841a": + name = "/acme-test" + case "d4ccced494a1d5fe8ebdb0a86335a0dab069319912221e5838a132ab18a8bc84": + name = "/foo" + } + + jsonStat := fmt.Sprintf(` { + "name": "%s", "blkio_stats": { "io_service_bytes_recursive": [ { @@ -315,7 +343,7 @@ func containerStats() types.ContainerStats { "throttling_data": {} }, "read": "2016-02-24T11:42:27.472459608-05:00" -}` +}`, name) stat.Body = ioutil.NopCloser(strings.NewReader(jsonStat)) return stat } @@ -464,32 +492,34 @@ func containerStatsWindows() types.ContainerStats { return stat } -var containerInspect = types.ContainerJSON{ - Config: &container.Config{ - Env: []string{ - "ENVVAR1=loremipsum", - "ENVVAR1FOO=loremipsum", - "ENVVAR2=dolorsitamet", - "ENVVAR3==ubuntu:10.04", - "ENVVAR4", - "ENVVAR5=", - "ENVVAR6= ", - "ENVVAR7=ENVVAR8=ENVVAR9", - "PATH=/bin:/sbin", - }, - }, - ContainerJSONBase: &types.ContainerJSONBase{ - State: &types.ContainerState{ - Health: &types.Health{ - FailingStreak: 1, - Status: "Unhealthy", +func containerInspect() types.ContainerJSON { + return types.ContainerJSON{ + Config: &container.Config{ + Env: []string{ + "ENVVAR1=loremipsum", + "ENVVAR1FOO=loremipsum", + "ENVVAR2=dolorsitamet", + "ENVVAR3==ubuntu:10.04", + "ENVVAR4", + "ENVVAR5=", + "ENVVAR6= ", + "ENVVAR7=ENVVAR8=ENVVAR9", + "PATH=/bin:/sbin", }, - Status: "running", - OOMKilled: false, - Pid: 1234, - ExitCode: 0, - StartedAt: "2018-06-14T05:48:53.266176036Z", - FinishedAt: "0001-01-01T00:00:00Z", }, - }, + ContainerJSONBase: &types.ContainerJSONBase{ + State: &types.ContainerState{ + Health: &types.Health{ + FailingStreak: 1, + Status: "Unhealthy", + }, + Status: "running", + OOMKilled: false, + Pid: 
1234, + ExitCode: 0, + StartedAt: "2018-06-14T05:48:53.266176036Z", + FinishedAt: "0001-01-01T00:00:00Z", + }, + }, + } } diff --git a/plugins/inputs/docker/errors.go b/plugins/inputs/docker/errors.go new file mode 100644 index 000000000..f3c0f76a5 --- /dev/null +++ b/plugins/inputs/docker/errors.go @@ -0,0 +1,11 @@ +package docker + +import "errors" + +var ( + errInfoTimeout = errors.New("timeout retrieving docker engine info") + errStatsTimeout = errors.New("timeout retrieving container stats") + errInspectTimeout = errors.New("timeout retrieving container environment") + errListTimeout = errors.New("timeout retrieving container list") + errServiceTimeout = errors.New("timeout retrieving swarm service list") +) diff --git a/plugins/inputs/docker/stats_helpers.go b/plugins/inputs/docker/stats_helpers.go index b4c91e2fc..93ea2f219 100644 --- a/plugins/inputs/docker/stats_helpers.go +++ b/plugins/inputs/docker/stats_helpers.go @@ -4,7 +4,7 @@ package docker import "github.com/docker/docker/api/types" -func calculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { +func CalculateCPUPercentUnix(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { var ( cpuPercent = 0.0 // calculate the change for the cpu usage of the container in between readings @@ -39,13 +39,13 @@ func calculateCPUPercentWindows(v *types.StatsJSON) float64 { return 0.00 } -// calculateMemUsageUnixNoCache calculate memory usage of the container. +// CalculateMemUsageUnixNoCache calculate memory usage of the container. // Page cache is intentionally excluded to avoid misinterpretation of the output. -func calculateMemUsageUnixNoCache(mem types.MemoryStats) float64 { +func CalculateMemUsageUnixNoCache(mem types.MemoryStats) float64 { return float64(mem.Usage - mem.Stats["cache"]) } -func calculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 { +func CalculateMemPercentUnixNoCache(limit float64, usedNoCache float64) float64 { // MemoryStats.Limit will never be 0 unless the container is not running and we haven't // got any data from cgroup if limit != 0 { diff --git a/plugins/inputs/docker_log/README.md b/plugins/inputs/docker_log/README.md new file mode 100644 index 000000000..d2f0dc614 --- /dev/null +++ b/plugins/inputs/docker_log/README.md @@ -0,0 +1,99 @@ +# Docker Log Input Plugin + +The docker log plugin uses the Docker Engine API to get logs on running +docker containers. + +The docker plugin uses the [Official Docker Client][] to gather logs from the +[Engine API][]. + +**Note:** This plugin works only for containers with the `local` or +`json-file` or `journald` logging driver. + +[Official Docker Client]: https://github.com/moby/moby/tree/master/client +[Engine API]: https://docs.docker.com/engine/api/v1.24/ + +### Configuration + +```toml +[[inputs.docker_log]] + ## Docker Endpoint + ## To use TCP, set endpoint = "tcp://[ip]:[port]" + ## To use environment variables (ie, docker-machine), set endpoint = "ENV" + # endpoint = "unix:///var/run/docker.sock" + + ## When true, container logs are read from the beginning; otherwise + ## reading begins at the end of the log. + # from_beginning = false + + ## Timeout for Docker API calls. + # timeout = "5s" + + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + # container_name_include = [] + # container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. 
+ ## When empty only containers in the "running" state will be captured. + # container_state_include = [] + # container_state_exclude = [] + + ## docker labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags + # docker_label_include = [] + # docker_label_exclude = [] + + ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars + source_tag = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +#### Environment Configuration + +When using the `"ENV"` endpoint, the connection is configured using the +[CLI Docker environment variables][env] + +[env]: https://godoc.org/github.com/moby/moby/client#NewEnvClient + +### source tag + +Selecting the containers can be tricky if you have many containers with the same name. +To alleviate this issue you can set the below value to `true` + +```toml +source_tag = true +``` + +This will cause all data points to have the `source` tag be set to the first 12 characters of the container id. The first 12 characters is the common hostname for containers that have no explicit hostname set, as defined by docker. + +### Metrics + +- docker_log + - tags: + - container_image + - container_version + - container_name + - stream (stdout, stderr, or tty) + - source + - fields: + - container_id + - message + +### Example Output + +``` +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! [agent] Config: Interval:10s, Quiet:false, Hostname:\"371ee5d3e587\", Flush Interval:10s" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Tags enabled: host=371ee5d3e587" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded outputs: file" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded processors:" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded aggregators:" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Loaded inputs: net" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! 
Using config file: /etc/telegraf/telegraf.conf" 1560913872000000000 +docker_log,container_image=telegraf,container_name=sharp_bell,container_version=alpine,stream=stderr container_id="371ee5d3e58726112f499be62cddef800138ca72bbba635ed2015fbf475b1023",message="2019-06-19T03:11:11Z I! Starting Telegraf 1.10.4" 1560913872000000000 +``` diff --git a/plugins/inputs/docker_log/client.go b/plugins/inputs/docker_log/client.go new file mode 100644 index 000000000..7667c6e4d --- /dev/null +++ b/plugins/inputs/docker_log/client.go @@ -0,0 +1,63 @@ +package docker_log + +import ( + "context" + "crypto/tls" + "io" + "net/http" + + "github.com/docker/docker/api/types" + docker "github.com/docker/docker/client" +) + +/*This file is inherited from telegraf docker input plugin*/ +var ( + version = "1.24" + defaultHeaders = map[string]string{"User-Agent": "engine-api-cli-1.0"} +) + +type Client interface { + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) +} + +func NewEnvClient() (Client, error) { + client, err := docker.NewClientWithOpts(docker.FromEnv) + if err != nil { + return nil, err + } + return &SocketClient{client}, nil +} + +func NewClient(host string, tlsConfig *tls.Config) (Client, error) { + transport := &http.Transport{ + TLSClientConfig: tlsConfig, + } + httpClient := &http.Client{Transport: transport} + client, err := docker.NewClientWithOpts( + docker.WithHTTPHeaders(defaultHeaders), + docker.WithHTTPClient(httpClient), + docker.WithVersion(version), + docker.WithHost(host)) + + if err != nil { + return nil, err + } + return &SocketClient{client}, nil +} + +type SocketClient struct { + client *docker.Client +} + +func (c *SocketClient) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + return c.client.ContainerList(ctx, options) +} + +func (c *SocketClient) ContainerLogs(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + return c.client.ContainerLogs(ctx, containerID, options) +} +func (c *SocketClient) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + return c.client.ContainerInspect(ctx, containerID) +} diff --git a/plugins/inputs/docker_log/docker_log.go b/plugins/inputs/docker_log/docker_log.go new file mode 100644 index 000000000..bf29ede43 --- /dev/null +++ b/plugins/inputs/docker_log/docker_log.go @@ -0,0 +1,476 @@ +package docker_log + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "strings" + "sync" + "time" + "unicode" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/pkg/stdcopy" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/docker" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var sampleConfig = ` + ## Docker Endpoint + ## To use TCP, set endpoint = "tcp://[ip]:[port]" + ## To use environment variables (ie, docker-machine), set endpoint = "ENV" + # endpoint = "unix:///var/run/docker.sock" + + ## When true, container logs are read from the beginning; otherwise + ## reading begins at the end of the log. 
+ # from_beginning = false + + ## Timeout for Docker API calls. + # timeout = "5s" + + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + # container_name_include = [] + # container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. + ## When empty only containers in the "running" state will be captured. + # container_state_include = [] + # container_state_exclude = [] + + ## docker labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags + # docker_label_include = [] + # docker_label_exclude = [] + + ## Set the source tag for the metrics to the container ID hostname, eg first 12 chars + source_tag = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +const ( + defaultEndpoint = "unix:///var/run/docker.sock" + + // Maximum bytes of a log line before it will be split, size is mirroring + // docker code: + // https://github.com/moby/moby/blob/master/daemon/logger/copier.go#L21 + maxLineBytes = 16 * 1024 +) + +var ( + containerStates = []string{"created", "restarting", "running", "removing", "paused", "exited", "dead"} + // ensure *DockerLogs implements telegraf.ServiceInput + _ telegraf.ServiceInput = (*DockerLogs)(nil) +) + +type DockerLogs struct { + Endpoint string `toml:"endpoint"` + FromBeginning bool `toml:"from_beginning"` + Timeout internal.Duration `toml:"timeout"` + LabelInclude []string `toml:"docker_label_include"` + LabelExclude []string `toml:"docker_label_exclude"` + ContainerInclude []string `toml:"container_name_include"` + ContainerExclude []string `toml:"container_name_exclude"` + ContainerStateInclude []string `toml:"container_state_include"` + ContainerStateExclude []string `toml:"container_state_exclude"` + IncludeSourceTag bool `toml:"source_tag"` + + tlsint.ClientConfig + + newEnvClient func() (Client, error) + newClient func(string, *tls.Config) (Client, error) + + client Client + labelFilter filter.Filter + containerFilter filter.Filter + stateFilter filter.Filter + opts types.ContainerListOptions + wg sync.WaitGroup + mu sync.Mutex + containerList map[string]context.CancelFunc +} + +func (d *DockerLogs) Description() string { + return "Read logging output from the Docker engine" +} + +func (d *DockerLogs) SampleConfig() string { + return sampleConfig +} + +func (d *DockerLogs) Init() error { + var err error + if d.Endpoint == "ENV" { + d.client, err = d.newEnvClient() + if err != nil { + return err + } + } else { + tlsConfig, err := d.ClientConfig.TLSConfig() + if err != nil { + return err + } + d.client, err = d.newClient(d.Endpoint, tlsConfig) + if err != nil { + return err + } + } + + // Create filters + err = d.createLabelFilters() + if err != nil { + return err + } + err = d.createContainerFilters() + if err != nil { + return err + } + err = d.createContainerStateFilters() + if err != nil { + return err + } + + filterArgs := filters.NewArgs() + for _, state := range containerStates { + if d.stateFilter.Match(state) { + filterArgs.Add("status", state) + } + } + + if filterArgs.Len() != 0 { + d.opts = types.ContainerListOptions{ + Filters: filterArgs, + } + } + + return nil +} + +func (d *DockerLogs) addToContainerList(containerID string, cancel context.CancelFunc) error { + d.mu.Lock() + defer d.mu.Unlock() + 
d.containerList[containerID] = cancel + return nil +} + +func (d *DockerLogs) removeFromContainerList(containerID string) error { + d.mu.Lock() + defer d.mu.Unlock() + delete(d.containerList, containerID) + return nil +} + +func (d *DockerLogs) containerInContainerList(containerID string) bool { + d.mu.Lock() + defer d.mu.Unlock() + _, ok := d.containerList[containerID] + return ok +} + +func (d *DockerLogs) cancelTails() error { + d.mu.Lock() + defer d.mu.Unlock() + for _, cancel := range d.containerList { + cancel() + } + return nil +} + +func (d *DockerLogs) matchedContainerName(names []string) string { + // Check if all container names are filtered; in practice I believe + // this array is always of length 1. + for _, name := range names { + trimmedName := strings.TrimPrefix(name, "/") + match := d.containerFilter.Match(trimmedName) + if match { + return trimmedName + } + } + return "" +} + +func (d *DockerLogs) Gather(acc telegraf.Accumulator) error { + ctx := context.Background() + acc.SetPrecision(time.Nanosecond) + + ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + defer cancel() + containers, err := d.client.ContainerList(ctx, d.opts) + if err != nil { + return err + } + + for _, container := range containers { + if d.containerInContainerList(container.ID) { + continue + } + + containerName := d.matchedContainerName(container.Names) + if containerName == "" { + continue + } + + ctx, cancel := context.WithCancel(context.Background()) + d.addToContainerList(container.ID, cancel) + + // Start a new goroutine for every new container that has logs to collect + d.wg.Add(1) + go func(container types.Container) { + defer d.wg.Done() + defer d.removeFromContainerList(container.ID) + + err = d.tailContainerLogs(ctx, acc, container, containerName) + if err != nil && err != context.Canceled { + acc.AddError(err) + } + }(container) + } + return nil +} + +func (d *DockerLogs) hasTTY(ctx context.Context, container types.Container) (bool, error) { + ctx, cancel := context.WithTimeout(ctx, d.Timeout.Duration) + defer cancel() + c, err := d.client.ContainerInspect(ctx, container.ID) + if err != nil { + return false, err + } + return c.Config.Tty, nil +} + +func (d *DockerLogs) tailContainerLogs( + ctx context.Context, + acc telegraf.Accumulator, + container types.Container, + containerName string, +) error { + imageName, imageVersion := docker.ParseImage(container.Image) + tags := map[string]string{ + "container_name": containerName, + "container_image": imageName, + "container_version": imageVersion, + } + + if d.IncludeSourceTag { + tags["source"] = hostnameFromID(container.ID) + } + + // Add matching container labels as tags + for k, label := range container.Labels { + if d.labelFilter.Match(k) { + tags[k] = label + } + } + + hasTTY, err := d.hasTTY(ctx, container) + if err != nil { + return err + } + + tail := "0" + if d.FromBeginning { + tail = "all" + } + + logOptions := types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: false, + Follow: true, + Tail: tail, + } + + logReader, err := d.client.ContainerLogs(ctx, container.ID, logOptions) + if err != nil { + return err + } + + // If the container is using a TTY, there is only a single stream + // (stdout), and data is copied directly from the container output stream, + // no extra multiplexing or headers. + // + // If the container is *not* using a TTY, streams for stdout and stderr are + // multiplexed. 
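+	// In the multiplexed case each frame is preceded by an 8-byte header:
+	// one byte identifying the stream (stdout or stderr), three bytes of
+	// padding, and a big-endian uint32 frame length. stdcopy.StdCopy in
+	// tailMultiplexed strips these headers while demultiplexing.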
+ if hasTTY { + return tailStream(acc, tags, container.ID, logReader, "tty") + } else { + return tailMultiplexed(acc, tags, container.ID, logReader) + } +} + +func parseLine(line []byte) (time.Time, string, error) { + parts := bytes.SplitN(line, []byte(" "), 2) + + switch len(parts) { + case 1: + parts = append(parts, []byte("")) + } + + tsString := string(parts[0]) + + // Keep any leading space, but remove whitespace from end of line. + // This preserves space in, for example, stacktraces, while removing + // annoying end of line characters and is similar to how other logging + // plugins such as syslog behave. + message := bytes.TrimRightFunc(parts[1], unicode.IsSpace) + + ts, err := time.Parse(time.RFC3339Nano, tsString) + if err != nil { + return time.Time{}, "", fmt.Errorf("error parsing timestamp %q: %v", tsString, err) + } + + return ts, string(message), nil +} + +func tailStream( + acc telegraf.Accumulator, + baseTags map[string]string, + containerID string, + reader io.ReadCloser, + stream string, +) error { + defer reader.Close() + + tags := make(map[string]string, len(baseTags)+1) + for k, v := range baseTags { + tags[k] = v + } + tags["stream"] = stream + + r := bufio.NewReaderSize(reader, 64*1024) + + for { + line, err := r.ReadBytes('\n') + + if len(line) != 0 { + ts, message, err := parseLine(line) + if err != nil { + acc.AddError(err) + } else { + acc.AddFields("docker_log", map[string]interface{}{ + "container_id": containerID, + "message": message, + }, tags, ts) + } + } + + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } +} + +func tailMultiplexed( + acc telegraf.Accumulator, + tags map[string]string, + containerID string, + src io.ReadCloser, +) error { + outReader, outWriter := io.Pipe() + errReader, errWriter := io.Pipe() + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err := tailStream(acc, tags, containerID, outReader, "stdout") + if err != nil { + acc.AddError(err) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + err := tailStream(acc, tags, containerID, errReader, "stderr") + if err != nil { + acc.AddError(err) + } + }() + + _, err := stdcopy.StdCopy(outWriter, errWriter, src) + outWriter.Close() + errWriter.Close() + src.Close() + wg.Wait() + return err +} + +// Start is a noop which is required for a *DockerLogs to implement +// the telegraf.ServiceInput interface +func (d *DockerLogs) Start(telegraf.Accumulator) error { + return nil +} + +func (d *DockerLogs) Stop() { + d.cancelTails() + d.wg.Wait() +} + +// Following few functions have been inherited from telegraf docker input plugin +func (d *DockerLogs) createContainerFilters() error { + filter, err := filter.NewIncludeExcludeFilter(d.ContainerInclude, d.ContainerExclude) + if err != nil { + return err + } + d.containerFilter = filter + return nil +} + +func (d *DockerLogs) createLabelFilters() error { + filter, err := filter.NewIncludeExcludeFilter(d.LabelInclude, d.LabelExclude) + if err != nil { + return err + } + d.labelFilter = filter + return nil +} + +func (d *DockerLogs) createContainerStateFilters() error { + if len(d.ContainerStateInclude) == 0 && len(d.ContainerStateExclude) == 0 { + d.ContainerStateInclude = []string{"running"} + } + filter, err := filter.NewIncludeExcludeFilter(d.ContainerStateInclude, d.ContainerStateExclude) + if err != nil { + return err + } + d.stateFilter = filter + return nil +} + +func init() { + inputs.Add("docker_log", func() telegraf.Input { + return &DockerLogs{ + Timeout: internal.Duration{Duration: 
time.Second * 5}, + Endpoint: defaultEndpoint, + newEnvClient: NewEnvClient, + newClient: NewClient, + containerList: make(map[string]context.CancelFunc), + } + }) +} + +func hostnameFromID(id string) string { + if len(id) > 12 { + return id[0:12] + } + return id +} diff --git a/plugins/inputs/docker_log/docker_log_test.go b/plugins/inputs/docker_log/docker_log_test.go new file mode 100644 index 000000000..c8903c9d8 --- /dev/null +++ b/plugins/inputs/docker_log/docker_log_test.go @@ -0,0 +1,188 @@ +package docker_log + +import ( + "bytes" + "context" + "crypto/tls" + "io" + "testing" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/stdcopy" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +type MockClient struct { + ContainerListF func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerInspectF func(ctx context.Context, containerID string) (types.ContainerJSON, error) + ContainerLogsF func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) +} + +func (c *MockClient) ContainerList( + ctx context.Context, + options types.ContainerListOptions, +) ([]types.Container, error) { + return c.ContainerListF(ctx, options) +} + +func (c *MockClient) ContainerInspect( + ctx context.Context, + containerID string, +) (types.ContainerJSON, error) { + return c.ContainerInspectF(ctx, containerID) +} + +func (c *MockClient) ContainerLogs( + ctx context.Context, + containerID string, + options types.ContainerLogsOptions, +) (io.ReadCloser, error) { + return c.ContainerLogsF(ctx, containerID, options) +} + +type Response struct { + io.Reader +} + +func (r *Response) Close() error { + return nil +} + +func MustParse(layout, value string) time.Time { + tm, err := time.Parse(layout, value) + if err != nil { + panic(err) + } + return tm +} + +func Test(t *testing.T) { + tests := []struct { + name string + client *MockClient + expected []telegraf.Metric + }{ + { + name: "no containers", + client: &MockClient{ + ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + return nil, nil + }, + }, + }, + { + name: "one container tty", + client: &MockClient{ + ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + { + ID: "deadbeef", + Names: []string{"/telegraf"}, + Image: "influxdata/telegraf:1.11.0", + }, + }, nil + }, + ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{ + Config: &container.Config{ + Tty: true, + }, + }, nil + }, + ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + return &Response{Reader: bytes.NewBuffer([]byte("2020-04-28T18:43:16.432691200Z hello\n"))}, nil + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "tty", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "hello", + }, + MustParse(time.RFC3339Nano, "2020-04-28T18:43:16.432691200Z"), + ), + }, + }, + { + name: "one container multiplex", + client: &MockClient{ 
+ ContainerListF: func(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + return []types.Container{ + { + ID: "deadbeef", + Names: []string{"/telegraf"}, + Image: "influxdata/telegraf:1.11.0", + }, + }, nil + }, + ContainerInspectF: func(ctx context.Context, containerID string) (types.ContainerJSON, error) { + return types.ContainerJSON{ + Config: &container.Config{ + Tty: false, + }, + }, nil + }, + ContainerLogsF: func(ctx context.Context, containerID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + var buf bytes.Buffer + w := stdcopy.NewStdWriter(&buf, stdcopy.Stdout) + w.Write([]byte("2020-04-28T18:42:16.432691200Z hello from stdout")) + return &Response{Reader: &buf}, nil + }, + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "docker_log", + map[string]string{ + "container_name": "telegraf", + "container_image": "influxdata/telegraf", + "container_version": "1.11.0", + "stream": "stdout", + "source": "deadbeef", + }, + map[string]interface{}{ + "container_id": "deadbeef", + "message": "hello from stdout", + }, + MustParse(time.RFC3339Nano, "2020-04-28T18:42:16.432691200Z"), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + plugin := &DockerLogs{ + Timeout: internal.Duration{Duration: time.Second * 5}, + newClient: func(string, *tls.Config) (Client, error) { return tt.client, nil }, + containerList: make(map[string]context.CancelFunc), + IncludeSourceTag: true, + } + + err := plugin.Init() + require.NoError(t, err) + + err = plugin.Gather(&acc) + require.NoError(t, err) + + acc.Wait(len(tt.expected)) + plugin.Stop() + + require.Nil(t, acc.Errors) // no errors during gathering + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics()) + }) + } +} diff --git a/plugins/inputs/dovecot/README.md b/plugins/inputs/dovecot/README.md index 0611ce098..d28ae3dd9 100644 --- a/plugins/inputs/dovecot/README.md +++ b/plugins/inputs/dovecot/README.md @@ -1,9 +1,10 @@ # Dovecot Input Plugin -The dovecot plugin uses the dovecot Stats protocol to gather metrics on configured -domains. You can read Dovecot's documentation -[here](http://wiki2.dovecot.org/Statistics) +The dovecot plugin uses the Dovecot [v2.1 stats protocol][stats old] to gather +metrics on configured domains. +When using Dovecot v2.3 you are still able to use this protocol by following +the [upgrading steps][upgrading]. ### Configuration: @@ -16,59 +17,55 @@ domains. You can read Dovecot's documentation ## ## If no servers are specified, then localhost is used as the host. servers = ["localhost:24242"] + ## Type is one of "user", "domain", "ip", or "global" type = "global" + ## Wildcard matches like "*.com". 
An empty string "" is same as "*" ## If type = "ip" filters should be filters = [""] ``` +### Metrics: -### Tags: - server: hostname - type: query type - ip: ip addr - user: username - domain: domain name - - -### Fields: - - reset_timestamp time.Time - last_update time.Time - num_logins int64 - num_cmds int64 - num_connected_sessions int64 ## not in type - user_cpu float32 - sys_cpu float32 - clock_time float64 - min_faults int64 - maj_faults int64 - vol_cs int64 - invol_cs int64 - disk_input int64 - disk_output int64 - read_count int64 - read_bytes int64 - write_count int64 - write_bytes int64 - mail_lookup_path int64 - mail_lookup_attr int64 - mail_read_count int64 - mail_read_bytes int64 - mail_cache_hits int64 +- dovecot + - tags: + - server (hostname) + - type (query type) + - ip (ip addr) + - user (username) + - domain (domain name) + - fields: + - reset_timestamp (string) + - last_update (string) + - num_logins (integer) + - num_cmds (integer) + - num_connected_sessions (integer) + - user_cpu (float) + - sys_cpu (float) + - clock_time (float) + - min_faults (integer) + - maj_faults (integer) + - vol_cs (integer) + - invol_cs (integer) + - disk_input (integer) + - disk_output (integer) + - read_count (integer) + - read_bytes (integer) + - write_count (integer) + - write_bytes (integer) + - mail_lookup_path (integer) + - mail_lookup_attr (integer) + - mail_read_count (integer) + - mail_read_bytes (integer) + - mail_cache_hits (integer) ### Example Output: ``` -telegraf --config t.cfg --input-filter dovecot --test -* Plugin: dovecot, Collection 1 -> dovecot,ip=192.168.0.1,server=dovecot-1.domain.test,type=ip clock_time=0,disk_input=0i,disk_output=0i,invol_cs=0i,last_update="2016-04-08 10:59:47.000208479 +0200 CEST",mail_cache_hits=0i,mail_lookup_attr=0i,mail_lookup_path=0i,mail_read_bytes=0i,mail_read_count=0i,maj_faults=0i,min_faults=0i,num_cmds=12i,num_connected_sessions=0i,num_logins=6i,read_bytes=0i,read_count=0i,reset_timestamp="2016-04-08 10:33:34 +0200 CEST",sys_cpu=0,user_cpu=0,vol_cs=0i,write_bytes=0i,write_count=0i 1460106251633824223 -* Plugin: dovecot, Collection 1 -> dovecot,server=dovecot-1.domain.test,type=user,user=user-1@domain.test clock_time=0.00006,disk_input=405504i,disk_output=77824i,invol_cs=67i,last_update="2016-04-08 11:02:55.000111634 +0200 CEST",mail_cache_hits=26i,mail_lookup_attr=0i,mail_lookup_path=6i,mail_read_bytes=86233i,mail_read_count=5i,maj_faults=0i,min_faults=975i,num_cmds=41i,num_logins=3i,read_bytes=368833i,read_count=394i,reset_timestamp="2016-04-08 11:01:32 +0200 CEST",sys_cpu=0.008,user_cpu=0.004,vol_cs=323i,write_bytes=105086i,write_count=176i 1460106256637049167 -* Plugin: dovecot, Collection 1 -> dovecot,domain=domain.test,server=dovecot-1.domain.test,type=domain clock_time=100896189179847.7,disk_input=6467588263936i,disk_output=17933680439296i,invol_cs=1194808498i,last_update="2016-04-08 11:04:08.000377367 +0200 CEST",mail_cache_hits=46455781i,mail_lookup_attr=0i,mail_lookup_path=571490i,mail_read_bytes=79287033067i,mail_read_count=491243i,maj_faults=16992i,min_faults=1278442541i,num_cmds=606005i,num_connected_sessions=6597i,num_logins=166381i,read_bytes=30231409780721i,read_count=1624912080i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=156440.372,user_cpu=216676.476,vol_cs=2749291157i,write_bytes=17097106707594i,write_count=944448998i 1460106261639672622 -* Plugin: dovecot, Collection 1 -> dovecot,server=dovecot-1.domain.test,type=global 
clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907
+dovecot,server=dovecot-1.domain.test,type=global clock_time=101196971074203.94,disk_input=6493168218112i,disk_output=17978638815232i,invol_cs=1198855447i,last_update="2016-04-08 11:04:13.000379245 +0200 CEST",mail_cache_hits=68192209i,mail_lookup_attr=0i,mail_lookup_path=653861i,mail_read_bytes=86705151847i,mail_read_count=566125i,maj_faults=17208i,min_faults=1286179702i,num_cmds=917469i,num_connected_sessions=8896i,num_logins=174827i,read_bytes=30327690466186i,read_count=1772396430i,reset_timestamp="2016-04-08 10:28:45 +0200 CEST",sys_cpu=157965.692,user_cpu=219337.48,vol_cs=2827615787i,write_bytes=17150837661940i,write_count=992653220i 1460106266642153907
 ```
+
+[stats old]: http://wiki2.dovecot.org/Statistics/Old
+[upgrading]: https://wiki2.dovecot.org/Upgrading/2.3#Statistics_Redesign
diff --git a/plugins/inputs/dovecot/dovecot.go b/plugins/inputs/dovecot/dovecot.go
index a621252e5..66282c434 100644
--- a/plugins/inputs/dovecot/dovecot.go
+++ b/plugins/inputs/dovecot/dovecot.go
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	// "log"
 	"net"
 	"strconv"
 	"strings"
@@ -32,8 +31,10 @@ var sampleConfig = `
   ##
   ## If no servers are specified, then localhost is used as the host.
   servers = ["localhost:24242"]
+
   ## Type is one of "user", "domain", "ip", or "global"
   type = "global"
+
   ## Wildcard matches like "*.com". An empty string "" is same as "*"
   ## If type = "ip" filters should be
   filters = [""]
@@ -82,12 +83,12 @@ func (d *Dovecot) Gather(acc telegraf.Accumulator) error {
 func (d *Dovecot) gatherServer(addr string, acc telegraf.Accumulator, qtype string, filter string) error {
 	_, _, err := net.SplitHostPort(addr)
 	if err != nil {
-		return fmt.Errorf("Error: %s on url %s\n", err, addr)
+		return fmt.Errorf("%q on url %s", err.Error(), addr)
 	}
 
 	c, err := net.DialTimeout("tcp", addr, defaultTimeout)
 	if err != nil {
-		return fmt.Errorf("Unable to connect to dovecot server '%s': %s", addr, err)
+		return fmt.Errorf("unable to connect to dovecot server '%s': %s", addr, err)
 	}
 	defer c.Close()
diff --git a/plugins/inputs/ecs/README.md b/plugins/inputs/ecs/README.md
new file mode 100644
index 000000000..f23eb8bab
--- /dev/null
+++ b/plugins/inputs/ecs/README.md
@@ -0,0 +1,212 @@
+# Amazon ECS Input Plugin
+
+The Amazon ECS input plugin (Fargate compatible) uses the [Amazon ECS v2
+metadata and stats API][task-metadata-endpoint-v2] endpoints to gather stats
+on running containers in a Task.
+
+The Telegraf container must be run in the same Task as the workload it is
+inspecting.
+
+This is similar to (and reuses a few pieces of) the [Docker][docker-input]
+input plugin, with some ECS-specific modifications for AWS metadata and stats
+formats.
+
+The amazon-ecs-agent (though it _is_ a container running on the host) is not
+present in the metadata/stats endpoints.
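+As a quick sanity check, the endpoints the plugin polls can be queried
+directly from inside the task. The sketch below is illustrative only; the
+address is the default `endpoint_url` (see Configuration below) and may
+differ in your environment.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+func main() {
+	// Default ECS v2 task metadata endpoint; /v2/stats works the same way.
+	resp, err := http.Get("http://169.254.170.2/v2/metadata")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Print the raw JSON task metadata that the plugin parses into metrics.
+	body, _ := ioutil.ReadAll(resp.Body)
+	fmt.Println(string(body))
+}
+```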
+ +### Configuration + +```toml +# Read metrics about ECS containers +[[inputs.ecs]] + ## ECS metadata url + # endpoint_url = "http://169.254.170.2" + + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + # container_name_include = [] + # container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. + ## When empty only containers in the "RUNNING" state will be captured. + ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", + ## "RESOURCES_PROVISIONED", "STOPPED". + # container_status_include = [] + # container_status_exclude = [] + + ## ecs labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags + ecs_label_include = [ "com.amazonaws.ecs.*" ] + ecs_label_exclude = [] + + ## Timeout for queries. + # timeout = "5s" +``` + +### Metrics + +- ecs_task + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - fields: + - revision (string) + - desired_status (string) + - known_status (string) + - limit_cpu (float) + - limit_mem (float) + ++ ecs_container_mem + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - fields: + - container_id + - active_anon + - active_file + - cache + - hierarchical_memory_limit + - inactive_anon + - inactive_file + - mapped_file + - pgfault + - pgmajfault + - pgpgin + - pgpgout + - rss + - rss_huge + - total_active_anon + - total_active_file + - total_cache + - total_inactive_anon + - total_inactive_file + - total_mapped_file + - total_pgfault + - total_pgmajfault + - total_pgpgin + - total_pgpgout + - total_rss + - total_rss_huge + - total_unevictable + - total_writeback + - unevictable + - writeback + - fail_count + - limit + - max_usage + - usage + - usage_percent + +- ecs_container_cpu + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - cpu + - fields: + - container_id + - usage_total + - usage_in_usermode + - usage_in_kernelmode + - usage_system + - throttling_periods + - throttling_throttled_periods + - throttling_throttled_time + - usage_percent + - usage_total + ++ ecs_container_net + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - network + - fields: + - container_id + - rx_packets + - rx_dropped + - rx_bytes + - rx_errors + - tx_packets + - tx_dropped + - tx_bytes + - tx_errors + +- ecs_container_blkio + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - device + - fields: + - container_id + - io_service_bytes_recursive_async + - io_service_bytes_recursive_read + - io_service_bytes_recursive_sync + - io_service_bytes_recursive_total + - io_service_bytes_recursive_write + - io_serviced_recursive_async + - io_serviced_recursive_read + - io_serviced_recursive_sync + - io_serviced_recursive_total + - io_serviced_recursive_write + ++ ecs_container_meta + - tags: + - cluster + - task_arn + - family + - revision + - id + - name + - fields: + - container_id + - docker_name + - image + - image_id + - desired_status + - known_status + - limit_cpu + - limit_mem + - created_at + - started_at + - type + + +### Example Output + +``` +ecs_task,cluster=test,family=nginx,host=c4b301d4a123,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a revision="2",desired_status="RUNNING",known_status="RUNNING",limit_cpu=0.5,limit_mem=512 1542641488000000000 
+ecs_container_mem,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a active_anon=40960i,active_file=8192i,cache=790528i,pgpgin=1243i,total_pgfault=1298i,total_rss=40960i,limit=1033658368i,max_usage=4825088i,hierarchical_memory_limit=536870912i,rss=40960i,total_active_file=8192i,total_mapped_file=618496i,usage_percent=0.05349543109392212,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",pgfault=1298i,pgmajfault=6i,pgpgout=1040i,total_active_anon=40960i,total_inactive_file=782336i,total_pgpgin=1243i,usage=552960i,inactive_file=782336i,mapped_file=618496i,total_cache=790528i,total_pgpgout=1040i 1542642001000000000 +ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu-total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a usage_in_kernelmode=0i,throttling_throttled_periods=0i,throttling_periods=0i,throttling_throttled_time=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_percent=0,usage_total=26426156i,usage_in_usermode=20000000i,usage_system=2336100000000i 1542642001000000000 +ecs_container_cpu,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,cpu=cpu0,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",usage_total=26426156i 1542642001000000000 +ecs_container_net,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,network=eth0,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a rx_errors=0i,rx_packets=36i,tx_errors=0i,tx_bytes=648i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",rx_dropped=0i,rx_bytes=5338i,tx_packets=8i,tx_dropped=0i 1542642001000000000 
+ecs_container_net,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,network=eth5,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a rx_errors=0i,tx_packets=9i,rx_packets=26i,tx_errors=0i,rx_bytes=4641i,tx_dropped=0i,tx_bytes=690i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",rx_dropped=0i 1542642001000000000 +ecs_container_net,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,network=total,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a rx_dropped=0i,rx_bytes=9979i,rx_errors=0i,rx_packets=62i,tx_bytes=1338i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",tx_packets=17i,tx_dropped=0i,tx_errors=0i 1542642001000000000 +ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=253:1,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_total=790528i,io_serviced_recursive_sync=10i,io_serviced_recursive_write=0i,io_serviced_recursive_async=0i,io_serviced_recursive_total=10i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_read=790528i,io_service_bytes_recursive_write=0i,io_service_bytes_recursive_async=0i,io_serviced_recursive_read=10i 1542642001000000000 +ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=253:2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_total=790528i,io_serviced_recursive_async=0i,io_serviced_recursive_total=10i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_read=790528i,io_service_bytes_recursive_write=0i,io_service_bytes_recursive_async=0i,io_serviced_recursive_read=10i,io_serviced_recursive_write=0i,io_serviced_recursive_sync=10i 1542642001000000000 
+ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=253:4,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_service_bytes_recursive_write=0i,io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_async=0i,io_service_bytes_recursive_total=790528i,io_serviced_recursive_async=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_read=790528i,io_serviced_recursive_read=10i,io_serviced_recursive_write=0i,io_serviced_recursive_sync=10i,io_serviced_recursive_total=10i 1542642001000000000 +ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=202:26368,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_serviced_recursive_read=10i,io_serviced_recursive_write=0i,io_serviced_recursive_sync=10i,io_serviced_recursive_async=0i,io_serviced_recursive_total=10i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_sync=790528i,io_service_bytes_recursive_total=790528i,io_service_bytes_recursive_async=0i,io_service_bytes_recursive_read=790528i,io_service_bytes_recursive_write=0i 1542642001000000000 +ecs_container_blkio,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,device=total,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a io_serviced_recursive_async=0i,io_serviced_recursive_read=40i,io_serviced_recursive_sync=40i,io_serviced_recursive_write=0i,io_serviced_recursive_total=40i,io_service_bytes_recursive_read=3162112i,io_service_bytes_recursive_write=0i,io_service_bytes_recursive_async=0i,container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",io_service_bytes_recursive_sync=3162112i,io_service_bytes_recursive_total=3162112i 1542642001000000000 +ecs_container_meta,cluster=test,com.amazonaws.ecs.cluster=test,com.amazonaws.ecs.container-name=~internal~ecs~pause,com.amazonaws.ecs.task-arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a,com.amazonaws.ecs.task-definition-family=nginx,com.amazonaws.ecs.task-definition-version=2,family=nginx,host=c4b301d4a123,id=e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba,name=~internal~ecs~pause,revision=2,task_arn=arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a 
limit_mem=0,type="CNI_PAUSE",container_id="e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba",docker_name="ecs-nginx-2-internalecspause",limit_cpu=0,known_status="RESOURCES_PROVISIONED",image="amazon/amazon-ecs-pause:0.1.0",image_id="",desired_status="RESOURCES_PROVISIONED" 1542642001000000000 +``` + +[docker-input]: /plugins/inputs/docker/README.md +[task-metadata-endpoint-v2]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v2.html diff --git a/plugins/inputs/ecs/client.go b/plugins/inputs/ecs/client.go new file mode 100644 index 000000000..93074ad79 --- /dev/null +++ b/plugins/inputs/ecs/client.go @@ -0,0 +1,120 @@ +package ecs + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/docker/docker/api/types" +) + +var ( + ecsMetadataPath, _ = url.Parse("/v2/metadata") + ecsMetaStatsPath, _ = url.Parse("/v2/stats") +) + +// Client is the ECS client contract +type Client interface { + Task() (*Task, error) + ContainerStats() (map[string]types.StatsJSON, error) +} + +type httpClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// NewClient constructs an ECS client with the passed configuration params +func NewClient(timeout time.Duration) (*EcsClient, error) { + c := &http.Client{ + Timeout: timeout, + } + + return &EcsClient{ + client: c, + }, nil +} + +// EcsClient contains ECS connection config +type EcsClient struct { + client httpClient + BaseURL *url.URL + taskURL string + statsURL string +} + +// Task calls the ECS metadata endpoint and returns a populated Task +func (c *EcsClient) Task() (*Task, error) { + if c.taskURL == "" { + c.taskURL = c.BaseURL.ResolveReference(ecsMetadataPath).String() + } + + req, _ := http.NewRequest("GET", c.taskURL, nil) + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. + body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.taskURL, resp.Status, body) + } + + task, err := unmarshalTask(resp.Body) + if err != nil { + return nil, err + } + + return task, nil +} + +// ContainerStats calls the ECS stats endpoint and returns a populated container stats map +func (c *EcsClient) ContainerStats() (map[string]types.StatsJSON, error) { + if c.statsURL == "" { + c.statsURL = c.BaseURL.ResolveReference(ecsMetaStatsPath).String() + } + + req, _ := http.NewRequest("GET", c.statsURL, nil) + resp, err := c.client.Do(req) + if err != nil { + return map[string]types.StatsJSON{}, err + } + + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. + body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", c.statsURL, resp.Status, body) + } + + statsMap, err := unmarshalStats(resp.Body) + if err != nil { + return map[string]types.StatsJSON{}, err + } + + return statsMap, nil +} + +// PollSync executes Task and ContainerStats in parallel. If both succeed, both structs are returned. +// If either errors, a single error is returned. 
+func PollSync(c Client) (*Task, map[string]types.StatsJSON, error) { + + var task *Task + var stats map[string]types.StatsJSON + var err error + + if stats, err = c.ContainerStats(); err != nil { + return nil, nil, err + } + + if task, err = c.Task(); err != nil { + return nil, nil, err + } + + return task, stats, nil +} diff --git a/plugins/inputs/ecs/client_test.go b/plugins/inputs/ecs/client_test.go new file mode 100644 index 000000000..6532e5d51 --- /dev/null +++ b/plugins/inputs/ecs/client_test.go @@ -0,0 +1,240 @@ +package ecs + +import ( + "bytes" + "errors" + "io/ioutil" + "net/http" + "os" + "testing" + + "github.com/docker/docker/api/types" + "github.com/stretchr/testify/assert" +) + +type pollMock struct { + task func() (*Task, error) + stats func() (map[string]types.StatsJSON, error) +} + +func (p *pollMock) Task() (*Task, error) { + return p.task() +} + +func (p *pollMock) ContainerStats() (map[string]types.StatsJSON, error) { + return p.stats() +} + +func TestEcsClient_PollSync(t *testing.T) { + + tests := []struct { + name string + mock *pollMock + want *Task + want1 map[string]types.StatsJSON + wantErr bool + }{ + { + name: "success", + mock: &pollMock{ + task: func() (*Task, error) { + return &validMeta, nil + }, + stats: func() (map[string]types.StatsJSON, error) { + return validStats, nil + }, + }, + want: &validMeta, + want1: validStats, + }, + { + name: "task err", + mock: &pollMock{ + task: func() (*Task, error) { + return nil, errors.New("err") + }, + stats: func() (map[string]types.StatsJSON, error) { + return validStats, nil + }, + }, + wantErr: true, + }, + { + name: "stats err", + mock: &pollMock{ + task: func() (*Task, error) { + return &validMeta, nil + }, + stats: func() (map[string]types.StatsJSON, error) { + return nil, errors.New("err") + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := PollSync(tt.mock) + + if (err != nil) != tt.wantErr { + t.Errorf("EcsClient.PollSync() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got, "EcsClient.PollSync() got = %v, want %v", got, tt.want) + assert.Equal(t, tt.want1, got1, "EcsClient.PollSync() got1 = %v, want %v", got1, tt.want1) + }) + } +} + +type mockDo struct { + do func(req *http.Request) (*http.Response, error) +} + +func (m mockDo) Do(req *http.Request) (*http.Response, error) { + return m.do(req) +} + +func TestEcsClient_Task(t *testing.T) { + rc, _ := os.Open("testdata/metadata.golden") + tests := []struct { + name string + client httpClient + want *Task + wantErr bool + }{ + { + name: "happy", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(rc), + }, nil + }, + }, + want: &validMeta, + }, + { + name: "do err", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return nil, errors.New("err") + }, + }, + wantErr: true, + }, + { + name: "malformed 500 resp", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusInternalServerError, + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + }, nil + }, + }, + wantErr: true, + }, + { + name: "malformed 200 resp", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + }, nil + }, + }, + wantErr: true, + }, + } + + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + c := &EcsClient{ + client: tt.client, + taskURL: "abc", + } + got, err := c.Task() + if (err != nil) != tt.wantErr { + t.Errorf("EcsClient.Task() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got, "EcsClient.Task() = %v, want %v", got, tt.want) + }) + } +} + +func TestEcsClient_ContainerStats(t *testing.T) { + rc, _ := os.Open("testdata/stats.golden") + tests := []struct { + name string + client httpClient + want map[string]types.StatsJSON + wantErr bool + }{ + { + name: "happy", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(rc), + }, nil + }, + }, + want: validStats, + }, + { + name: "do err", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return nil, errors.New("err") + }, + }, + want: map[string]types.StatsJSON{}, + wantErr: true, + }, + { + name: "malformed 200 resp", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + }, nil + }, + }, + want: map[string]types.StatsJSON{}, + wantErr: true, + }, + { + name: "malformed 500 resp", + client: mockDo{ + do: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusInternalServerError, + Body: ioutil.NopCloser(bytes.NewReader([]byte("foo"))), + }, nil + }, + }, + want: nil, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &EcsClient{ + client: tt.client, + statsURL: "abc", + } + got, err := c.ContainerStats() + if (err != nil) != tt.wantErr { + t.Errorf("EcsClient.ContainerStats() error = %v, wantErr %v", err, tt.wantErr) + return + } + assert.Equal(t, tt.want, got, "EcsClient.ContainerStats() = %v, want %v", got, tt.want) + }) + } +} diff --git a/plugins/inputs/ecs/ecs.go b/plugins/inputs/ecs/ecs.go new file mode 100644 index 000000000..b3fe5f347 --- /dev/null +++ b/plugins/inputs/ecs/ecs.go @@ -0,0 +1,249 @@ +package ecs + +import ( + "net/url" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Ecs config object +type Ecs struct { + EndpointURL string `toml:"endpoint_url"` + Timeout internal.Duration + + ContainerNameInclude []string `toml:"container_name_include"` + ContainerNameExclude []string `toml:"container_name_exclude"` + + ContainerStatusInclude []string `toml:"container_status_include"` + ContainerStatusExclude []string `toml:"container_status_exclude"` + + LabelInclude []string `toml:"ecs_label_include"` + LabelExclude []string `toml:"ecs_label_exclude"` + + newClient func(timeout time.Duration) (*EcsClient, error) + + client Client + filtersCreated bool + labelFilter filter.Filter + containerNameFilter filter.Filter + statusFilter filter.Filter +} + +const ( + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB +) + +var sampleConfig = ` + ## ECS metadata url + # endpoint_url = "http://169.254.170.2" + + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers + # container_name_include = [] + # container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. 
+ ## When empty only containers in the "RUNNING" state will be captured. + ## Possible values are "NONE", "PULLED", "CREATED", "RUNNING", + ## "RESOURCES_PROVISIONED", "STOPPED". + # container_status_include = [] + # container_status_exclude = [] + + ## ecs labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags + ecs_label_include = [ "com.amazonaws.ecs.*" ] + ecs_label_exclude = [] + + ## Timeout for queries. + # timeout = "5s" +` + +// Description describes ECS plugin +func (ecs *Ecs) Description() string { + return "Read metrics about docker containers from Fargate/ECS v2 meta endpoints." +} + +// SampleConfig returns the ECS example config +func (ecs *Ecs) SampleConfig() string { + return sampleConfig +} + +// Gather is the entrypoint for telegraf metrics collection +func (ecs *Ecs) Gather(acc telegraf.Accumulator) error { + err := initSetup(ecs) + if err != nil { + return err + } + + task, stats, err := PollSync(ecs.client) + if err != nil { + return err + } + + mergeTaskStats(task, stats) + + taskTags := map[string]string{ + "cluster": task.Cluster, + "task_arn": task.TaskARN, + "family": task.Family, + "revision": task.Revision, + } + + // accumulate metrics + ecs.accTask(task, taskTags, acc) + ecs.accContainers(task, taskTags, acc) + + return nil +} + +func initSetup(ecs *Ecs) error { + if ecs.client == nil { + var err error + var c *EcsClient + c, err = ecs.newClient(ecs.Timeout.Duration) + if err != nil { + return err + } + + c.BaseURL, err = url.Parse(ecs.EndpointURL) + if err != nil { + return err + } + + ecs.client = c + } + + // Create filters + if !ecs.filtersCreated { + err := ecs.createContainerNameFilters() + if err != nil { + return err + } + err = ecs.createContainerStatusFilters() + if err != nil { + return err + } + err = ecs.createLabelFilters() + if err != nil { + return err + } + ecs.filtersCreated = true + } + + return nil +} + +func (ecs *Ecs) accTask(task *Task, tags map[string]string, acc telegraf.Accumulator) { + taskFields := map[string]interface{}{ + "revision": task.Revision, + "desired_status": task.DesiredStatus, + "known_status": task.KnownStatus, + "limit_cpu": task.Limits["CPU"], + "limit_mem": task.Limits["Memory"], + } + + acc.AddFields("ecs_task", taskFields, tags, task.PullStoppedAt) +} + +func (ecs *Ecs) accContainers(task *Task, taskTags map[string]string, acc telegraf.Accumulator) { + for _, c := range task.Containers { + if !ecs.containerNameFilter.Match(c.Name) { + continue + } + + if !ecs.statusFilter.Match(strings.ToUpper(c.KnownStatus)) { + continue + } + + // add matching ECS container Labels + containerTags := map[string]string{ + "id": c.ID, + "name": c.Name, + } + for k, v := range c.Labels { + if ecs.labelFilter.Match(k) { + containerTags[k] = v + } + } + tags := mergeTags(taskTags, containerTags) + + parseContainerStats(c, acc, tags) + } +} + +// returns a new map with the same content values as the input map +func copyTags(in map[string]string) map[string]string { + out := make(map[string]string) + for k, v := range in { + out[k] = v + } + return out +} + +// returns a new map with the merged content values of the two input maps +func mergeTags(a map[string]string, b map[string]string) map[string]string { + c := copyTags(a) + for k, v := range b { + c[k] = v + } + return c +} + +func (ecs *Ecs) createContainerNameFilters() error { + filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerNameInclude, ecs.ContainerNameExclude) + if err != nil { + return err + } 
+ ecs.containerNameFilter = filter + return nil +} + +func (ecs *Ecs) createLabelFilters() error { + filter, err := filter.NewIncludeExcludeFilter(ecs.LabelInclude, ecs.LabelExclude) + if err != nil { + return err + } + ecs.labelFilter = filter + return nil +} + +func (ecs *Ecs) createContainerStatusFilters() error { + if len(ecs.ContainerStatusInclude) == 0 && len(ecs.ContainerStatusExclude) == 0 { + ecs.ContainerStatusInclude = []string{"RUNNING"} + } + + // ECS uses uppercase status names, normalizing for comparison. + for i, include := range ecs.ContainerStatusInclude { + ecs.ContainerStatusInclude[i] = strings.ToUpper(include) + } + for i, exclude := range ecs.ContainerStatusExclude { + ecs.ContainerStatusExclude[i] = strings.ToUpper(exclude) + } + + filter, err := filter.NewIncludeExcludeFilter(ecs.ContainerStatusInclude, ecs.ContainerStatusExclude) + if err != nil { + return err + } + ecs.statusFilter = filter + return nil +} + +func init() { + inputs.Add("ecs", func() telegraf.Input { + return &Ecs{ + EndpointURL: "http://169.254.170.2", + Timeout: internal.Duration{Duration: 5 * time.Second}, + newClient: NewClient, + filtersCreated: false, + } + }) +} diff --git a/plugins/inputs/ecs/ecs_test.go b/plugins/inputs/ecs/ecs_test.go new file mode 100644 index 000000000..b105a433f --- /dev/null +++ b/plugins/inputs/ecs/ecs_test.go @@ -0,0 +1,767 @@ +package ecs + +import ( + "time" + + "github.com/docker/docker/api/types" +) + +// codified golden objects for tests + +// stats +const pauseStatsKey = "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba" +const nginxStatsKey = "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299" + +var pauseStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.936081344Z") +var pauseStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.933000984Z") + +var nginxStatsRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:40:00.93733207Z") +var nginxStatsPreRead, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:39:59.934291009Z") + +var validStats = map[string]types.StatsJSON{ + pauseStatsKey: { + Stats: types.Stats{ + Read: pauseStatsRead, + PreRead: pauseStatsPreRead, + BlkioStats: types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{ + { + Major: 202, + Minor: 26368, + Op: "Read", + Value: 790528, + }, + { + Major: 202, + Minor: 26368, + Op: "Write", + }, + { + Major: 202, + Minor: 26368, + Op: "Sync", + Value: 790528, + }, + { + Major: 202, + Minor: 26368, + Op: "Async", + }, + { + Major: 202, + Minor: 26368, + Op: "Total", + Value: 790528, + }, + { + Major: 253, + Minor: 1, + Op: "Read", + Value: 790528, + }, + { + Major: 253, + Minor: 1, + Op: "Write", + }, + { + Major: 253, + Minor: 1, + Op: "Sync", + Value: 790528, + }, + { + Major: 253, + Minor: 1, + Op: "Async", + }, + { + Major: 253, + Minor: 1, + Op: "Total", + Value: 790528, + }, + { + Major: 253, + Minor: 2, + Op: "Read", + Value: 790528, + }, + { + Major: 253, + Minor: 2, + Op: "Write", + }, + { + Major: 253, + Minor: 2, + Op: "Sync", + Value: 790528, + }, + { + Major: 253, + Minor: 2, + Op: "Async", + }, + { + Major: 253, + Minor: 2, + Op: "Total", + Value: 790528, + }, + { + Major: 253, + Minor: 4, + Op: "Read", + Value: 790528, + }, + { + Major: 253, + Minor: 4, + Op: "Write", + }, + { + Major: 253, + Minor: 4, + Op: "Sync", + Value: 790528, + }, + { + Major: 253, + Minor: 4, + Op: "Async", + }, + { + Major: 253, + Minor: 4, + Op: "Total", + Value: 790528, + }, + }, + IoServicedRecursive: []types.BlkioStatEntry{ + { + Major: 
202, + Minor: 26368, + Op: "Read", + Value: 10, + }, + { + Major: 202, + Minor: 26368, + Op: "Write", + }, + { + Major: 202, + Minor: 26368, + Op: "Sync", + Value: 10, + }, + { + Major: 202, + Minor: 26368, + Op: "Async", + }, + { + Major: 202, + Minor: 26368, + Op: "Total", + Value: 10, + }, + { + Major: 253, + Minor: 1, + Op: "Read", + Value: 10, + }, + { + Major: 253, + Minor: 1, + Op: "Write", + }, + { + Major: 253, + Minor: 1, + Op: "Sync", + Value: 10, + }, + { + Major: 253, + Minor: 1, + Op: "Async", + }, + { + Major: 253, + Minor: 1, + Op: "Total", + Value: 10, + }, + { + Major: 253, + Minor: 2, + Op: "Read", + Value: 10, + }, + { + Major: 253, + Minor: 2, + Op: "Write", + }, + { + Major: 253, + Minor: 2, + Op: "Sync", + Value: 10, + }, + { + Major: 253, + Minor: 2, + Op: "Async", + }, + { + Major: 253, + Minor: 2, + Op: "Total", + Value: 10, + }, + { + Major: 253, + Minor: 4, + Op: "Read", + Value: 10, + }, + { + Major: 253, + Minor: 4, + Op: "Write", + }, + { + Major: 253, + Minor: 4, + Op: "Sync", + Value: 10, + }, + { + Major: 253, + Minor: 4, + Op: "Async", + }, + { + Major: 253, + Minor: 4, + Op: "Total", + Value: 10, + }, + }, + }, + CPUStats: types.CPUStats{ + CPUUsage: types.CPUUsage{ + PercpuUsage: []uint64{ + 26426156, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + }, + UsageInUsermode: 20000000, + TotalUsage: 26426156, + }, + SystemUsage: 2336100000000, + OnlineCPUs: 1, + ThrottlingData: types.ThrottlingData{}, + }, + PreCPUStats: types.CPUStats{ + CPUUsage: types.CPUUsage{ + PercpuUsage: []uint64{ + 26426156, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + }, + UsageInUsermode: 20000000, + TotalUsage: 26426156, + }, + SystemUsage: 2335090000000, + OnlineCPUs: 1, + ThrottlingData: types.ThrottlingData{}, + }, + MemoryStats: types.MemoryStats{ + Stats: map[string]uint64{ + "cache": 790528, + "mapped_file": 618496, + "total_inactive_file": 782336, + "pgpgout": 1040, + "rss": 40960, + "total_mapped_file": 618496, + "pgpgin": 1243, + "pgmajfault": 6, + "total_rss": 40960, + "hierarchical_memory_limit": 536870912, + "total_pgfault": 1298, + "total_active_file": 8192, + "active_anon": 40960, + "total_active_anon": 40960, + "total_pgpgout": 1040, + "total_cache": 790528, + "active_file": 8192, + "pgfault": 1298, + "inactive_file": 782336, + "total_pgpgin": 1243, + "hierarchical_memsw_limit": 9223372036854772000, + }, + MaxUsage: 4825088, + Usage: 1343488, + Limit: 1033658368, + }, + }, + Networks: map[string]types.NetworkStats{ + "eth0": { + RxBytes: uint64(5338), + RxDropped: uint64(0), + RxErrors: uint64(0), + RxPackets: uint64(36), + TxBytes: uint64(648), + TxDropped: uint64(0), + TxErrors: uint64(0), + TxPackets: uint64(8), + }, + "eth5": { + RxBytes: uint64(4641), + RxDropped: uint64(0), + RxErrors: uint64(0), + RxPackets: uint64(26), + TxBytes: uint64(690), + TxDropped: uint64(0), + TxErrors: uint64(0), + TxPackets: uint64(9), + }, + }, + }, + nginxStatsKey: { + Stats: types.Stats{ + Read: nginxStatsRead, + PreRead: nginxStatsPreRead, + BlkioStats: types.BlkioStats{ + IoServiceBytesRecursive: []types.BlkioStatEntry{ + { + Major: 202, + Minor: 26368, + Op: "Read", + Value: 5730304, + }, + { + Major: 202, + Minor: 26368, + Op: "Write", + }, + { + Major: 202, + Minor: 26368, + Op: "Sync", + Value: 5730304, + }, + { + Major: 202, + Minor: 26368, + Op: "Async", + }, + { + Major: 202, + Minor: 26368, + Op: "Total", + Value: 5730304, + }, + { + Major: 253, + Minor: 1, + Op: "Read", + Value: 5730304, + }, + { + Major: 253, + 
Minor: 1, + Op: "Write", + }, + { + Major: 253, + Minor: 1, + Op: "Sync", + Value: 5730304, + }, + { + Major: 253, + Minor: 1, + Op: "Async", + }, + { + Major: 253, + Minor: 1, + Op: "Total", + Value: 5730304, + }, + { + Major: 253, + Minor: 2, + Op: "Read", + Value: 5730304, + }, + { + Major: 253, + Minor: 2, + Op: "Write", + }, + { + Major: 253, + Minor: 2, + Op: "Sync", + Value: 5730304, + }, + { + Major: 253, + Minor: 2, + Op: "Async", + }, + { + Major: 253, + Minor: 2, + Op: "Total", + Value: 5730304, + }, + { + Major: 253, + Minor: 5, + Op: "Read", + Value: 5730304, + }, + { + Major: 253, + Minor: 5, + Op: "Write", + }, + { + Major: 253, + Minor: 5, + Op: "Sync", + Value: 5730304, + }, + { + Major: 253, + Minor: 5, + Op: "Async", + }, + { + Major: 253, + Minor: 5, + Op: "Total", + Value: 5730304, + }, + }, + IoServicedRecursive: []types.BlkioStatEntry{ + { + Major: 202, + Minor: 26368, + Op: "Read", + Value: 156, + }, + { + Major: 202, + Minor: 26368, + Op: "Write", + }, + { + Major: 202, + Minor: 26368, + Op: "Sync", + Value: 156, + }, + { + Major: 202, + Minor: 26368, + Op: "Async", + }, + { + Major: 202, + Minor: 26368, + Op: "Total", + Value: 156, + }, + { + Major: 253, + Minor: 1, + Op: "Read", + Value: 156, + }, + { + Major: 253, + Minor: 1, + Op: "Write", + }, + { + Major: 253, + Minor: 1, + Op: "Sync", + Value: 156, + }, + { + Major: 253, + Minor: 1, + Op: "Async", + }, + { + Major: 253, + Minor: 1, + Op: "Total", + Value: 156, + }, + { + Major: 253, + Minor: 2, + Op: "Read", + Value: 156, + }, + { + Major: 253, + Minor: 2, + Op: "Write", + }, + { + Major: 253, + Minor: 2, + Op: "Sync", + Value: 156, + }, + { + Major: 253, + Minor: 2, + Op: "Async", + }, + { + Major: 253, + Minor: 2, + Op: "Total", + Value: 156, + }, + { + Major: 253, + Minor: 5, + Op: "Read", + Value: 147, + }, + { + Major: 253, + Minor: 5, + Op: "Write", + }, + { + Major: 253, + Minor: 5, + Op: "Sync", + Value: 147, + }, + { + Major: 253, + Minor: 5, + Op: "Async", + }, + { + Major: 253, + Minor: 5, + Op: "Total", + Value: 147, + }, + }, + }, + CPUStats: types.CPUStats{ + CPUUsage: types.CPUUsage{ + PercpuUsage: []uint64{ + 65599511, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + }, + UsageInUsermode: 40000000, + TotalUsage: 65599511, + UsageInKernelmode: 10000000, + }, + SystemUsage: 2336100000000, + OnlineCPUs: 1, + ThrottlingData: types.ThrottlingData{}, + }, + PreCPUStats: types.CPUStats{ + CPUUsage: types.CPUUsage{ + PercpuUsage: []uint64{ + 65599511, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + }, + UsageInUsermode: 40000000, + TotalUsage: 65599511, + UsageInKernelmode: 10000000, + }, + SystemUsage: 2335090000000, + OnlineCPUs: 1, + ThrottlingData: types.ThrottlingData{}, + }, + MemoryStats: types.MemoryStats{ + Stats: map[string]uint64{ + "cache": 5787648, + "mapped_file": 3616768, + "total_inactive_file": 4321280, + "pgpgout": 1674, + "rss": 1597440, + "total_mapped_file": 3616768, + "pgpgin": 3477, + "pgmajfault": 40, + "total_rss": 1597440, + "total_inactive_anon": 4096, + "hierarchical_memory_limit": 536870912, + "total_pgfault": 2924, + "total_active_file": 1462272, + "active_anon": 1597440, + "total_active_anon": 1597440, + "total_pgpgout": 1674, + "total_cache": 5787648, + "inactive_anon": 4096, + "active_file": 1462272, + "pgfault": 2924, + "inactive_file": 4321280, + "total_pgpgin": 3477, + "hierarchical_memsw_limit": 9223372036854772000, + }, + MaxUsage: 8667136, + Usage: 8179712, + Limit: 1033658368, + }, + }, + }, +} + +// meta +var 
metaPauseCreated, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:26.641964373Z") +var metaPauseStarted, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.035698679Z") +var metaCreated, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.614884084Z") +var metaStarted, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.975996351Z") +var metaPullStart, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.197327103Z") +var metaPullStop, _ = time.Parse(time.RFC3339Nano, "2018-11-19T15:31:27.609089471Z") + +var validMeta = Task{ + Cluster: "test", + TaskARN: "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + Family: "nginx", + Revision: "2", + DesiredStatus: "RUNNING", + KnownStatus: "RUNNING", + Containers: []Container{ + { + ID: pauseStatsKey, + Name: "~internal~ecs~pause", + DockerName: "ecs-nginx-2-internalecspause", + Image: "amazon/amazon-ecs-pause:0.1.0", + ImageID: "", + Labels: map[string]string{ + "com.amazonaws.ecs.cluster": "test", + "com.amazonaws.ecs.container-name": "~internal~ecs~pause", + "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "com.amazonaws.ecs.task-definition-family": "nginx", + "com.amazonaws.ecs.task-definition-version": "2", + }, + DesiredStatus: "RESOURCES_PROVISIONED", + KnownStatus: "RESOURCES_PROVISIONED", + Limits: map[string]float64{ + "CPU": 0, + "Memory": 0, + }, + CreatedAt: metaPauseCreated, + StartedAt: metaPauseStarted, + Type: "CNI_PAUSE", + Networks: []Network{ + { + NetworkMode: "awsvpc", + IPv4Addresses: []string{ + "172.31.25.181", + }, + }, + }, + }, + { + ID: nginxStatsKey, + Name: "nginx", + DockerName: "ecs-nginx-2-nginx", + Image: "nginx:alpine", + ImageID: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + Labels: map[string]string{ + "com.amazonaws.ecs.cluster": "test", + "com.amazonaws.ecs.container-name": "nginx", + "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "com.amazonaws.ecs.task-definition-family": "nginx", + "com.amazonaws.ecs.task-definition-version": "2", + }, + DesiredStatus: "RUNNING", + KnownStatus: "RUNNING", + Limits: map[string]float64{ + "CPU": 0, + "Memory": 0, + }, + CreatedAt: metaCreated, + StartedAt: metaStarted, + Type: "NORMAL", + Networks: []Network{ + { + NetworkMode: "awsvpc", + IPv4Addresses: []string{ + "172.31.25.181", + }, + }, + }, + }, + }, + Limits: map[string]float64{ + "CPU": 0.5, + "Memory": 512, + }, + PullStartedAt: metaPullStart, + PullStoppedAt: metaPullStop, +} diff --git a/plugins/inputs/ecs/stats.go b/plugins/inputs/ecs/stats.go new file mode 100644 index 000000000..d2a8ee5d3 --- /dev/null +++ b/plugins/inputs/ecs/stats.go @@ -0,0 +1,295 @@ +package ecs + +import ( + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs/docker" +) + +func parseContainerStats(c Container, acc telegraf.Accumulator, tags map[string]string) { + id := c.ID + stats := c.Stats + tm := stats.Read + + if tm.Before(time.Unix(0, 0)) { + tm = time.Now() + } + + metastats(id, c, acc, tags, tm) + memstats(id, stats, acc, tags, tm) + cpustats(id, stats, acc, tags, tm) + netstats(id, stats, acc, tags, tm) + blkstats(id, stats, acc, tags, tm) +} + +func metastats(id string, c Container, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + metafields := map[string]interface{}{ + "container_id": id, + "docker_name": 
c.DockerName, + "image": c.Image, + "image_id": c.ImageID, + "desired_status": c.DesiredStatus, + "known_status": c.KnownStatus, + "limit_cpu": c.Limits["CPU"], + "limit_mem": c.Limits["Memory"], + "created_at": c.CreatedAt, + "started_at": c.StartedAt, + "type": c.Type, + } + + acc.AddFields("ecs_container_meta", metafields, tags, tm) +} + +func memstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + memfields := map[string]interface{}{ + "container_id": id, + } + + memstats := []string{ + "active_anon", + "active_file", + "cache", + "hierarchical_memory_limit", + "inactive_anon", + "inactive_file", + "mapped_file", + "pgfault", + "pgmajfault", + "pgpgin", + "pgpgout", + "rss", + "rss_huge", + "total_active_anon", + "total_active_file", + "total_cache", + "total_inactive_anon", + "total_inactive_file", + "total_mapped_file", + "total_pgfault", + "total_pgmajfault", + "total_pgpgin", + "total_pgpgout", + "total_rss", + "total_rss_huge", + "total_unevictable", + "total_writeback", + "unevictable", + "writeback", + } + + for _, field := range memstats { + if value, ok := stats.MemoryStats.Stats[field]; ok { + memfields[field] = value + } + } + if stats.MemoryStats.Failcnt != 0 { + memfields["fail_count"] = stats.MemoryStats.Failcnt + } + + memfields["limit"] = stats.MemoryStats.Limit + memfields["max_usage"] = stats.MemoryStats.MaxUsage + + mem := docker.CalculateMemUsageUnixNoCache(stats.MemoryStats) + memLimit := float64(stats.MemoryStats.Limit) + memfields["usage"] = uint64(mem) + memfields["usage_percent"] = docker.CalculateMemPercentUnixNoCache(memLimit, mem) + + acc.AddFields("ecs_container_mem", memfields, tags, tm) +} + +func cpustats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + cpufields := map[string]interface{}{ + "usage_total": stats.CPUStats.CPUUsage.TotalUsage, + "usage_in_usermode": stats.CPUStats.CPUUsage.UsageInUsermode, + "usage_in_kernelmode": stats.CPUStats.CPUUsage.UsageInKernelmode, + "usage_system": stats.CPUStats.SystemUsage, + "throttling_periods": stats.CPUStats.ThrottlingData.Periods, + "throttling_throttled_periods": stats.CPUStats.ThrottlingData.ThrottledPeriods, + "throttling_throttled_time": stats.CPUStats.ThrottlingData.ThrottledTime, + "container_id": id, + } + + previousCPU := stats.PreCPUStats.CPUUsage.TotalUsage + previousSystem := stats.PreCPUStats.SystemUsage + cpuPercent := docker.CalculateCPUPercentUnix(previousCPU, previousSystem, &stats) + cpufields["usage_percent"] = cpuPercent + + cputags := copyTags(tags) + cputags["cpu"] = "cpu-total" + acc.AddFields("ecs_container_cpu", cpufields, cputags, tm) + + // If we have OnlineCPUs field, then use it to restrict stats gathering to only Online CPUs + // (https://github.com/moby/moby/commit/115f91d7575d6de6c7781a96a082f144fd17e400) + var percpuusage []uint64 + if stats.CPUStats.OnlineCPUs > 0 { + percpuusage = stats.CPUStats.CPUUsage.PercpuUsage[:stats.CPUStats.OnlineCPUs] + } else { + percpuusage = stats.CPUStats.CPUUsage.PercpuUsage + } + + for i, percpu := range percpuusage { + percputags := copyTags(tags) + percputags["cpu"] = fmt.Sprintf("cpu%d", i) + fields := map[string]interface{}{ + "usage_total": percpu, + "container_id": id, + } + acc.AddFields("ecs_container_cpu", fields, percputags, tm) + } +} + +func netstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + totalNetworkStatMap := make(map[string]interface{}) + for network, netstats 
:= range stats.Networks { + netfields := map[string]interface{}{ + "rx_dropped": netstats.RxDropped, + "rx_bytes": netstats.RxBytes, + "rx_errors": netstats.RxErrors, + "tx_packets": netstats.TxPackets, + "tx_dropped": netstats.TxDropped, + "rx_packets": netstats.RxPackets, + "tx_errors": netstats.TxErrors, + "tx_bytes": netstats.TxBytes, + "container_id": id, + } + + nettags := copyTags(tags) + nettags["network"] = network + acc.AddFields("ecs_container_net", netfields, nettags, tm) + + for field, value := range netfields { + if field == "container_id" { + continue + } + + var uintV uint64 + switch v := value.(type) { + case uint64: + uintV = v + case int64: + uintV = uint64(v) + default: + continue + } + + _, ok := totalNetworkStatMap[field] + if ok { + totalNetworkStatMap[field] = totalNetworkStatMap[field].(uint64) + uintV + } else { + totalNetworkStatMap[field] = uintV + } + } + } + + // totalNetworkStatMap could be empty if container is running with --net=host. + if len(totalNetworkStatMap) != 0 { + nettags := copyTags(tags) + nettags["network"] = "total" + totalNetworkStatMap["container_id"] = id + acc.AddFields("ecs_container_net", totalNetworkStatMap, nettags, tm) + } +} + +func blkstats(id string, stats types.StatsJSON, acc telegraf.Accumulator, tags map[string]string, tm time.Time) { + blkioStats := stats.BlkioStats + // Make a map of devices to their block io stats + deviceStatMap := make(map[string]map[string]interface{}) + + for _, metric := range blkioStats.IoServiceBytesRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + _, ok := deviceStatMap[device] + if !ok { + deviceStatMap[device] = make(map[string]interface{}) + } + + field := fmt.Sprintf("io_service_bytes_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoServicedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + _, ok := deviceStatMap[device] + if !ok { + deviceStatMap[device] = make(map[string]interface{}) + } + + field := fmt.Sprintf("io_serviced_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoQueuedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_queue_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoServiceTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_service_time_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoWaitTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_wait_time_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoMergedRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + field := fmt.Sprintf("io_merged_recursive_%s", strings.ToLower(metric.Op)) + deviceStatMap[device][field] = metric.Value + } + + for _, metric := range blkioStats.IoTimeRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + deviceStatMap[device]["io_time_recursive"] = metric.Value + } + + for _, metric := range blkioStats.SectorsRecursive { + device := fmt.Sprintf("%d:%d", metric.Major, metric.Minor) + deviceStatMap[device]["sectors_recursive"] = metric.Value + } + + totalStatMap := 
make(map[string]interface{}) + for device, fields := range deviceStatMap { + fields["container_id"] = id + + iotags := copyTags(tags) + iotags["device"] = device + acc.AddFields("ecs_container_blkio", fields, iotags, tm) + + for field, value := range fields { + if field == "container_id" { + continue + } + + var uintV uint64 + switch v := value.(type) { + case uint64: + uintV = v + case int64: + uintV = uint64(v) + default: + continue + } + + _, ok := totalStatMap[field] + if ok { + totalStatMap[field] = totalStatMap[field].(uint64) + uintV + } else { + totalStatMap[field] = uintV + } + + } + } + + totalStatMap["container_id"] = id + iotags := copyTags(tags) + iotags["device"] = "total" + acc.AddFields("ecs_container_blkio", totalStatMap, iotags, tm) +} diff --git a/plugins/inputs/ecs/stats_test.go b/plugins/inputs/ecs/stats_test.go new file mode 100644 index 000000000..04632ac61 --- /dev/null +++ b/plugins/inputs/ecs/stats_test.go @@ -0,0 +1,226 @@ +package ecs + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" +) + +func Test_metastats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + metastats(nginxStatsKey, validMeta.Containers[1], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_meta", + map[string]interface{}{ + "container_id": nginxStatsKey, + "docker_name": "ecs-nginx-2-nginx", + "image": "nginx:alpine", + "image_id": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "desired_status": "RUNNING", + "known_status": "RUNNING", + "limit_cpu": float64(0), + "limit_mem": float64(0), + "created_at": metaCreated, + "started_at": metaStarted, + "type": "NORMAL", + }, + tags, + ) +} + +func Test_memstats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + memstats(nginxStatsKey, validStats[nginxStatsKey], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_mem", + map[string]interface{}{ + "active_anon": uint64(1597440), + "active_file": uint64(1462272), + "cache": uint64(5787648), + "container_id": nginxStatsKey, + "hierarchical_memory_limit": uint64(536870912), + "inactive_anon": uint64(4096), + "inactive_file": uint64(4321280), + "limit": uint64(1033658368), + "mapped_file": uint64(3616768), + "max_usage": uint64(8667136), + "pgmajfault": uint64(40), + "pgpgin": uint64(3477), + "pgpgout": uint64(1674), + "pgfault": uint64(2924), + "rss": uint64(1597440), + "total_active_anon": uint64(1597440), + "total_active_file": uint64(1462272), + "total_cache": uint64(5787648), + "total_inactive_anon": uint64(4096), + "total_inactive_file": uint64(4321280), + "total_mapped_file": uint64(3616768), + "total_pgfault": uint64(2924), + "total_pgpgout": uint64(1674), + "total_pgpgin": uint64(3477), + "total_rss": uint64(1597440), + "usage": uint64(2392064), + "usage_percent": float64(0.23141727228778164), + }, + map[string]string{ + "test_tag": "test", + }, + ) +} + +func Test_cpustats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + cpustats(nginxStatsKey, validStats[nginxStatsKey], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_cpu", + map[string]interface{}{ + "container_id": nginxStatsKey, + "throttling_periods": uint64(0), + "throttling_throttled_periods": uint64(0), + "throttling_throttled_time": uint64(0), + 
"usage_in_usermode": uint64(40000000), + "usage_in_kernelmode": uint64(10000000), + "usage_percent": float64(0), + "usage_system": uint64(2336100000000), + "usage_total": uint64(65599511), + }, + map[string]string{ + "test_tag": "test", + "cpu": "cpu-total", + }, + ) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_cpu", + map[string]interface{}{ + "container_id": nginxStatsKey, + "usage_total": uint64(65599511), + }, + map[string]string{ + "test_tag": "test", + "cpu": "cpu0", + }, + ) +} + +func Test_netstats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + netstats(pauseStatsKey, validStats[pauseStatsKey], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_net", + map[string]interface{}{ + "container_id": pauseStatsKey, + "rx_bytes": uint64(5338), + "rx_dropped": uint64(0), + "rx_errors": uint64(0), + "rx_packets": uint64(36), + "tx_bytes": uint64(648), + "tx_dropped": uint64(0), + "tx_errors": uint64(0), + "tx_packets": uint64(8), + }, + map[string]string{ + "test_tag": "test", + "network": "eth0", + }, + ) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_net", + map[string]interface{}{ + "container_id": pauseStatsKey, + "rx_bytes": uint64(4641), + "rx_dropped": uint64(0), + "rx_errors": uint64(0), + "rx_packets": uint64(26), + "tx_bytes": uint64(690), + "tx_dropped": uint64(0), + "tx_errors": uint64(0), + "tx_packets": uint64(9), + }, + map[string]string{ + "test_tag": "test", + "network": "eth5", + }, + ) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_net", + map[string]interface{}{ + "container_id": pauseStatsKey, + "rx_bytes": uint64(9979), + "rx_dropped": uint64(0), + "rx_errors": uint64(0), + "rx_packets": uint64(62), + "tx_bytes": uint64(1338), + "tx_dropped": uint64(0), + "tx_errors": uint64(0), + "tx_packets": uint64(17), + }, + map[string]string{ + "test_tag": "test", + "network": "total", + }, + ) +} + +func Test_blkstats(t *testing.T) { + var mockAcc testutil.Accumulator + + tags := map[string]string{ + "test_tag": "test", + } + tm := time.Now() + + blkstats(nginxStatsKey, validStats[nginxStatsKey], &mockAcc, tags, tm) + mockAcc.AssertContainsTaggedFields( + t, + "ecs_container_blkio", + map[string]interface{}{ + "container_id": nginxStatsKey, + "io_service_bytes_recursive_read": uint64(5730304), + "io_service_bytes_recursive_write": uint64(0), + "io_service_bytes_recursive_sync": uint64(5730304), + "io_service_bytes_recursive_async": uint64(0), + "io_service_bytes_recursive_total": uint64(5730304), + "io_serviced_recursive_read": uint64(156), + "io_serviced_recursive_write": uint64(0), + "io_serviced_recursive_sync": uint64(156), + "io_serviced_recursive_async": uint64(0), + "io_serviced_recursive_total": uint64(156), + }, + map[string]string{ + "test_tag": "test", + "device": "202:26368", + }, + ) +} diff --git a/plugins/inputs/ecs/testdata/metadata.golden b/plugins/inputs/ecs/testdata/metadata.golden new file mode 100644 index 000000000..6823d7e5e --- /dev/null +++ b/plugins/inputs/ecs/testdata/metadata.golden @@ -0,0 +1,78 @@ +{ + "Cluster": "test", + "TaskARN": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "Family": "nginx", + "Revision": "2", + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "Containers": [ + { + "DockerId": "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba", + "Name": "~internal~ecs~pause", + "DockerName": "ecs-nginx-2-internalecspause", + 
"Image": "amazon/amazon-ecs-pause:0.1.0", + "ImageID": "", + "Labels": { + "com.amazonaws.ecs.cluster": "test", + "com.amazonaws.ecs.container-name": "~internal~ecs~pause", + "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "com.amazonaws.ecs.task-definition-family": "nginx", + "com.amazonaws.ecs.task-definition-version": "2" + }, + "DesiredStatus": "RESOURCES_PROVISIONED", + "KnownStatus": "RESOURCES_PROVISIONED", + "Limits": { + "CPU": 0, + "Memory": 0 + }, + "CreatedAt": "2018-11-19T15:31:26.641964373Z", + "StartedAt": "2018-11-19T15:31:27.035698679Z", + "Type": "CNI_PAUSE", + "Networks": [ + { + "NetworkMode": "awsvpc", + "IPv4Addresses": [ + "172.31.25.181" + ] + } + ] + }, + { + "DockerId": "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299", + "Name": "nginx", + "DockerName": "ecs-nginx-2-nginx", + "Image": "nginx:alpine", + "ImageID": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "Labels": { + "com.amazonaws.ecs.cluster": "test", + "com.amazonaws.ecs.container-name": "nginx", + "com.amazonaws.ecs.task-arn": "arn:aws:ecs:aws-region-1:012345678901:task/a1234abc-a0a0-0a01-ab01-0abc012a0a0a", + "com.amazonaws.ecs.task-definition-family": "nginx", + "com.amazonaws.ecs.task-definition-version": "2" + }, + "DesiredStatus": "RUNNING", + "KnownStatus": "RUNNING", + "Limits": { + "CPU": 0, + "Memory": 0 + }, + "CreatedAt": "2018-11-19T15:31:27.614884084Z", + "StartedAt": "2018-11-19T15:31:27.975996351Z", + "Type": "NORMAL", + "Networks": [ + { + "NetworkMode": "awsvpc", + "IPv4Addresses": [ + "172.31.25.181" + ] + } + ] + } + ], + "Limits": { + "CPU": 0.5, + "Memory": 512 + }, + "PullStartedAt": "2018-11-19T15:31:27.197327103Z", + "PullStoppedAt": "2018-11-19T15:31:27.609089471Z" +} \ No newline at end of file diff --git a/plugins/inputs/ecs/testdata/stats.golden b/plugins/inputs/ecs/testdata/stats.golden new file mode 100644 index 000000000..791f4f0b3 --- /dev/null +++ b/plugins/inputs/ecs/testdata/stats.golden @@ -0,0 +1,663 @@ +{ + "e6af031b91deb3136a2b7c42f262ed2ab554e2fe2736998c7d8edf4afe708dba": { + "read": "2018-11-19T15:40:00.936081344Z", + "preread": "2018-11-19T15:39:59.933000984Z", + "num_procs": 0, + "pids_stats": {}, + "networks": { + "eth0": { + "rx_bytes": 5338, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 36, + "tx_bytes": 648, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 8 + }, + "eth5": { + "rx_bytes": 4641, + "rx_dropped": 0, + "rx_errors": 0, + "rx_packets": 26, + "tx_bytes": 690, + "tx_dropped": 0, + "tx_errors": 0, + "tx_packets": 9 + } + }, + "memory_stats": { + "stats": { + "cache": 790528, + "mapped_file": 618496, + "total_inactive_file": 782336, + "pgpgout": 1040, + "rss": 40960, + "total_mapped_file": 618496, + "pgpgin": 1243, + "pgmajfault": 6, + "total_rss": 40960, + "hierarchical_memory_limit": 536870912, + "total_pgfault": 1298, + "total_active_file": 8192, + "active_anon": 40960, + "total_active_anon": 40960, + "total_pgpgout": 1040, + "total_cache": 790528, + "active_file": 8192, + "pgfault": 1298, + "inactive_file": 782336, + "total_pgpgin": 1243, + "hierarchical_memsw_limit": 9223372036854772000 + }, + "max_usage": 4825088, + "usage": 1343488, + "limit": 1033658368 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 202, + "minor": 26368, + "op": "Read", + "value": 790528 + }, + { + "major": 202, + "minor": 26368, + "op": "Write" + }, + { + "major": 202, + "minor": 26368, + "op": "Sync", + "value": 790528 + }, 
+ { + "major": 202, + "minor": 26368, + "op": "Async" + }, + { + "major": 202, + "minor": 26368, + "op": "Total", + "value": 790528 + }, + { + "major": 253, + "minor": 1, + "op": "Read", + "value": 790528 + }, + { + "major": 253, + "minor": 1, + "op": "Write" + }, + { + "major": 253, + "minor": 1, + "op": "Sync", + "value": 790528 + }, + { + "major": 253, + "minor": 1, + "op": "Async" + }, + { + "major": 253, + "minor": 1, + "op": "Total", + "value": 790528 + }, + { + "major": 253, + "minor": 2, + "op": "Read", + "value": 790528 + }, + { + "major": 253, + "minor": 2, + "op": "Write" + }, + { + "major": 253, + "minor": 2, + "op": "Sync", + "value": 790528 + }, + { + "major": 253, + "minor": 2, + "op": "Async" + }, + { + "major": 253, + "minor": 2, + "op": "Total", + "value": 790528 + }, + { + "major": 253, + "minor": 4, + "op": "Read", + "value": 790528 + }, + { + "major": 253, + "minor": 4, + "op": "Write" + }, + { + "major": 253, + "minor": 4, + "op": "Sync", + "value": 790528 + }, + { + "major": 253, + "minor": 4, + "op": "Async" + }, + { + "major": 253, + "minor": 4, + "op": "Total", + "value": 790528 + } + ], + "io_serviced_recursive": [ + { + "major": 202, + "minor": 26368, + "op": "Read", + "value": 10 + }, + { + "major": 202, + "minor": 26368, + "op": "Write" + }, + { + "major": 202, + "minor": 26368, + "op": "Sync", + "value": 10 + }, + { + "major": 202, + "minor": 26368, + "op": "Async" + }, + { + "major": 202, + "minor": 26368, + "op": "Total", + "value": 10 + }, + { + "major": 253, + "minor": 1, + "op": "Read", + "value": 10 + }, + { + "major": 253, + "minor": 1, + "op": "Write" + }, + { + "major": 253, + "minor": 1, + "op": "Sync", + "value": 10 + }, + { + "major": 253, + "minor": 1, + "op": "Async" + }, + { + "major": 253, + "minor": 1, + "op": "Total", + "value": 10 + }, + { + "major": 253, + "minor": 2, + "op": "Read", + "value": 10 + }, + { + "major": 253, + "minor": 2, + "op": "Write" + }, + { + "major": 253, + "minor": 2, + "op": "Sync", + "value": 10 + }, + { + "major": 253, + "minor": 2, + "op": "Async" + }, + { + "major": 253, + "minor": 2, + "op": "Total", + "value": 10 + }, + { + "major": 253, + "minor": 4, + "op": "Read", + "value": 10 + }, + { + "major": 253, + "minor": 4, + "op": "Write" + }, + { + "major": 253, + "minor": 4, + "op": "Sync", + "value": 10 + }, + { + "major": 253, + "minor": 4, + "op": "Async" + }, + { + "major": 253, + "minor": 4, + "op": "Total", + "value": 10 + } + ] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 26426156, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "usage_in_usermode": 20000000, + "total_usage": 26426156 + }, + "system_cpu_usage": 2336100000000, + "online_cpus": 1, + "throttling_data": {} + }, + "precpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 26426156, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "usage_in_usermode": 20000000, + "total_usage": 26426156 + }, + "system_cpu_usage": 2335090000000, + "online_cpus": 1, + "throttling_data": {} + }, + "storage_stats": {} + }, + "fffe894e232d46c76475cfeabf4907f712e8b92618a37fca3ef0805bbbfb0299": { + "read": "2018-11-19T15:40:00.93733207Z", + "preread": "2018-11-19T15:39:59.934291009Z", + "num_procs": 0, + "pids_stats": {}, + "network": {}, + "memory_stats": { + "stats": { + "cache": 5787648, + "mapped_file": 3616768, + "total_inactive_file": 4321280, + "pgpgout": 1674, + "rss": 1597440, + "total_mapped_file": 3616768, + "pgpgin": 3477, + "pgmajfault": 40, + "total_rss": 1597440, + 
"total_inactive_anon": 4096, + "hierarchical_memory_limit": 536870912, + "total_pgfault": 2924, + "total_active_file": 1462272, + "active_anon": 1597440, + "total_active_anon": 1597440, + "total_pgpgout": 1674, + "total_cache": 5787648, + "inactive_anon": 4096, + "active_file": 1462272, + "pgfault": 2924, + "inactive_file": 4321280, + "total_pgpgin": 3477, + "hierarchical_memsw_limit": 9223372036854772000 + }, + "max_usage": 8667136, + "usage": 8179712, + "limit": 1033658368 + }, + "blkio_stats": { + "io_service_bytes_recursive": [ + { + "major": 202, + "minor": 26368, + "op": "Read", + "value": 5730304 + }, + { + "major": 202, + "minor": 26368, + "op": "Write" + }, + { + "major": 202, + "minor": 26368, + "op": "Sync", + "value": 5730304 + }, + { + "major": 202, + "minor": 26368, + "op": "Async" + }, + { + "major": 202, + "minor": 26368, + "op": "Total", + "value": 5730304 + }, + { + "major": 253, + "minor": 1, + "op": "Read", + "value": 5730304 + }, + { + "major": 253, + "minor": 1, + "op": "Write" + }, + { + "major": 253, + "minor": 1, + "op": "Sync", + "value": 5730304 + }, + { + "major": 253, + "minor": 1, + "op": "Async" + }, + { + "major": 253, + "minor": 1, + "op": "Total", + "value": 5730304 + }, + { + "major": 253, + "minor": 2, + "op": "Read", + "value": 5730304 + }, + { + "major": 253, + "minor": 2, + "op": "Write" + }, + { + "major": 253, + "minor": 2, + "op": "Sync", + "value": 5730304 + }, + { + "major": 253, + "minor": 2, + "op": "Async" + }, + { + "major": 253, + "minor": 2, + "op": "Total", + "value": 5730304 + }, + { + "major": 253, + "minor": 5, + "op": "Read", + "value": 5730304 + }, + { + "major": 253, + "minor": 5, + "op": "Write" + }, + { + "major": 253, + "minor": 5, + "op": "Sync", + "value": 5730304 + }, + { + "major": 253, + "minor": 5, + "op": "Async" + }, + { + "major": 253, + "minor": 5, + "op": "Total", + "value": 5730304 + } + ], + "io_serviced_recursive": [ + { + "major": 202, + "minor": 26368, + "op": "Read", + "value": 156 + }, + { + "major": 202, + "minor": 26368, + "op": "Write" + }, + { + "major": 202, + "minor": 26368, + "op": "Sync", + "value": 156 + }, + { + "major": 202, + "minor": 26368, + "op": "Async" + }, + { + "major": 202, + "minor": 26368, + "op": "Total", + "value": 156 + }, + { + "major": 253, + "minor": 1, + "op": "Read", + "value": 156 + }, + { + "major": 253, + "minor": 1, + "op": "Write" + }, + { + "major": 253, + "minor": 1, + "op": "Sync", + "value": 156 + }, + { + "major": 253, + "minor": 1, + "op": "Async" + }, + { + "major": 253, + "minor": 1, + "op": "Total", + "value": 156 + }, + { + "major": 253, + "minor": 2, + "op": "Read", + "value": 156 + }, + { + "major": 253, + "minor": 2, + "op": "Write" + }, + { + "major": 253, + "minor": 2, + "op": "Sync", + "value": 156 + }, + { + "major": 253, + "minor": 2, + "op": "Async" + }, + { + "major": 253, + "minor": 2, + "op": "Total", + "value": 156 + }, + { + "major": 253, + "minor": 5, + "op": "Read", + "value": 147 + }, + { + "major": 253, + "minor": 5, + "op": "Write" + }, + { + "major": 253, + "minor": 5, + "op": "Sync", + "value": 147 + }, + { + "major": 253, + "minor": 5, + "op": "Async" + }, + { + "major": 253, + "minor": 5, + "op": "Total", + "value": 147 + } + ] + }, + "cpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 65599511, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "usage_in_usermode": 40000000, + "total_usage": 65599511, + "usage_in_kernelmode": 10000000 + }, + "system_cpu_usage": 2336100000000, + "online_cpus": 1, + "throttling_data": {} 
+ }, + "precpu_stats": { + "cpu_usage": { + "percpu_usage": [ + 65599511, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ], + "usage_in_usermode": 40000000, + "total_usage": 65599511, + "usage_in_kernelmode": 10000000 + }, + "system_cpu_usage": 2335090000000, + "online_cpus": 1, + "throttling_data": {} + }, + "storage_stats": {} + } +} diff --git a/plugins/inputs/ecs/types.go b/plugins/inputs/ecs/types.go new file mode 100644 index 000000000..0b9b402f6 --- /dev/null +++ b/plugins/inputs/ecs/types.go @@ -0,0 +1,75 @@ +package ecs + +import ( + "encoding/json" + "io" + "strings" + "time" + + "github.com/docker/docker/api/types" +) + +// Task is the ECS task representation +type Task struct { + Cluster string + TaskARN string + Family string + Revision string + DesiredStatus string + KnownStatus string + Containers []Container + Limits map[string]float64 + PullStartedAt time.Time + PullStoppedAt time.Time +} + +// Container is the ECS metadata container representation +type Container struct { + ID string `json:"DockerId"` + Name string + DockerName string + Image string + ImageID string + Labels map[string]string + DesiredStatus string + KnownStatus string + Limits map[string]float64 + CreatedAt time.Time + StartedAt time.Time + Stats types.StatsJSON + Type string + Networks []Network +} + +// Network is a docker network configuration +type Network struct { + NetworkMode string + IPv4Addresses []string +} + +func unmarshalTask(r io.Reader) (*Task, error) { + task := &Task{} + err := json.NewDecoder(r).Decode(task) + return task, err +} + +// docker parsers +func unmarshalStats(r io.Reader) (map[string]types.StatsJSON, error) { + var statsMap map[string]types.StatsJSON + err := json.NewDecoder(r).Decode(&statsMap) + return statsMap, err +} + +// interleaves Stats in to the Container objects in the Task +func mergeTaskStats(task *Task, stats map[string]types.StatsJSON) { + for i, c := range task.Containers { + if strings.Trim(c.ID, " ") == "" { + continue + } + stat, ok := stats[c.ID] + if !ok { + continue + } + task.Containers[i].Stats = stat + } +} diff --git a/plugins/inputs/ecs/types_test.go b/plugins/inputs/ecs/types_test.go new file mode 100644 index 000000000..e68e9711e --- /dev/null +++ b/plugins/inputs/ecs/types_test.go @@ -0,0 +1,47 @@ +package ecs + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_parseTask(t *testing.T) { + r, err := os.Open("testdata/metadata.golden") + require.NoError(t, err) + + parsed, err := unmarshalTask(r) + require.NoError(t, err) + + require.Equal(t, validMeta, *parsed) +} + +func Test_parseStats(t *testing.T) { + r, err := os.Open("testdata/stats.golden") + require.NoError(t, err) + + parsed, err := unmarshalStats(r) + require.NoError(t, err) + require.Equal(t, validStats, parsed) +} + +func Test_mergeTaskStats(t *testing.T) { + metadata, err := os.Open("testdata/metadata.golden") + require.NoError(t, err) + + parsedMetadata, err := unmarshalTask(metadata) + require.NoError(t, err) + + stats, err := os.Open("testdata/stats.golden") + require.NoError(t, err) + + parsedStats, err := unmarshalStats(stats) + require.NoError(t, err) + + mergeTaskStats(parsedMetadata, parsedStats) + + for _, cont := range parsedMetadata.Containers { + require.Equal(t, validStats[cont.ID], cont.Stats) + } +} diff --git a/plugins/inputs/elasticsearch/README.md b/plugins/inputs/elasticsearch/README.md index e88c3f4d6..36fd15fe8 100644 --- a/plugins/inputs/elasticsearch/README.md +++ 
b/plugins/inputs/elasticsearch/README.md @@ -1,15 +1,32 @@ -# Elasticsearch input plugin +# Elasticsearch Input Plugin The [elasticsearch](https://www.elastic.co/) plugin queries endpoints to obtain -[node](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) -and optionally [cluster-health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) -or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) metrics. +[Node Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) +and optionally +[Cluster-Health](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) +metrics. -### Configuration: +In addition, the following optional queries are only made by the master node: + [Cluster Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) + [Indices Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html) + [Shard Stats](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-stats.html) -``` +Specific Elasticsearch endpoints that are queried: +- Node: either /_nodes/stats or /_nodes/_local/stats depending on 'local' configuration setting +- Cluster Health: /_cluster/health?level=indices +- Cluster Stats: /_cluster/stats +- Indices Stats: /_all/_stats +- Shard Stats: /_all/_stats?level=shards + +Note that specific statistics information can change between Elasticsearch versions. In general, this plugin attempts to stay as version-generic as possible by tagging high-level categories only and using a generic JSON parser to make unique field names of whatever statistics names are provided at the mid-low level (a short sketch of this flattening is shown further below). + +### Configuration + +```toml [[inputs.elasticsearch]] ## specify a list of one or more Elasticsearch servers + ## you can add username and password to your url to use basic authentication: + ## servers = ["http://user:pass@localhost:9200"] servers = ["http://localhost:9200"] ## Timeout for HTTP requests to the elastic search server(s) @@ -17,27 +34,40 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre ## When local is true (the default), the node will read only its own stats. ## Set local to false when you want to read the node stats from all nodes - ## of the cluster. + ## of the cluster. local = true - ## Set cluster_health to true when you want to also obtain cluster health stats + ## Set cluster_health to true when you want to obtain cluster health stats cluster_health = false - ## Adjust cluster_health_level when you want to also obtain detailed health stats + ## Adjust cluster_health_level when you want to obtain detailed health stats ## The options are ## - indices (default) ## - cluster # cluster_health_level = "indices" - ## Set cluster_stats to true when you want to also obtain cluster stats from the - ## Master node. + ## Set cluster_stats to true when you want to obtain cluster stats. cluster_stats = false + ## Only gather cluster_stats from the master node. To work, this requires local = true + cluster_stats_only_from_master = true + + ## Indices to collect; can be one or more indices names or _all + indices_include = ["_all"] + + ## One of "shards", "cluster", "indices" + ## Currently only "shards" is implemented + indices_level = "shards" + ## node_stats is a list of sub-stats that you want to have gathered. 
Valid options ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", ## "breaker". Per default, all stats are gathered. # node_stats = ["jvm", "http"] + ## HTTP Basic Authentication username and password. + # username = "" + # password = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -46,306 +76,777 @@ or [cluster-stats](https://www.elastic.co/guide/en/elasticsearch/reference/curre # insecure_skip_verify = false ``` -### Status mappings +### Metrics -When reporting health (green/yellow/red), additional field `status_code` -is reported. Field contains mapping from status:string to status_code:int -with following rules: +Emitted when `cluster_health = true`: -* `green` - 1 -* `yellow` - 2 -* `red` - 3 -* `unknown` - 0 +- elasticsearch_cluster_health + - tags: + - name + - fields: + - active_primary_shards (integer) + - active_shards (integer) + - active_shards_percent_as_number (float) + - delayed_unassigned_shards (integer) + - initializing_shards (integer) + - number_of_data_nodes (integer) + - number_of_in_flight_fetch (integer) + - number_of_nodes (integer) + - number_of_pending_tasks (integer) + - relocating_shards (integer) + - status (string, one of green, yellow or red) + - status_code (integer, green = 1, yellow = 2, red = 3), + - task_max_waiting_in_queue_millis (integer) + - timed_out (boolean) + - unassigned_shards (integer) -### Measurements & Fields: +Emitted when `cluster_health = true` and `cluster_health_level = "indices"`: -field data circuit breaker measurement names: -- elasticsearch_breakers - - fielddata_estimated_size_in_bytes value=0 - - fielddata_overhead value=1.03 - - fielddata_tripped value=0 - - fielddata_limit_size_in_bytes value=623326003 - - request_estimated_size_in_bytes value=0 - - request_overhead value=1.0 - - request_tripped value=0 - - request_limit_size_in_bytes value=415550668 - - parent_overhead value=1.0 - - parent_tripped value=0 - - parent_limit_size_in_bytes value=727213670 - - parent_estimated_size_in_bytes value=0 +- elasticsearch_cluster_health_indices + - tags: + - index + - name + - fields: + - active_primary_shards (integer) + - active_shards (integer) + - initializing_shards (integer) + - number_of_replicas (integer) + - number_of_shards (integer) + - relocating_shards (integer) + - status (string, one of green, yellow or red) + - status_code (integer, green = 1, yellow = 2, red = 3), + - unassigned_shards (integer) -File system information, data path, free disk space, read/write measurement names: -- elasticsearch_fs - - timestamp value=1436460392946 - - total_free_in_bytes value=16909316096 - - total_available_in_bytes value=15894814720 - - total_total_in_bytes value=19507089408 +Emitted when `cluster__stats = true`: -indices size, document count, indexing and deletion times, search times, -field cache size, merges and flushes measurement names: -- elasticsearch_indices - - id_cache_memory_size_in_bytes value=0 - - completion_size_in_bytes value=0 - - suggest_total value=0 - - suggest_time_in_millis value=0 - - suggest_current value=0 - - query_cache_memory_size_in_bytes value=0 - - query_cache_evictions value=0 - - query_cache_hit_count value=0 - - query_cache_miss_count value=0 - - store_size_in_bytes value=37715234 - - store_throttle_time_in_millis value=215 - - merges_current_docs value=0 - - merges_current_size_in_bytes value=0 - - merges_total value=133 - - merges_total_time_in_millis value=21060 - - merges_total_docs value=203672 - - 
merges_total_size_in_bytes value=142900226 - - merges_current value=0 - - filter_cache_memory_size_in_bytes value=7384 - - filter_cache_evictions value=0 - - indexing_index_total value=84790 - - indexing_index_time_in_millis value=29680 - - indexing_index_current value=0 - - indexing_noop_update_total value=0 - - indexing_throttle_time_in_millis value=0 - - indexing_delete_tota value=13879 - - indexing_delete_time_in_millis value=1139 - - indexing_delete_current value=0 - - get_exists_time_in_millis value=0 - - get_missing_total value=1 - - get_missing_time_in_millis value=2 - - get_current value=0 - - get_total value=1 - - get_time_in_millis value=2 - - get_exists_total value=0 - - refresh_total value=1076 - - refresh_total_time_in_millis value=20078 - - percolate_current value=0 - - percolate_memory_size_in_bytes value=-1 - - percolate_queries value=0 - - percolate_total value=0 - - percolate_time_in_millis value=0 - - translog_operations value=17702 - - translog_size_in_bytes value=17 - - recovery_current_as_source value=0 - - recovery_current_as_target value=0 - - recovery_throttle_time_in_millis value=0 - - docs_count value=29652 - - docs_deleted value=5229 - - flush_total_time_in_millis value=2401 - - flush_total value=115 - - fielddata_memory_size_in_bytes value=12996 - - fielddata_evictions value=0 - - search_fetch_current value=0 - - search_open_contexts value=0 - - search_query_total value=1452 - - search_query_time_in_millis value=5695 - - search_query_current value=0 - - search_fetch_total value=414 - - search_fetch_time_in_millis value=146 - - warmer_current value=0 - - warmer_total value=2319 - - warmer_total_time_in_millis value=448 - - segments_count value=134 - - segments_memory_in_bytes value=1285212 - - segments_index_writer_memory_in_bytes value=0 - - segments_index_writer_max_memory_in_bytes value=172368955 - - segments_version_map_memory_in_bytes value=611844 - - segments_fixed_bit_set_memory_in_bytes value=0 +- elasticsearch_clusterstats_indices + - tags: + - cluster_name + - node_name + - status + - fields: + - completion_size_in_bytes (float) + - count (float) + - docs_count (float) + - docs_deleted (float) + - fielddata_evictions (float) + - fielddata_memory_size_in_bytes (float) + - query_cache_cache_count (float) + - query_cache_cache_size (float) + - query_cache_evictions (float) + - query_cache_hit_count (float) + - query_cache_memory_size_in_bytes (float) + - query_cache_miss_count (float) + - query_cache_total_count (float) + - segments_count (float) + - segments_doc_values_memory_in_bytes (float) + - segments_fixed_bit_set_memory_in_bytes (float) + - segments_index_writer_memory_in_bytes (float) + - segments_max_unsafe_auto_id_timestamp (float) + - segments_memory_in_bytes (float) + - segments_norms_memory_in_bytes (float) + - segments_points_memory_in_bytes (float) + - segments_stored_fields_memory_in_bytes (float) + - segments_term_vectors_memory_in_bytes (float) + - segments_terms_memory_in_bytes (float) + - segments_version_map_memory_in_bytes (float) + - shards_index_primaries_avg (float) + - shards_index_primaries_max (float) + - shards_index_primaries_min (float) + - shards_index_replication_avg (float) + - shards_index_replication_max (float) + - shards_index_replication_min (float) + - shards_index_shards_avg (float) + - shards_index_shards_max (float) + - shards_index_shards_min (float) + - shards_primaries (float) + - shards_replication (float) + - shards_total (float) + - store_size_in_bytes (float) -HTTP connection measurement names: -- 
elasticsearch_http - - current_open value=3 - - total_opened value=3 ++ elasticsearch_clusterstats_nodes + - tags: + - cluster_name + - node_name + - status + - fields: + - count_coordinating_only (float) + - count_data (float) + - count_ingest (float) + - count_master (float) + - count_total (float) + - fs_available_in_bytes (float) + - fs_free_in_bytes (float) + - fs_total_in_bytes (float) + - jvm_max_uptime_in_millis (float) + - jvm_mem_heap_max_in_bytes (float) + - jvm_mem_heap_used_in_bytes (float) + - jvm_threads (float) + - jvm_versions_0_count (float) + - jvm_versions_0_version (string) + - jvm_versions_0_vm_name (string) + - jvm_versions_0_vm_vendor (string) + - jvm_versions_0_vm_version (string) + - network_types_http_types_security4 (float) + - network_types_transport_types_security4 (float) + - os_allocated_processors (float) + - os_available_processors (float) + - os_mem_free_in_bytes (float) + - os_mem_free_percent (float) + - os_mem_total_in_bytes (float) + - os_mem_used_in_bytes (float) + - os_mem_used_percent (float) + - os_names_0_count (float) + - os_names_0_name (string) + - os_pretty_names_0_count (float) + - os_pretty_names_0_pretty_name (string) + - process_cpu_percent (float) + - process_open_file_descriptors_avg (float) + - process_open_file_descriptors_max (float) + - process_open_file_descriptors_min (float) + - versions_0 (string) -JVM stats, memory pool information, garbage collection, buffer pools measurement names: -- elasticsearch_jvm - - timestamp value=1436460392945 - - uptime_in_millis value=202245 - - mem_non_heap_used_in_bytes value=39634576 - - mem_non_heap_committed_in_bytes value=40841216 - - mem_pools_young_max_in_bytes value=279183360 - - mem_pools_young_peak_used_in_bytes value=71630848 - - mem_pools_young_peak_max_in_bytes value=279183360 - - mem_pools_young_used_in_bytes value=32685760 - - mem_pools_survivor_peak_used_in_bytes value=8912888 - - mem_pools_survivor_peak_max_in_bytes value=34865152 - - mem_pools_survivor_used_in_bytes value=8912880 - - mem_pools_survivor_max_in_bytes value=34865152 - - mem_pools_old_peak_max_in_bytes value=724828160 - - mem_pools_old_used_in_bytes value=11110928 - - mem_pools_old_max_in_bytes value=724828160 - - mem_pools_old_peak_used_in_bytes value=14354608 - - mem_heap_used_in_bytes value=52709568 - - mem_heap_used_percent value=5 - - mem_heap_committed_in_bytes value=259522560 - - mem_heap_max_in_bytes value=1038876672 - - threads_peak_count value=45 - - threads_count value=44 - - gc_collectors_young_collection_count value=2 - - gc_collectors_young_collection_time_in_millis value=98 - - gc_collectors_old_collection_count value=1 - - gc_collectors_old_collection_time_in_millis value=24 - - buffer_pools_direct_count value=40 - - buffer_pools_direct_used_in_bytes value=6304239 - - buffer_pools_direct_total_capacity_in_bytes value=6304239 - - buffer_pools_mapped_count value=0 - - buffer_pools_mapped_used_in_bytes value=0 - - buffer_pools_mapped_total_capacity_in_bytes value=0 +Emitted when the appropriate `node_stats` options are set. 
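As noted in the introduction above, the per-measurement field names in these lists come from flattening the nested JSON returned by the Elasticsearch stats endpoints, joining nested keys with underscores. The snippet below is a minimal, standalone sketch of that naming convention only; it is not the plugin's actual parser, and the `flatten` helper and the sample JSON fragment are illustrative assumptions.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flatten walks a decoded JSON value and joins nested object keys with "_",
// keeping only leaf values. Arrays and other edge cases handled by the real
// plugin are ignored in this sketch.
func flatten(prefix string, value interface{}, out map[string]interface{}) {
	switch v := value.(type) {
	case map[string]interface{}:
		for k, child := range v {
			key := k
			if prefix != "" {
				key = prefix + "_" + k
			}
			flatten(key, child, out)
		}
	default:
		out[prefix] = v
	}
}

func main() {
	// Illustrative fragment in the shape of the "jvm" section of a node-stats
	// response; the values are made up.
	raw := []byte(`{"mem": {"heap_used_in_bytes": 52709568, "heap_used_percent": 5}, "threads": {"count": 44}}`)

	var decoded map[string]interface{}
	if err := json.Unmarshal(raw, &decoded); err != nil {
		panic(err)
	}

	fields := make(map[string]interface{})
	flatten("", decoded, fields)

	// Produces field names in the same style as the lists in this README:
	// mem_heap_used_in_bytes, mem_heap_used_percent, threads_count.
	fmt.Println(fields)
}
```

Building field names this way is what keeps the plugin version-agnostic: when a new Elasticsearch release adds a statistic, it simply shows up as an additional flattened field rather than requiring a schema change.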
-TCP information measurement names: -- elasticsearch_network - - tcp_in_errs value=0 - - tcp_passive_opens value=16 - - tcp_curr_estab value=29 - - tcp_in_segs value=113 - - tcp_out_segs value=97 - - tcp_retrans_segs value=0 - - tcp_attempt_fails value=0 - - tcp_active_opens value=13 - - tcp_estab_resets value=0 - - tcp_out_rsts value=0 - -Operating system stats, load average, cpu, mem, swap measurement names: -- elasticsearch_os - - swap_used_in_bytes value=0 - - swap_free_in_bytes value=487997440 - - timestamp value=1436460392944 - - uptime_in_millis value=25092 - - cpu_sys value=0 - - cpu_user value=0 - - cpu_idle value=99 - - cpu_usage value=0 - - cpu_stolen value=0 - - mem_free_percent value=74 - - mem_used_percent value=25 - - mem_actual_free_in_bytes value=1565470720 - - mem_actual_used_in_bytes value=534159360 - - mem_free_in_bytes value=477761536 - - mem_used_in_bytes value=1621868544 - -Process statistics, memory consumption, cpu usage, open file descriptors measurement names: -- elasticsearch_process - - mem_resident_in_bytes value=246382592 - - mem_share_in_bytes value=18747392 - - mem_total_virtual_in_bytes value=4747890688 - - timestamp value=1436460392945 - - open_file_descriptors value=160 - - cpu_total_in_millis value=15480 - - cpu_percent value=2 - - cpu_sys_in_millis value=1870 - - cpu_user_in_millis value=13610 - -Statistics about each thread pool, including current size, queue and rejected tasks measurement names: -- elasticsearch_thread_pool - - merge_threads value=6 - - merge_queue value=4 - - merge_active value=5 - - merge_rejected value=2 - - merge_largest value=5 - - merge_completed value=1 - - bulk_threads value=4 - - bulk_queue value=5 - - bulk_active value=7 - - bulk_rejected value=3 - - bulk_largest value=1 - - bulk_completed value=4 - - warmer_threads value=2 - - warmer_queue value=7 - - warmer_active value=3 - - warmer_rejected value=2 - - warmer_largest value=3 - - warmer_completed value=1 - - get_largest value=2 - - get_completed value=1 - - get_threads value=1 - - get_queue value=8 - - get_active value=4 - - get_rejected value=3 - - index_threads value=6 - - index_queue value=8 - - index_active value=4 - - index_rejected value=2 - - index_largest value=3 - - index_completed value=6 - - suggest_threads value=2 - - suggest_queue value=7 - - suggest_active value=2 - - suggest_rejected value=1 - - suggest_largest value=8 - - suggest_completed value=3 - - fetch_shard_store_queue value=7 - - fetch_shard_store_active value=4 - - fetch_shard_store_rejected value=2 - - fetch_shard_store_largest value=4 - - fetch_shard_store_completed value=1 - - fetch_shard_store_threads value=1 - - management_threads value=2 - - management_queue value=3 - - management_active value=1 - - management_rejected value=6 - - management_largest value=2 - - management_completed value=22 - - percolate_queue value=23 - - percolate_active value=13 - - percolate_rejected value=235 - - percolate_largest value=23 - - percolate_completed value=33 - - percolate_threads value=123 - - listener_active value=4 - - listener_rejected value=8 - - listener_largest value=1 - - listener_completed value=1 - - listener_threads value=1 - - listener_queue value=2 - - search_rejected value=7 - - search_largest value=2 - - search_completed value=4 - - search_threads value=5 - - search_queue value=7 - - search_active value=2 - - fetch_shard_started_threads value=3 - - fetch_shard_started_queue value=1 - - fetch_shard_started_active value=5 - - fetch_shard_started_rejected value=6 - - fetch_shard_started_largest 
value=4 - - fetch_shard_started_completed value=54 - - refresh_rejected value=4 - - refresh_largest value=8 - - refresh_completed value=3 - - refresh_threads value=23 - - refresh_queue value=7 - - refresh_active value=3 - - optimize_threads value=3 - - optimize_queue value=4 - - optimize_active value=1 - - optimize_rejected value=2 - - optimize_largest value=7 - - optimize_completed value=3 - - snapshot_largest value=1 - - snapshot_completed value=0 - - snapshot_threads value=8 - - snapshot_queue value=5 - - snapshot_active value=6 - - snapshot_rejected value=2 - - generic_threads value=1 - - generic_queue value=4 - - generic_active value=6 - - generic_rejected value=3 - - generic_largest value=2 - - generic_completed value=27 - - flush_threads value=3 - - flush_queue value=8 - - flush_active value=0 - - flush_rejected value=1 - - flush_largest value=5 - - flush_completed value=3 - -Transport statistics about sent and received bytes in cluster communication measurement names: - elasticsearch_transport - - server_open value=13 - - rx_count value=6 - - rx_size_in_bytes value=1380 - - tx_count value=6 - - tx_size_in_bytes value=1380 + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - rx_count (float) + - rx_size_in_bytes (float) + - server_open (float) + - tx_count (float) + - tx_size_in_bytes (float) + ++ elasticsearch_breakers + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - accounting_estimated_size_in_bytes (float) + - accounting_limit_size_in_bytes (float) + - accounting_overhead (float) + - accounting_tripped (float) + - fielddata_estimated_size_in_bytes (float) + - fielddata_limit_size_in_bytes (float) + - fielddata_overhead (float) + - fielddata_tripped (float) + - in_flight_requests_estimated_size_in_bytes (float) + - in_flight_requests_limit_size_in_bytes (float) + - in_flight_requests_overhead (float) + - in_flight_requests_tripped (float) + - parent_estimated_size_in_bytes (float) + - parent_limit_size_in_bytes (float) + - parent_overhead (float) + - parent_tripped (float) + - request_estimated_size_in_bytes (float) + - request_limit_size_in_bytes (float) + - request_overhead (float) + - request_tripped (float) + +- elasticsearch_fs + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - data_0_available_in_bytes (float) + - data_0_free_in_bytes (float) + - data_0_total_in_bytes (float) + - io_stats_devices_0_operations (float) + - io_stats_devices_0_read_kilobytes (float) + - io_stats_devices_0_read_operations (float) + - io_stats_devices_0_write_kilobytes (float) + - io_stats_devices_0_write_operations (float) + - io_stats_total_operations (float) + - io_stats_total_read_kilobytes (float) + - io_stats_total_read_operations (float) + - io_stats_total_write_kilobytes (float) + - io_stats_total_write_operations (float) + - timestamp (float) + - total_available_in_bytes (float) + - total_free_in_bytes (float) + - total_total_in_bytes (float) + ++ elasticsearch_http + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - 
node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - current_open (float) + - total_opened (float) + +- elasticsearch_indices + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - completion_size_in_bytes (float) + - docs_count (float) + - docs_deleted (float) + - fielddata_evictions (float) + - fielddata_memory_size_in_bytes (float) + - flush_periodic (float) + - flush_total (float) + - flush_total_time_in_millis (float) + - get_current (float) + - get_exists_time_in_millis (float) + - get_exists_total (float) + - get_missing_time_in_millis (float) + - get_missing_total (float) + - get_time_in_millis (float) + - get_total (float) + - indexing_delete_current (float) + - indexing_delete_time_in_millis (float) + - indexing_delete_total (float) + - indexing_index_current (float) + - indexing_index_failed (float) + - indexing_index_time_in_millis (float) + - indexing_index_total (float) + - indexing_noop_update_total (float) + - indexing_throttle_time_in_millis (float) + - merges_current (float) + - merges_current_docs (float) + - merges_current_size_in_bytes (float) + - merges_total (float) + - merges_total_auto_throttle_in_bytes (float) + - merges_total_docs (float) + - merges_total_size_in_bytes (float) + - merges_total_stopped_time_in_millis (float) + - merges_total_throttled_time_in_millis (float) + - merges_total_time_in_millis (float) + - query_cache_cache_count (float) + - query_cache_cache_size (float) + - query_cache_evictions (float) + - query_cache_hit_count (float) + - query_cache_memory_size_in_bytes (float) + - query_cache_miss_count (float) + - query_cache_total_count (float) + - recovery_current_as_source (float) + - recovery_current_as_target (float) + - recovery_throttle_time_in_millis (float) + - refresh_listeners (float) + - refresh_total (float) + - refresh_total_time_in_millis (float) + - request_cache_evictions (float) + - request_cache_hit_count (float) + - request_cache_memory_size_in_bytes (float) + - request_cache_miss_count (float) + - search_fetch_current (float) + - search_fetch_time_in_millis (float) + - search_fetch_total (float) + - search_open_contexts (float) + - search_query_current (float) + - search_query_time_in_millis (float) + - search_query_total (float) + - search_scroll_current (float) + - search_scroll_time_in_millis (float) + - search_scroll_total (float) + - search_suggest_current (float) + - search_suggest_time_in_millis (float) + - search_suggest_total (float) + - segments_count (float) + - segments_doc_values_memory_in_bytes (float) + - segments_fixed_bit_set_memory_in_bytes (float) + - segments_index_writer_memory_in_bytes (float) + - segments_max_unsafe_auto_id_timestamp (float) + - segments_memory_in_bytes (float) + - segments_norms_memory_in_bytes (float) + - segments_points_memory_in_bytes (float) + - segments_stored_fields_memory_in_bytes (float) + - segments_term_vectors_memory_in_bytes (float) + - segments_terms_memory_in_bytes (float) + - segments_version_map_memory_in_bytes (float) + - store_size_in_bytes (float) + - translog_earliest_last_modified_age (float) + - translog_operations (float) + - translog_size_in_bytes (float) + - translog_uncommitted_operations (float) + - translog_uncommitted_size_in_bytes (float) + - warmer_current (float) + - warmer_total (float) + - warmer_total_time_in_millis 
(float) + ++ elasticsearch_jvm + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - buffer_pools_direct_count (float) + - buffer_pools_direct_total_capacity_in_bytes (float) + - buffer_pools_direct_used_in_bytes (float) + - buffer_pools_mapped_count (float) + - buffer_pools_mapped_total_capacity_in_bytes (float) + - buffer_pools_mapped_used_in_bytes (float) + - classes_current_loaded_count (float) + - classes_total_loaded_count (float) + - classes_total_unloaded_count (float) + - gc_collectors_old_collection_count (float) + - gc_collectors_old_collection_time_in_millis (float) + - gc_collectors_young_collection_count (float) + - gc_collectors_young_collection_time_in_millis (float) + - mem_heap_committed_in_bytes (float) + - mem_heap_max_in_bytes (float) + - mem_heap_used_in_bytes (float) + - mem_heap_used_percent (float) + - mem_non_heap_committed_in_bytes (float) + - mem_non_heap_used_in_bytes (float) + - mem_pools_old_max_in_bytes (float) + - mem_pools_old_peak_max_in_bytes (float) + - mem_pools_old_peak_used_in_bytes (float) + - mem_pools_old_used_in_bytes (float) + - mem_pools_survivor_max_in_bytes (float) + - mem_pools_survivor_peak_max_in_bytes (float) + - mem_pools_survivor_peak_used_in_bytes (float) + - mem_pools_survivor_used_in_bytes (float) + - mem_pools_young_max_in_bytes (float) + - mem_pools_young_peak_max_in_bytes (float) + - mem_pools_young_peak_used_in_bytes (float) + - mem_pools_young_used_in_bytes (float) + - threads_count (float) + - threads_peak_count (float) + - timestamp (float) + - uptime_in_millis (float) + +- elasticsearch_os + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - cgroup_cpu_cfs_period_micros (float) + - cgroup_cpu_cfs_quota_micros (float) + - cgroup_cpu_stat_number_of_elapsed_periods (float) + - cgroup_cpu_stat_number_of_times_throttled (float) + - cgroup_cpu_stat_time_throttled_nanos (float) + - cgroup_cpuacct_usage_nanos (float) + - cpu_load_average_15m (float) + - cpu_load_average_1m (float) + - cpu_load_average_5m (float) + - cpu_percent (float) + - mem_free_in_bytes (float) + - mem_free_percent (float) + - mem_total_in_bytes (float) + - mem_used_in_bytes (float) + - mem_used_percent (float) + - swap_free_in_bytes (float) + - swap_total_in_bytes (float) + - swap_used_in_bytes (float) + - timestamp (float) + ++ elasticsearch_process + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - cpu_percent (float) + - cpu_total_in_millis (float) + - max_file_descriptors (float) + - mem_total_virtual_in_bytes (float) + - open_file_descriptors (float) + - timestamp (float) + +- elasticsearch_thread_pool + - tags: + - cluster_name + - node_attribute_ml.enabled + - node_attribute_ml.machine_memory + - node_attribute_ml.max_open_jobs + - node_attribute_xpack.installed + - node_host + - node_id + - node_name + - fields: + - analyze_active (float) + - analyze_completed (float) + - analyze_largest (float) + - analyze_queue (float) + - analyze_rejected (float) + - analyze_threads (float) + - ccr_active (float) + - ccr_completed (float) + - ccr_largest (float) + - 
ccr_queue (float) + - ccr_rejected (float) + - ccr_threads (float) + - fetch_shard_started_active (float) + - fetch_shard_started_completed (float) + - fetch_shard_started_largest (float) + - fetch_shard_started_queue (float) + - fetch_shard_started_rejected (float) + - fetch_shard_started_threads (float) + - fetch_shard_store_active (float) + - fetch_shard_store_completed (float) + - fetch_shard_store_largest (float) + - fetch_shard_store_queue (float) + - fetch_shard_store_rejected (float) + - fetch_shard_store_threads (float) + - flush_active (float) + - flush_completed (float) + - flush_largest (float) + - flush_queue (float) + - flush_rejected (float) + - flush_threads (float) + - force_merge_active (float) + - force_merge_completed (float) + - force_merge_largest (float) + - force_merge_queue (float) + - force_merge_rejected (float) + - force_merge_threads (float) + - generic_active (float) + - generic_completed (float) + - generic_largest (float) + - generic_queue (float) + - generic_rejected (float) + - generic_threads (float) + - get_active (float) + - get_completed (float) + - get_largest (float) + - get_queue (float) + - get_rejected (float) + - get_threads (float) + - index_active (float) + - index_completed (float) + - index_largest (float) + - index_queue (float) + - index_rejected (float) + - index_threads (float) + - listener_active (float) + - listener_completed (float) + - listener_largest (float) + - listener_queue (float) + - listener_rejected (float) + - listener_threads (float) + - management_active (float) + - management_completed (float) + - management_largest (float) + - management_queue (float) + - management_rejected (float) + - management_threads (float) + - ml_autodetect_active (float) + - ml_autodetect_completed (float) + - ml_autodetect_largest (float) + - ml_autodetect_queue (float) + - ml_autodetect_rejected (float) + - ml_autodetect_threads (float) + - ml_datafeed_active (float) + - ml_datafeed_completed (float) + - ml_datafeed_largest (float) + - ml_datafeed_queue (float) + - ml_datafeed_rejected (float) + - ml_datafeed_threads (float) + - ml_utility_active (float) + - ml_utility_completed (float) + - ml_utility_largest (float) + - ml_utility_queue (float) + - ml_utility_rejected (float) + - ml_utility_threads (float) + - refresh_active (float) + - refresh_completed (float) + - refresh_largest (float) + - refresh_queue (float) + - refresh_rejected (float) + - refresh_threads (float) + - rollup_indexing_active (float) + - rollup_indexing_completed (float) + - rollup_indexing_largest (float) + - rollup_indexing_queue (float) + - rollup_indexing_rejected (float) + - rollup_indexing_threads (float) + - search_active (float) + - search_completed (float) + - search_largest (float) + - search_queue (float) + - search_rejected (float) + - search_threads (float) + - search_throttled_active (float) + - search_throttled_completed (float) + - search_throttled_largest (float) + - search_throttled_queue (float) + - search_throttled_rejected (float) + - search_throttled_threads (float) + - security-token-key_active (float) + - security-token-key_completed (float) + - security-token-key_largest (float) + - security-token-key_queue (float) + - security-token-key_rejected (float) + - security-token-key_threads (float) + - snapshot_active (float) + - snapshot_completed (float) + - snapshot_largest (float) + - snapshot_queue (float) + - snapshot_rejected (float) + - snapshot_threads (float) + - warmer_active (float) + - warmer_completed (float) + - warmer_largest (float) + - 
warmer_queue (float) + - warmer_rejected (float) + - warmer_threads (float) + - watcher_active (float) + - watcher_completed (float) + - watcher_largest (float) + - watcher_queue (float) + - watcher_rejected (float) + - watcher_threads (float) + - write_active (float) + - write_completed (float) + - write_largest (float) + - write_queue (float) + - write_rejected (float) + - write_threads (float) + +Emitted when the appropriate `indices_stats` options are set. + +- elasticsearch_indices_stats_(primaries|total) + - tags: + - index_name + - fields: + - completion_size_in_bytes (float) + - docs_count (float) + - docs_deleted (float) + - fielddata_evictions (float) + - fielddata_memory_size_in_bytes (float) + - flush_periodic (float) + - flush_total (float) + - flush_total_time_in_millis (float) + - get_current (float) + - get_exists_time_in_millis (float) + - get_exists_total (float) + - get_missing_time_in_millis (float) + - get_missing_total (float) + - get_time_in_millis (float) + - get_total (float) + - indexing_delete_current (float) + - indexing_delete_time_in_millis (float) + - indexing_delete_total (float) + - indexing_index_current (float) + - indexing_index_failed (float) + - indexing_index_time_in_millis (float) + - indexing_index_total (float) + - indexing_is_throttled (float) + - indexing_noop_update_total (float) + - indexing_throttle_time_in_millis (float) + - merges_current (float) + - merges_current_docs (float) + - merges_current_size_in_bytes (float) + - merges_total (float) + - merges_total_auto_throttle_in_bytes (float) + - merges_total_docs (float) + - merges_total_size_in_bytes (float) + - merges_total_stopped_time_in_millis (float) + - merges_total_throttled_time_in_millis (float) + - merges_total_time_in_millis (float) + - query_cache_cache_count (float) + - query_cache_cache_size (float) + - query_cache_evictions (float) + - query_cache_hit_count (float) + - query_cache_memory_size_in_bytes (float) + - query_cache_miss_count (float) + - query_cache_total_count (float) + - recovery_current_as_source (float) + - recovery_current_as_target (float) + - recovery_throttle_time_in_millis (float) + - refresh_external_total (float) + - refresh_external_total_time_in_millis (float) + - refresh_listeners (float) + - refresh_total (float) + - refresh_total_time_in_millis (float) + - request_cache_evictions (float) + - request_cache_hit_count (float) + - request_cache_memory_size_in_bytes (float) + - request_cache_miss_count (float) + - search_fetch_current (float) + - search_fetch_time_in_millis (float) + - search_fetch_total (float) + - search_open_contexts (float) + - search_query_current (float) + - search_query_time_in_millis (float) + - search_query_total (float) + - search_scroll_current (float) + - search_scroll_time_in_millis (float) + - search_scroll_total (float) + - search_suggest_current (float) + - search_suggest_time_in_millis (float) + - search_suggest_total (float) + - segments_count (float) + - segments_doc_values_memory_in_bytes (float) + - segments_fixed_bit_set_memory_in_bytes (float) + - segments_index_writer_memory_in_bytes (float) + - segments_max_unsafe_auto_id_timestamp (float) + - segments_memory_in_bytes (float) + - segments_norms_memory_in_bytes (float) + - segments_points_memory_in_bytes (float) + - segments_stored_fields_memory_in_bytes (float) + - segments_term_vectors_memory_in_bytes (float) + - segments_terms_memory_in_bytes (float) + - segments_version_map_memory_in_bytes (float) + - store_size_in_bytes (float) + - 
translog_earliest_last_modified_age (float) + - translog_operations (float) + - translog_size_in_bytes (float) + - translog_uncommitted_operations (float) + - translog_uncommitted_size_in_bytes (float) + - warmer_current (float) + - warmer_total (float) + - warmer_total_time_in_millis (float) + +Emitted when the appropriate `shards_stats` options are set. + +- elasticsearch_indices_stats_shards_total + - fields: + - failed (float) + - successful (float) + - total (float) + +- elasticsearch_indices_stats_shards + - tags: + - index_name + - node_name + - shard_name + - type + - fields: + - commit_generation (float) + - commit_num_docs (float) + - completion_size_in_bytes (float) + - docs_count (float) + - docs_deleted (float) + - fielddata_evictions (float) + - fielddata_memory_size_in_bytes (float) + - flush_periodic (float) + - flush_total (float) + - flush_total_time_in_millis (float) + - get_current (float) + - get_exists_time_in_millis (float) + - get_exists_total (float) + - get_missing_time_in_millis (float) + - get_missing_total (float) + - get_time_in_millis (float) + - get_total (float) + - indexing_delete_current (float) + - indexing_delete_time_in_millis (float) + - indexing_delete_total (float) + - indexing_index_current (float) + - indexing_index_failed (float) + - indexing_index_time_in_millis (float) + - indexing_index_total (float) + - indexing_is_throttled (bool) + - indexing_noop_update_total (float) + - indexing_throttle_time_in_millis (float) + - merges_current (float) + - merges_current_docs (float) + - merges_current_size_in_bytes (float) + - merges_total (float) + - merges_total_auto_throttle_in_bytes (float) + - merges_total_docs (float) + - merges_total_size_in_bytes (float) + - merges_total_stopped_time_in_millis (float) + - merges_total_throttled_time_in_millis (float) + - merges_total_time_in_millis (float) + - query_cache_cache_count (float) + - query_cache_cache_size (float) + - query_cache_evictions (float) + - query_cache_hit_count (float) + - query_cache_memory_size_in_bytes (float) + - query_cache_miss_count (float) + - query_cache_total_count (float) + - recovery_current_as_source (float) + - recovery_current_as_target (float) + - recovery_throttle_time_in_millis (float) + - refresh_external_total (float) + - refresh_external_total_time_in_millis (float) + - refresh_listeners (float) + - refresh_total (float) + - refresh_total_time_in_millis (float) + - request_cache_evictions (float) + - request_cache_hit_count (float) + - request_cache_memory_size_in_bytes (float) + - request_cache_miss_count (float) + - retention_leases_primary_term (float) + - retention_leases_version (float) + - routing_state (int) (UNASSIGNED = 1, INITIALIZING = 2, STARTED = 3, RELOCATING = 4, other = 0) + - search_fetch_current (float) + - search_fetch_time_in_millis (float) + - search_fetch_total (float) + - search_open_contexts (float) + - search_query_current (float) + - search_query_time_in_millis (float) + - search_query_total (float) + - search_scroll_current (float) + - search_scroll_time_in_millis (float) + - search_scroll_total (float) + - search_suggest_current (float) + - search_suggest_time_in_millis (float) + - search_suggest_total (float) + - segments_count (float) + - segments_doc_values_memory_in_bytes (float) + - segments_fixed_bit_set_memory_in_bytes (float) + - segments_index_writer_memory_in_bytes (float) + - segments_max_unsafe_auto_id_timestamp (float) + - segments_memory_in_bytes (float) + - segments_norms_memory_in_bytes (float) + - 
segments_points_memory_in_bytes (float) + - segments_stored_fields_memory_in_bytes (float) + - segments_term_vectors_memory_in_bytes (float) + - segments_terms_memory_in_bytes (float) + - segments_version_map_memory_in_bytes (float) + - seq_no_global_checkpoint (float) + - seq_no_local_checkpoint (float) + - seq_no_max_seq_no (float) + - shard_path_is_custom_data_path (bool) + - store_size_in_bytes (float) + - translog_earliest_last_modified_age (float) + - translog_operations (float) + - translog_size_in_bytes (float) + - translog_uncommitted_operations (float) + - translog_uncommitted_size_in_bytes (float) + - warmer_current (float) + - warmer_total (float) + - warmer_total_time_in_millis (float) \ No newline at end of file diff --git a/plugins/inputs/elasticsearch/elasticsearch.go b/plugins/inputs/elasticsearch/elasticsearch.go index 9875b68aa..65d76c3ae 100644 --- a/plugins/inputs/elasticsearch/elasticsearch.go +++ b/plugins/inputs/elasticsearch/elasticsearch.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "net/http" "regexp" + "sort" "strings" "sync" "time" @@ -27,6 +28,7 @@ const statsPathLocal = "/_nodes/_local/stats" type nodeStat struct { Host string `json:"host"` Name string `json:"name"` + Roles []string `json:"roles"` Attributes map[string]string `json:"attributes"` Indices interface{} `json:"indices"` OS interface{} `json:"os"` @@ -40,30 +42,32 @@ type nodeStat struct { } type clusterHealth struct { - ClusterName string `json:"cluster_name"` - Status string `json:"status"` - TimedOut bool `json:"timed_out"` - NumberOfNodes int `json:"number_of_nodes"` - NumberOfDataNodes int `json:"number_of_data_nodes"` ActivePrimaryShards int `json:"active_primary_shards"` ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` - InitializingShards int `json:"initializing_shards"` - UnassignedShards int `json:"unassigned_shards"` - NumberOfPendingTasks int `json:"number_of_pending_tasks"` - TaskMaxWaitingInQueueMillis int `json:"task_max_waiting_in_queue_millis"` ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` + ClusterName string `json:"cluster_name"` + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` + InitializingShards int `json:"initializing_shards"` + NumberOfDataNodes int `json:"number_of_data_nodes"` + NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` + NumberOfNodes int `json:"number_of_nodes"` + NumberOfPendingTasks int `json:"number_of_pending_tasks"` + RelocatingShards int `json:"relocating_shards"` + Status string `json:"status"` + TaskMaxWaitingInQueueMillis int `json:"task_max_waiting_in_queue_millis"` + TimedOut bool `json:"timed_out"` + UnassignedShards int `json:"unassigned_shards"` Indices map[string]indexHealth `json:"indices"` } type indexHealth struct { - Status string `json:"status"` - NumberOfShards int `json:"number_of_shards"` - NumberOfReplicas int `json:"number_of_replicas"` ActivePrimaryShards int `json:"active_primary_shards"` ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` InitializingShards int `json:"initializing_shards"` + NumberOfReplicas int `json:"number_of_replicas"` + NumberOfShards int `json:"number_of_shards"` + RelocatingShards int `json:"relocating_shards"` + Status string `json:"status"` UnassignedShards int `json:"unassigned_shards"` } @@ -75,10 +79,10 @@ type clusterStats struct { Nodes interface{} `json:"nodes"` } -type catMaster struct { - NodeID string `json:"id"` - NodeIP string `json:"ip"` - NodeName string `json:"node"` 
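For reference, the two cluster-health counters added above (`delayed_unassigned_shards` and `number_of_in_flight_fetch`) are picked up purely through `encoding/json` struct tags, so no extra parsing code is needed. A minimal, self-contained sketch of that decoding, using an invented payload and a struct trimmed to just these two fields:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-in for the plugin's clusterHealth struct,
// keeping only the two fields added in this change (assumption:
// everything else in the real payload is simply ignored).
type healthSubset struct {
	DelayedUnassignedShards int `json:"delayed_unassigned_shards"`
	NumberOfInFlightFetch   int `json:"number_of_in_flight_fetch"`
}

func main() {
	// Example payload; the values are made up for illustration.
	body := []byte(`{"delayed_unassigned_shards": 2, "number_of_in_flight_fetch": 1}`)

	var h healthSubset
	if err := json.Unmarshal(body, &h); err != nil {
		panic(err)
	}
	fmt.Println(h.DelayedUnassignedShards, h.NumberOfInFlightFetch) // 2 1
}
```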
+type indexStat struct { + Primaries interface{} `json:"primaries"` + Total interface{} `json:"total"` + Shards map[string][]interface{} `json:"shards"` } const sampleConfig = ` @@ -104,15 +108,27 @@ const sampleConfig = ` ## - cluster # cluster_health_level = "indices" - ## Set cluster_stats to true when you want to also obtain cluster stats from the - ## Master node. + ## Set cluster_stats to true when you want to also obtain cluster stats. cluster_stats = false + ## Only gather cluster_stats from the master node. To work this require local = true + cluster_stats_only_from_master = true + + ## Indices to collect; can be one or more indices names or _all + indices_include = ["_all"] + + ## One of "shards", "cluster", "indices" + indices_level = "shards" + ## node_stats is a list of sub-stats that you want to have gathered. Valid options ## are "indices", "os", "process", "jvm", "thread_pool", "fs", "transport", "http", ## "breaker". Per default, all stats are gathered. # node_stats = ["jvm", "http"] + ## HTTP Basic Authentication username and password. + # username = "" + # password = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -124,25 +140,39 @@ const sampleConfig = ` // Elasticsearch is a plugin to read stats from one or many Elasticsearch // servers. type Elasticsearch struct { - Local bool - Servers []string - HttpTimeout internal.Duration - ClusterHealth bool - ClusterHealthLevel string - ClusterStats bool - NodeStats []string + Local bool `toml:"local"` + Servers []string `toml:"servers"` + HTTPTimeout internal.Duration `toml:"http_timeout"` + ClusterHealth bool `toml:"cluster_health"` + ClusterHealthLevel string `toml:"cluster_health_level"` + ClusterStats bool `toml:"cluster_stats"` + ClusterStatsOnlyFromMaster bool `toml:"cluster_stats_only_from_master"` + IndicesInclude []string `toml:"indices_include"` + IndicesLevel string `toml:"indices_level"` + NodeStats []string `toml:"node_stats"` + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig - client *http.Client - catMasterResponseTokens []string - isMaster bool + client *http.Client + serverInfo map[string]serverInfo + serverInfoMutex sync.Mutex +} +type serverInfo struct { + nodeID string + masterID string +} + +func (i serverInfo) isMaster() bool { + return i.nodeID == i.masterID } // NewElasticsearch return a new instance of Elasticsearch func NewElasticsearch() *Elasticsearch { return &Elasticsearch{ - HttpTimeout: internal.Duration{Duration: time.Second * 5}, - ClusterHealthLevel: "indices", + HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + ClusterStatsOnlyFromMaster: true, + ClusterHealthLevel: "indices", } } @@ -159,6 +189,21 @@ func mapHealthStatusToCode(s string) int { return 0 } +// perform shard status mapping +func mapShardStatusToCode(s string) int { + switch strings.ToUpper(s) { + case "UNASSIGNED": + return 1 + case "INITIALIZING": + return 2 + case "STARTED": + return 3 + case "RELOCATING": + return 4 + } + return 0 +} + // SampleConfig returns sample configuration for this plugin. func (e *Elasticsearch) SampleConfig() string { return sampleConfig @@ -173,7 +218,7 @@ func (e *Elasticsearch) Description() string { // Accumulator. 
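The `mapShardStatusToCode` helper above is what produces the numeric `routing_state` field documented in the README (UNASSIGNED = 1, INITIALIZING = 2, STARTED = 3, RELOCATING = 4, anything else 0). A small standalone sketch of the same mapping, copied here only so it can be run in isolation; the sample states are invented:

```go
package main

import (
	"fmt"
	"strings"
)

// Local copy of the plugin's shard-status mapping, for illustration only.
func mapShardStatusToCode(s string) int {
	switch strings.ToUpper(s) {
	case "UNASSIGNED":
		return 1
	case "INITIALIZING":
		return 2
	case "STARTED":
		return 3
	case "RELOCATING":
		return 4
	}
	return 0
}

func main() {
	for _, state := range []string{"started", "RELOCATING", "closed"} {
		fmt.Printf("%s -> %d\n", state, mapShardStatusToCode(state))
	}
	// Output:
	// started -> 3
	// RELOCATING -> 4
	// closed -> 0
}
```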
func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { if e.client == nil { - client, err := e.createHttpClient() + client, err := e.createHTTPClient() if err != nil { return err @@ -181,25 +226,49 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { e.client = client } + if e.ClusterStats || len(e.IndicesInclude) > 0 || len(e.IndicesLevel) > 0 { + var wgC sync.WaitGroup + wgC.Add(len(e.Servers)) + + e.serverInfo = make(map[string]serverInfo) + for _, serv := range e.Servers { + go func(s string, acc telegraf.Accumulator) { + defer wgC.Done() + info := serverInfo{} + + var err error + + // Gather node ID + if info.nodeID, err = e.gatherNodeID(s + "/_nodes/_local/name"); err != nil { + acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) + return + } + + // get cat/master information here so NodeStats can determine + // whether this node is the Master + if info.masterID, err = e.getCatMaster(s + "/_cat/master"); err != nil { + acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) + return + } + + e.serverInfoMutex.Lock() + e.serverInfo[s] = info + e.serverInfoMutex.Unlock() + + }(serv, acc) + } + wgC.Wait() + } + var wg sync.WaitGroup wg.Add(len(e.Servers)) for _, serv := range e.Servers { go func(s string, acc telegraf.Accumulator) { defer wg.Done() - url := e.nodeStatsUrl(s) - e.isMaster = false + url := e.nodeStatsURL(s) - if e.ClusterStats { - // get cat/master information here so NodeStats can determine - // whether this node is the Master - if err := e.setCatMaster(s + "/_cat/master"); err != nil { - acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) - return - } - } - - // Always gather node states + // Always gather node stats if err := e.gatherNodeStats(url, acc); err != nil { acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) return @@ -216,12 +285,26 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { } } - if e.ClusterStats && e.isMaster { + if e.ClusterStats && (e.serverInfo[s].isMaster() || !e.ClusterStatsOnlyFromMaster || !e.Local) { if err := e.gatherClusterStats(s+"/_cluster/stats", acc); err != nil { acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) return } } + + if len(e.IndicesInclude) > 0 && (e.serverInfo[s].isMaster() || !e.ClusterStatsOnlyFromMaster || !e.Local) { + if e.IndicesLevel != "shards" { + if err := e.gatherIndicesStats(s+"/"+strings.Join(e.IndicesInclude, ",")+"/_stats", acc); err != nil { + acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) + return + } + } else { + if err := e.gatherIndicesStats(s+"/"+strings.Join(e.IndicesInclude, ",")+"/_stats?level=shards", acc); err != nil { + acc.AddError(fmt.Errorf(mask.ReplaceAllString(err.Error(), "http(s)://XXX:XXX@"))) + return + } + } + } }(serv, acc) } @@ -229,30 +312,30 @@ func (e *Elasticsearch) Gather(acc telegraf.Accumulator) error { return nil } -func (e *Elasticsearch) createHttpClient() (*http.Client, error) { +func (e *Elasticsearch) createHTTPClient() (*http.Client, error) { tlsCfg, err := e.ClientConfig.TLSConfig() if err != nil { return nil, err } tr := &http.Transport{ - ResponseHeaderTimeout: e.HttpTimeout.Duration, + ResponseHeaderTimeout: e.HTTPTimeout.Duration, TLSClientConfig: tlsCfg, } client := &http.Client{ Transport: tr, - Timeout: e.HttpTimeout.Duration, + Timeout: e.HTTPTimeout.Duration, } return client, nil } -func (e *Elasticsearch) nodeStatsUrl(baseUrl 
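Two details of the `Gather` flow above are easy to misread inline: cluster and index stats are skipped only when `cluster_stats_only_from_master` and `local` are both enabled and the node is not the elected master, and the shard level is requested by appending `?level=shards` to the index-stats URL. A small sketch of that decision and URL construction; the server address, include list, and flag values are invented for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Invented example values; in the plugin these come from the TOML
	// config and from the per-server node/master lookup done in Gather.
	const (
		server                     = "http://localhost:9200"
		local                      = true
		clusterStatsOnlyFromMaster = true
		isMaster                   = false
		indicesLevel               = "shards"
	)
	indicesInclude := []string{"_all"}

	// Mirror of the gating used for cluster stats and index stats:
	// skip only when restricted to the master, running in local mode,
	// and this node is not the elected master.
	gather := isMaster || !clusterStatsOnlyFromMaster || !local
	fmt.Println("gather cluster/index stats:", gather) // false

	// Mirror of the index-stats URL construction.
	url := server + "/" + strings.Join(indicesInclude, ",") + "/_stats"
	if indicesLevel == "shards" {
		url += "?level=shards"
	}
	fmt.Println("index stats URL:", url) // http://localhost:9200/_all/_stats?level=shards
}
```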
string) string { +func (e *Elasticsearch) nodeStatsURL(baseURL string) string { var url string if e.Local { - url = baseUrl + statsPathLocal + url = baseURL + statsPathLocal } else { - url = baseUrl + statsPath + url = baseURL + statsPath } if len(e.NodeStats) == 0 { @@ -262,26 +345,39 @@ func (e *Elasticsearch) nodeStatsUrl(baseUrl string) string { return fmt.Sprintf("%s/%s", url, strings.Join(e.NodeStats, ",")) } +func (e *Elasticsearch) gatherNodeID(url string) (string, error) { + nodeStats := &struct { + ClusterName string `json:"cluster_name"` + Nodes map[string]*nodeStat `json:"nodes"` + }{} + if err := e.gatherJSONData(url, nodeStats); err != nil { + return "", err + } + + // Only 1 should be returned + for id := range nodeStats.Nodes { + return id, nil + } + return "", nil +} + func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) error { nodeStats := &struct { ClusterName string `json:"cluster_name"` Nodes map[string]*nodeStat `json:"nodes"` }{} - if err := e.gatherJsonData(url, nodeStats); err != nil { + if err := e.gatherJSONData(url, nodeStats); err != nil { return err } for id, n := range nodeStats.Nodes { + sort.Strings(n.Roles) tags := map[string]string{ "node_id": id, "node_host": n.Host, "node_name": n.Name, "cluster_name": nodeStats.ClusterName, - } - - if e.ClusterStats { - // check for master - e.isMaster = (id == e.catMasterResponseTokens[0]) + "node_roles": strings.Join(n.Roles, ","), } for k, v := range n.Attributes { @@ -321,24 +417,26 @@ func (e *Elasticsearch) gatherNodeStats(url string, acc telegraf.Accumulator) er func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator) error { healthStats := &clusterHealth{} - if err := e.gatherJsonData(url, healthStats); err != nil { + if err := e.gatherJSONData(url, healthStats); err != nil { return err } measurementTime := time.Now() clusterFields := map[string]interface{}{ - "status": healthStats.Status, - "status_code": mapHealthStatusToCode(healthStats.Status), - "timed_out": healthStats.TimedOut, - "number_of_nodes": healthStats.NumberOfNodes, - "number_of_data_nodes": healthStats.NumberOfDataNodes, "active_primary_shards": healthStats.ActivePrimaryShards, "active_shards": healthStats.ActiveShards, - "relocating_shards": healthStats.RelocatingShards, - "initializing_shards": healthStats.InitializingShards, - "unassigned_shards": healthStats.UnassignedShards, - "number_of_pending_tasks": healthStats.NumberOfPendingTasks, - "task_max_waiting_in_queue_millis": healthStats.TaskMaxWaitingInQueueMillis, "active_shards_percent_as_number": healthStats.ActiveShardsPercentAsNumber, + "delayed_unassigned_shards": healthStats.DelayedUnassignedShards, + "initializing_shards": healthStats.InitializingShards, + "number_of_data_nodes": healthStats.NumberOfDataNodes, + "number_of_in_flight_fetch": healthStats.NumberOfInFlightFetch, + "number_of_nodes": healthStats.NumberOfNodes, + "number_of_pending_tasks": healthStats.NumberOfPendingTasks, + "relocating_shards": healthStats.RelocatingShards, + "status": healthStats.Status, + "status_code": mapHealthStatusToCode(healthStats.Status), + "task_max_waiting_in_queue_millis": healthStats.TaskMaxWaitingInQueueMillis, + "timed_out": healthStats.TimedOut, + "unassigned_shards": healthStats.UnassignedShards, } acc.AddFields( "elasticsearch_cluster_health", @@ -349,20 +447,20 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator for name, health := range healthStats.Indices { indexFields := map[string]interface{}{ - 
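The `node_roles` tag added in `gatherNodeStats` above is built by sorting the node's role list before joining it, so the tag value stays stable no matter what order Elasticsearch reports the roles in (the tests later in this diff expect `data,ingest,master`). A standalone sketch of that normalization, with the role slice invented for illustration:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Roles as they might arrive in the /_nodes stats payload.
	roles := []string{"master", "data", "ingest"}

	// Sort first so the resulting tag value is deterministic.
	sort.Strings(roles)
	nodeRoles := strings.Join(roles, ",")

	fmt.Println(nodeRoles) // data,ingest,master
}
```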
"status": health.Status, - "status_code": mapHealthStatusToCode(health.Status), - "number_of_shards": health.NumberOfShards, - "number_of_replicas": health.NumberOfReplicas, "active_primary_shards": health.ActivePrimaryShards, "active_shards": health.ActiveShards, - "relocating_shards": health.RelocatingShards, "initializing_shards": health.InitializingShards, + "number_of_replicas": health.NumberOfReplicas, + "number_of_shards": health.NumberOfShards, + "relocating_shards": health.RelocatingShards, + "status": health.Status, + "status_code": mapHealthStatusToCode(health.Status), "unassigned_shards": health.UnassignedShards, } acc.AddFields( - "elasticsearch_indices", + "elasticsearch_cluster_health_indices", indexFields, - map[string]string{"index": name}, + map[string]string{"index": name, "name": healthStats.ClusterName}, measurementTime, ) } @@ -371,7 +469,7 @@ func (e *Elasticsearch) gatherClusterHealth(url string, acc telegraf.Accumulator func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) error { clusterStats := &clusterStats{} - if err := e.gatherJsonData(url, clusterStats); err != nil { + if err := e.gatherJSONData(url, clusterStats); err != nil { return err } now := time.Now() @@ -399,31 +497,146 @@ func (e *Elasticsearch) gatherClusterStats(url string, acc telegraf.Accumulator) return nil } -func (e *Elasticsearch) setCatMaster(url string) error { - r, err := e.client.Get(url) - if err != nil { +func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator) error { + indicesStats := &struct { + Shards map[string]interface{} `json:"_shards"` + All map[string]interface{} `json:"_all"` + Indices map[string]indexStat `json:"indices"` + }{} + + if err := e.gatherJSONData(url, indicesStats); err != nil { return err } + now := time.Now() + + // Total Shards Stats + shardsStats := map[string]interface{}{} + for k, v := range indicesStats.Shards { + shardsStats[k] = v + } + acc.AddFields("elasticsearch_indices_stats_shards_total", shardsStats, map[string]string{}, now) + + // All Stats + for m, s := range indicesStats.All { + // parse Json, ignoring strings and bools + jsonParser := jsonparser.JSONFlattener{} + err := jsonParser.FullFlattenJSON("_", s, true, true) + if err != nil { + return err + } + acc.AddFields("elasticsearch_indices_stats_"+m, jsonParser.Fields, map[string]string{"index_name": "_all"}, now) + } + + // Individual Indices stats + for id, index := range indicesStats.Indices { + indexTag := map[string]string{"index_name": id} + stats := map[string]interface{}{ + "primaries": index.Primaries, + "total": index.Total, + } + for m, s := range stats { + f := jsonparser.JSONFlattener{} + // parse Json, getting strings and bools + err := f.FullFlattenJSON("", s, true, true) + if err != nil { + return err + } + acc.AddFields("elasticsearch_indices_stats_"+m, f.Fields, indexTag, now) + } + + if e.IndicesLevel == "shards" { + for shardNumber, shards := range index.Shards { + for _, shard := range shards { + + // Get Shard Stats + flattened := jsonparser.JSONFlattener{} + err := flattened.FullFlattenJSON("", shard, true, true) + if err != nil { + return err + } + + // determine shard tag and primary/replica designation + shardType := "replica" + if flattened.Fields["routing_primary"] == true { + shardType = "primary" + } + delete(flattened.Fields, "routing_primary") + + routingState, ok := flattened.Fields["routing_state"].(string) + if ok { + flattened.Fields["routing_state"] = mapShardStatusToCode(routingState) + } + + routingNode, _ 
:= flattened.Fields["routing_node"].(string) + shardTags := map[string]string{ + "index_name": id, + "node_id": routingNode, + "shard_name": string(shardNumber), + "type": shardType, + } + + for key, field := range flattened.Fields { + switch field.(type) { + case string, bool: + delete(flattened.Fields, key) + } + } + + acc.AddFields("elasticsearch_indices_stats_shards", + flattened.Fields, + shardTags, + now) + } + } + } + } + + return nil +} + +func (e *Elasticsearch) getCatMaster(url string) (string, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", err + } + + if e.Username != "" || e.Password != "" { + req.SetBasicAuth(e.Username, e.Password) + } + + r, err := e.client.Do(req) + if err != nil { + return "", err + } defer r.Body.Close() if r.StatusCode != http.StatusOK { // NOTE: we are not going to read/discard r.Body under the assumption we'd prefer // to let the underlying transport close the connection and re-establish a new one for // future calls. - return fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) + return "", fmt.Errorf("elasticsearch: Unable to retrieve master node information. API responded with status-code %d, expected %d", r.StatusCode, http.StatusOK) } response, err := ioutil.ReadAll(r.Body) + if err != nil { + return "", err + } + + masterID := strings.Split(string(response), " ")[0] + + return masterID, nil +} + +func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error { + req, err := http.NewRequest("GET", url, nil) if err != nil { return err } - e.catMasterResponseTokens = strings.Split(string(response), " ") + if e.Username != "" || e.Password != "" { + req.SetBasicAuth(e.Username, e.Password) + } - return nil -} - -func (e *Elasticsearch) gatherJsonData(url string, v interface{}) error { - r, err := e.client.Get(url) + r, err := e.client.Do(req) if err != nil { return err } diff --git a/plugins/inputs/elasticsearch/elasticsearch_test.go b/plugins/inputs/elasticsearch/elasticsearch_test.go index 1616bfeb2..ad91c898a 100644 --- a/plugins/inputs/elasticsearch/elasticsearch_test.go +++ b/plugins/inputs/elasticsearch/elasticsearch_test.go @@ -9,6 +9,7 @@ import ( "github.com/influxdata/telegraf/testutil" "fmt" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -20,8 +21,12 @@ func defaultTags() map[string]string { "node_id": "SDFsfSDFsdfFSDSDfSFDSDF", "node_name": "test.host.com", "node_host": "test", + "node_roles": "data,ingest,master", } } +func defaultServerInfo() serverInfo { + return serverInfo{nodeID: "", masterID: "SDFsfSDFsdfFSDSDfSFDSDF"} +} type transportMock struct { statusCode int @@ -49,8 +54,8 @@ func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { func (t *transportMock) CancelRequest(_ *http.Request) { } -func checkIsMaster(es *Elasticsearch, expected bool, t *testing.T) { - if es.isMaster != expected { +func checkIsMaster(es *Elasticsearch, server string, expected bool, t *testing.T) { + if es.serverInfo[server].isMaster() != expected { msg := fmt.Sprintf("IsMaster set incorrectly") assert.Fail(t, msg) } @@ -65,7 +70,7 @@ func checkNodeStatsResult(t *testing.T, acc *testutil.Accumulator) { acc.AssertContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags) acc.AssertContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags) acc.AssertContainsTaggedFields(t, "elasticsearch_transport", 
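Both `getCatMaster` and `gatherJSONData` above switch from `client.Get` to an explicit `http.NewRequest` so that HTTP Basic Authentication can be attached when `username`/`password` are configured. A self-contained sketch of that request pattern against a throwaway test server; the server, credentials, path, and handler are invented for illustration:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Throwaway server that only answers when the expected credentials arrive.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		user, pass, ok := r.BasicAuth()
		if !ok || user != "telegraf" || pass != "secret" {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		fmt.Fprint(w, `{"cluster_name":"example"}`)
	}))
	defer ts.Close()

	// Same pattern as the plugin: build the request, attach credentials,
	// then execute it with the configured client.
	req, err := http.NewRequest("GET", ts.URL+"/_cluster/stats", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("telegraf", "secret")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 200 {"cluster_name":"example"}
}
```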
nodestatsTransportExpected, tags) - acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags) + acc.AssertContainsTaggedFields(t, "elasticsearch_http", nodestatsHTTPExpected, tags) acc.AssertContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags) } @@ -73,13 +78,15 @@ func TestGather(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator if err := acc.GatherError(es.Gather); err != nil { t.Fatal(err) } - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) checkNodeStatsResult(t, &acc) } @@ -88,13 +95,15 @@ func TestGatherIndividualStats(t *testing.T) { es.Servers = []string{"http://example.com:9200"} es.NodeStats = []string{"jvm", "process"} es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponseJVMProcess) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator if err := acc.GatherError(es.Gather); err != nil { t.Fatal(err) } - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) tags := defaultTags() acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", nodestatsIndicesExpected, tags) @@ -104,7 +113,7 @@ func TestGatherIndividualStats(t *testing.T) { acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_thread_pool", nodestatsThreadPoolExpected, tags) acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_fs", nodestatsFsExpected, tags) acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_transport", nodestatsTransportExpected, tags) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHttpExpected, tags) + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_http", nodestatsHTTPExpected, tags) acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_breakers", nodestatsBreakersExpected, tags) } @@ -112,13 +121,15 @@ func TestGatherNodeStats(t *testing.T) { es := newElasticsearchWithClient() es.Servers = []string{"http://example.com:9200"} es.client.Transport = newTransportMock(http.StatusOK, nodeStatsResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator if err := es.gatherNodeStats("junk", &acc); err != nil { t.Fatal(err) } - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) checkNodeStatsResult(t, &acc) } @@ -128,21 +139,23 @@ func TestGatherClusterHealthEmptyClusterHealth(t *testing.T) { es.ClusterHealth = true es.ClusterHealthLevel = "" es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, map[string]string{"name": "elasticsearch_telegraf"}) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v1IndexExpected, map[string]string{"index": "v1"}) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", 
+ acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v2IndexExpected, map[string]string{"index": "v2"}) } @@ -153,21 +166,23 @@ func TestGatherClusterHealthSpecificClusterHealth(t *testing.T) { es.ClusterHealth = true es.ClusterHealthLevel = "cluster" es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, map[string]string{"name": "elasticsearch_telegraf"}) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v1IndexExpected, map[string]string{"index": "v1"}) - acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertDoesNotContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v2IndexExpected, map[string]string{"index": "v2"}) } @@ -178,23 +193,25 @@ func TestGatherClusterHealthAlsoIndicesHealth(t *testing.T) { es.ClusterHealth = true es.ClusterHealthLevel = "indices" es.client.Transport = newTransportMock(http.StatusOK, clusterHealthResponseWithIndices) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() var acc testutil.Accumulator require.NoError(t, es.gatherClusterHealth("junk", &acc)) - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health", clusterHealthExpected, map[string]string{"name": "elasticsearch_telegraf"}) - acc.AssertContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v1IndexExpected, - map[string]string{"index": "v1"}) + map[string]string{"index": "v1", "name": "elasticsearch_telegraf"}) - acc.AssertContainsTaggedFields(t, "elasticsearch_indices", + acc.AssertContainsTaggedFields(t, "elasticsearch_cluster_health_indices", v2IndexExpected, - map[string]string{"index": "v2"}) + map[string]string{"index": "v2", "name": "elasticsearch_telegraf"}) } func TestGatherClusterStatsMaster(t *testing.T) { @@ -202,13 +219,18 @@ func TestGatherClusterStatsMaster(t *testing.T) { es := newElasticsearchWithClient() es.ClusterStats = true es.Servers = []string{"http://example.com:9200"} + es.serverInfo = make(map[string]serverInfo) + info := serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster es.client.Transport = newTransportMock(http.StatusOK, IsMasterResult) - require.NoError(t, es.setCatMaster("junk")) + masterID, err := es.getCatMaster("junk") + require.NoError(t, err) + info.masterID = masterID + es.serverInfo["http://example.com:9200"] = info IsMasterResultTokens := strings.Split(string(IsMasterResult), " ") - if es.catMasterResponseTokens[0] != IsMasterResultTokens[0] { + if masterID != IsMasterResultTokens[0] { msg := fmt.Sprintf("catmaster is incorrect") assert.Fail(t, msg) } @@ -221,7 +243,7 @@ func TestGatherClusterStatsMaster(t *testing.T) { t.Fatal(err) } - checkIsMaster(es, true, t) + checkIsMaster(es, es.Servers[0], true, t) checkNodeStatsResult(t, &acc) // now test the clusterstats method @@ -243,13 +265,16 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { es := newElasticsearchWithClient() 
es.ClusterStats = true es.Servers = []string{"http://example.com:9200"} + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = serverInfo{nodeID: "SDFsfSDFsdfFSDSDfSFDSDF", masterID: ""} // first get catMaster es.client.Transport = newTransportMock(http.StatusOK, IsNotMasterResult) - require.NoError(t, es.setCatMaster("junk")) + masterID, err := es.getCatMaster("junk") + require.NoError(t, err) IsNotMasterResultTokens := strings.Split(string(IsNotMasterResult), " ") - if es.catMasterResponseTokens[0] != IsNotMasterResultTokens[0] { + if masterID != IsNotMasterResultTokens[0] { msg := fmt.Sprintf("catmaster is incorrect") assert.Fail(t, msg) } @@ -263,10 +288,67 @@ func TestGatherClusterStatsNonMaster(t *testing.T) { } // ensure flag is clear so Cluster Stats would not be done - checkIsMaster(es, false, t) + checkIsMaster(es, es.Servers[0], false, t) checkNodeStatsResult(t, &acc) } +func TestGatherClusterIndicesStats(t *testing.T) { + es := newElasticsearchWithClient() + es.IndicesInclude = []string{"_all"} + es.Servers = []string{"http://example.com:9200"} + es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() + + var acc testutil.Accumulator + if err := es.gatherIndicesStats("junk", &acc); err != nil { + t.Fatal(err) + } + + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter"}) +} + +func TestGatherClusterIndiceShardsStats(t *testing.T) { + es := newElasticsearchWithClient() + es.IndicesLevel = "shards" + es.Servers = []string{"http://example.com:9200"} + es.client.Transport = newTransportMock(http.StatusOK, clusterIndicesShardsResponse) + es.serverInfo = make(map[string]serverInfo) + es.serverInfo["http://example.com:9200"] = defaultServerInfo() + + var acc testutil.Accumulator + if err := es.gatherIndicesStats("junk", &acc); err != nil { + t.Fatal(err) + } + + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_primaries", + clusterIndicesExpected, + map[string]string{"index_name": "twitter"}) + + primaryTags := map[string]string{ + "index_name": "twitter", + "node_id": "oqvR8I1dTpONvwRM30etww", + "shard_name": "0", + "type": "primary", + } + + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_shards", + clusterIndicesPrimaryShardsExpected, + primaryTags) + + replicaTags := map[string]string{ + "index_name": "twitter", + "node_id": "oqvR8I1dTpONvwRM30etww", + "shard_name": "1", + "type": "replica", + } + acc.AssertContainsTaggedFields(t, "elasticsearch_indices_stats_shards", + clusterIndicesReplicaShardsExpected, + replicaTags) +} + func newElasticsearchWithClient() *Elasticsearch { es := NewElasticsearch() es.client = &http.Client{} diff --git a/plugins/inputs/elasticsearch/testdata_test.go b/plugins/inputs/elasticsearch/testdata_test.go index 622abeaf8..a04fe1521 100644 --- a/plugins/inputs/elasticsearch/testdata_test.go +++ b/plugins/inputs/elasticsearch/testdata_test.go @@ -7,11 +7,13 @@ const clusterHealthResponse = ` "timed_out": false, "number_of_nodes": 3, "number_of_data_nodes": 3, + "number_of_in_flight_fetch": 0, "active_primary_shards": 5, "active_shards": 15, "relocating_shards": 0, "initializing_shards": 0, "unassigned_shards": 0, + "delayed_unassigned_shards": 0, "number_of_pending_tasks": 0, "task_max_waiting_in_queue_millis": 0, "active_shards_percent_as_number": 100.0 @@ -25,11 +27,13 
@@ const clusterHealthResponseWithIndices = ` "timed_out": false, "number_of_nodes": 3, "number_of_data_nodes": 3, + "number_of_in_flight_fetch": 0, "active_primary_shards": 5, "active_shards": 15, "relocating_shards": 0, "initializing_shards": 0, "unassigned_shards": 0, + "delayed_unassigned_shards": 0, "number_of_pending_tasks": 0, "task_max_waiting_in_queue_millis": 0, "active_shards_percent_as_number": 100.0, @@ -64,11 +68,13 @@ var clusterHealthExpected = map[string]interface{}{ "timed_out": false, "number_of_nodes": 3, "number_of_data_nodes": 3, + "number_of_in_flight_fetch": 0, "active_primary_shards": 5, "active_shards": 15, "relocating_shards": 0, "initializing_shards": 0, "unassigned_shards": 0, + "delayed_unassigned_shards": 0, "number_of_pending_tasks": 0, "task_max_waiting_in_queue_millis": 0, "active_shards_percent_as_number": 100.0, @@ -103,6 +109,421 @@ const nodeStatsResponse = ` "cluster_name": "es-testcluster", "nodes": { "SDFsfSDFsdfFSDSDfSFDSDF": { + "timestamp": 1436365550135, + "name": "test.host.com", + "transport_address": "inet[/127.0.0.1:9300]", + "host": "test", + "ip": [ + "inet[/127.0.0.1:9300]", + "NONE" + ], + "roles": [ + "master", + "data", + "ingest" + ], + "attributes": { + "master": "true" + }, + "indices": { + "docs": { + "count": 29652, + "deleted": 5229 + }, + "store": { + "size_in_bytes": 37715234, + "throttle_time_in_millis": 215 + }, + "indexing": { + "index_total": 84790, + "index_time_in_millis": 29680, + "index_current": 0, + "delete_total": 13879, + "delete_time_in_millis": 1139, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 1, + "time_in_millis": 2, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 1, + "missing_time_in_millis": 2, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 1452, + "query_time_in_millis": 5695, + "query_current": 0, + "fetch_total": 414, + "fetch_time_in_millis": 146, + "fetch_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 133, + "total_time_in_millis": 21060, + "total_docs": 203672, + "total_size_in_bytes": 142900226 + }, + "refresh": { + "total": 1076, + "total_time_in_millis": 20078 + }, + "flush": { + "total": 115, + "total_time_in_millis": 2401 + }, + "warmer": { + "current": 0, + "total": 2319, + "total_time_in_millis": 448 + }, + "filter_cache": { + "memory_size_in_bytes": 7384, + "evictions": 0 + }, + "id_cache": { + "memory_size_in_bytes": 0 + }, + "fielddata": { + "memory_size_in_bytes": 12996, + "evictions": 0 + }, + "percolate": { + "total": 0, + "time_in_millis": 0, + "current": 0, + "memory_size_in_bytes": -1, + "memory_size": "-1b", + "queries": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 134, + "memory_in_bytes": 1285212, + "index_writer_memory_in_bytes": 0, + "index_writer_max_memory_in_bytes": 172368955, + "version_map_memory_in_bytes": 611844, + "fixed_bit_set_memory_in_bytes": 0 + }, + "translog": { + "operations": 17702, + "size_in_bytes": 17 + }, + "suggest": { + "total": 0, + "time_in_millis": 0, + "current": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "os": { + "timestamp": 1436460392944, + "load_average": [ + 0.01, + 0.04, + 0.05 + ], + "mem": { + "free_in_bytes": 477761536, + "used_in_bytes": 
1621868544, + "free_percent": 74, + "used_percent": 25, + "actual_free_in_bytes": 1565470720, + "actual_used_in_bytes": 534159360 + }, + "swap": { + "used_in_bytes": 0, + "free_in_bytes": 487997440 + } + }, + "process": { + "timestamp": 1436460392945, + "open_file_descriptors": 160, + "cpu": { + "percent": 2, + "sys_in_millis": 1870, + "user_in_millis": 13610, + "total_in_millis": 15480 + }, + "mem": { + "total_virtual_in_bytes": 4747890688 + } + }, + "jvm": { + "timestamp": 1436460392945, + "uptime_in_millis": 202245, + "mem": { + "heap_used_in_bytes": 52709568, + "heap_used_percent": 5, + "heap_committed_in_bytes": 259522560, + "heap_max_in_bytes": 1038876672, + "non_heap_used_in_bytes": 39634576, + "non_heap_committed_in_bytes": 40841216, + "pools": { + "young": { + "used_in_bytes": 32685760, + "max_in_bytes": 279183360, + "peak_used_in_bytes": 71630848, + "peak_max_in_bytes": 279183360 + }, + "survivor": { + "used_in_bytes": 8912880, + "max_in_bytes": 34865152, + "peak_used_in_bytes": 8912888, + "peak_max_in_bytes": 34865152 + }, + "old": { + "used_in_bytes": 11110928, + "max_in_bytes": 724828160, + "peak_used_in_bytes": 14354608, + "peak_max_in_bytes": 724828160 + } + } + }, + "threads": { + "count": 44, + "peak_count": 45 + }, + "gc": { + "collectors": { + "young": { + "collection_count": 2, + "collection_time_in_millis": 98 + }, + "old": { + "collection_count": 1, + "collection_time_in_millis": 24 + } + } + }, + "buffer_pools": { + "direct": { + "count": 40, + "used_in_bytes": 6304239, + "total_capacity_in_bytes": 6304239 + }, + "mapped": { + "count": 0, + "used_in_bytes": 0, + "total_capacity_in_bytes": 0 + } + } + }, + "thread_pool": { + "percolate": { + "threads": 123, + "queue": 23, + "active": 13, + "rejected": 235, + "largest": 23, + "completed": 33 + }, + "fetch_shard_started": { + "threads": 3, + "queue": 1, + "active": 5, + "rejected": 6, + "largest": 4, + "completed": 54 + }, + "listener": { + "threads": 1, + "queue": 2, + "active": 4, + "rejected": 8, + "largest": 1, + "completed": 1 + }, + "index": { + "threads": 6, + "queue": 8, + "active": 4, + "rejected": 2, + "largest": 3, + "completed": 6 + }, + "refresh": { + "threads": 23, + "queue": 7, + "active": 3, + "rejected": 4, + "largest": 8, + "completed": 3 + }, + "suggest": { + "threads": 2, + "queue": 7, + "active": 2, + "rejected": 1, + "largest": 8, + "completed": 3 + }, + "generic": { + "threads": 1, + "queue": 4, + "active": 6, + "rejected": 3, + "largest": 2, + "completed": 27 + }, + "warmer": { + "threads": 2, + "queue": 7, + "active": 3, + "rejected": 2, + "largest": 3, + "completed": 1 + }, + "search": { + "threads": 5, + "queue": 7, + "active": 2, + "rejected": 7, + "largest": 2, + "completed": 4 + }, + "flush": { + "threads": 3, + "queue": 8, + "active": 0, + "rejected": 1, + "largest": 5, + "completed": 3 + }, + "optimize": { + "threads": 3, + "queue": 4, + "active": 1, + "rejected": 2, + "largest": 7, + "completed": 3 + }, + "fetch_shard_store": { + "threads": 1, + "queue": 7, + "active": 4, + "rejected": 2, + "largest": 4, + "completed": 1 + }, + "management": { + "threads": 2, + "queue": 3, + "active": 1, + "rejected": 6, + "largest": 2, + "completed": 22 + }, + "get": { + "threads": 1, + "queue": 8, + "active": 4, + "rejected": 3, + "largest": 2, + "completed": 1 + }, + "merge": { + "threads": 6, + "queue": 4, + "active": 5, + "rejected": 2, + "largest": 5, + "completed": 1 + }, + "bulk": { + "threads": 4, + "queue": 5, + "active": 7, + "rejected": 3, + "largest": 1, + "completed": 4 + }, + "snapshot": { 
+ "threads": 8, + "queue": 5, + "active": 6, + "rejected": 2, + "largest": 1, + "completed": 0 + } + }, + "fs": { + "timestamp": 1436460392946, + "total": { + "total_in_bytes": 19507089408, + "free_in_bytes": 16909316096, + "available_in_bytes": 15894814720 + }, + "data": [ + { + "path": "/usr/share/elasticsearch/data/elasticsearch/nodes/0", + "mount": "/usr/share/elasticsearch/data", + "type": "ext4", + "total_in_bytes": 19507089408, + "free_in_bytes": 16909316096, + "available_in_bytes": 15894814720 + } + ] + }, + "transport": { + "server_open": 13, + "rx_count": 6, + "rx_size_in_bytes": 1380, + "tx_count": 6, + "tx_size_in_bytes": 1380 + }, + "http": { + "current_open": 3, + "total_opened": 3 + }, + "breakers": { + "fielddata": { + "limit_size_in_bytes": 623326003, + "limit_size": "594.4mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.03, + "tripped": 0 + }, + "request": { + "limit_size_in_bytes": 415550668, + "limit_size": "396.2mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.0, + "tripped": 0 + }, + "parent": { + "limit_size_in_bytes": 727213670, + "limit_size": "693.5mb", + "estimated_size_in_bytes": 0, + "estimated_size": "0b", + "overhead": 1.0, + "tripped": 0 + } + } + }, + "SDFsfSDFsdfFSDSDfSPOJUY": { "timestamp": 1436365550135, "name": "test.host.com", "transport_address": "inet[/127.0.0.1:9300]", @@ -529,6 +950,11 @@ const nodeStatsResponseJVMProcess = ` "inet[/127.0.0.1:9300]", "NONE" ], + "roles": [ + "master", + "data", + "ingest" + ], "attributes": { "master": "true" }, @@ -865,7 +1291,7 @@ var nodestatsTransportExpected = map[string]interface{}{ "tx_size_in_bytes": float64(1380), } -var nodestatsHttpExpected = map[string]interface{}{ +var nodestatsHTTPExpected = map[string]interface{}{ "current_open": float64(3), "total_opened": float64(3), } @@ -1154,3 +1580,2268 @@ var clusterstatsNodesExpected = map[string]interface{}{ const IsMasterResult = "SDFsfSDFsdfFSDSDfSFDSDF 10.206.124.66 10.206.124.66 test.host.com " const IsNotMasterResult = "junk 10.206.124.66 10.206.124.66 test.junk.com " + +const clusterIndicesResponse = ` +{ + "_shards": { + "total": 9, + "successful": 6, + "failed": 0 + }, + "_all": { + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + 
"flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + 
"uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "indices": { + "twitter": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": 
{ + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + } + } +}` + +var clusterIndicesExpected = map[string]interface{}{ + "completion_size_in_bytes": float64(0), + "docs_count": float64(999), + "docs_deleted": float64(0), + "fielddata_evictions": float64(0), + "fielddata_memory_size_in_bytes": float64(0), + "flush_periodic": float64(0), + "flush_total": float64(0), + "flush_total_time_in_millis": float64(0), + "get_current": float64(0), + "get_exists_time_in_millis": float64(0), + "get_exists_total": float64(0), + "get_missing_time_in_millis": float64(0), + "get_missing_total": float64(0), + "get_time_in_millis": float64(0), + "get_total": float64(0), + "indexing_delete_current": float64(0), + "indexing_delete_time_in_millis": float64(0), + "indexing_delete_total": float64(0), + "indexing_index_current": float64(0), + "indexing_index_failed": float64(0), + "indexing_index_time_in_millis": float64(548), + "indexing_index_total": float64(999), + "indexing_is_throttled": false, + "indexing_noop_update_total": float64(0), + "indexing_throttle_time_in_millis": float64(0), + "merges_current": float64(0), + "merges_current_docs": float64(0), + "merges_current_size_in_bytes": float64(0), + "merges_total": float64(0), + "merges_total_auto_throttle_in_bytes": float64(62914560), + "merges_total_docs": float64(0), + "merges_total_size_in_bytes": float64(0), + "merges_total_stopped_time_in_millis": float64(0), + "merges_total_throttled_time_in_millis": float64(0), + "merges_total_time_in_millis": float64(0), + "query_cache_cache_count": float64(0), + 
"query_cache_cache_size": float64(0), + "query_cache_evictions": float64(0), + "query_cache_hit_count": float64(0), + "query_cache_memory_size_in_bytes": float64(0), + "query_cache_miss_count": float64(0), + "query_cache_total_count": float64(0), + "recovery_current_as_source": float64(0), + "recovery_current_as_target": float64(0), + "recovery_throttle_time_in_millis": float64(0), + "refresh_external_total": float64(9), + "refresh_external_total_time_in_millis": float64(258), + "refresh_listeners": float64(0), + "refresh_total": float64(9), + "refresh_total_time_in_millis": float64(256), + "request_cache_evictions": float64(0), + "request_cache_hit_count": float64(0), + "request_cache_memory_size_in_bytes": float64(0), + "request_cache_miss_count": float64(0), + "search_fetch_current": float64(0), + "search_fetch_time_in_millis": float64(0), + "search_fetch_total": float64(0), + "search_open_contexts": float64(0), + "search_query_current": float64(0), + "search_query_time_in_millis": float64(0), + "search_query_total": float64(0), + "search_scroll_current": float64(0), + "search_scroll_time_in_millis": float64(0), + "search_scroll_total": float64(0), + "search_suggest_current": float64(0), + "search_suggest_time_in_millis": float64(0), + "search_suggest_total": float64(0), + "segments_count": float64(3), + "segments_doc_values_memory_in_bytes": float64(204), + "segments_fixed_bit_set_memory_in_bytes": float64(0), + "segments_index_writer_memory_in_bytes": float64(0), + "segments_max_unsafe_auto_id_timestamp": float64(-1), + "segments_memory_in_bytes": float64(12849), + "segments_norms_memory_in_bytes": float64(1152), + "segments_points_memory_in_bytes": float64(9), + "segments_stored_fields_memory_in_bytes": float64(904), + "segments_term_vectors_memory_in_bytes": float64(0), + "segments_terms_memory_in_bytes": float64(10580), + "segments_version_map_memory_in_bytes": float64(0), + "store_size_in_bytes": float64(267500), + "translog_earliest_last_modified_age": float64(0), + "translog_operations": float64(999), + "translog_size_in_bytes": float64(226444), + "translog_uncommitted_operations": float64(999), + "translog_uncommitted_size_in_bytes": float64(226444), + "warmer_current": float64(0), + "warmer_total": float64(6), + "warmer_total_time_in_millis": float64(0), +} + +const clusterIndicesShardsResponse = ` +{ + "_shards": { + "total": 9, + "successful": 6, + "failed": 0 + }, + "_all": { + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + 
"total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + 
"index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + } + }, + "indices": { + "twitter": { + "uuid": "AtNrbbl_QhirW0p7Fnq26A", + "primaries": { + "docs": { + "count": 999, + "deleted": 0 + }, + "store": { + "size_in_bytes": 267500 + }, + "indexing": { + "index_total": 999, + "index_time_in_millis": 548, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 62914560 + }, + "refresh": { + "total": 9, + "total_time_in_millis": 256, + "external_total": 9, + "external_total_time_in_millis": 258, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 6, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 3, + "memory_in_bytes": 12849, + "terms_memory_in_bytes": 10580, + "stored_fields_memory_in_bytes": 904, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 1152, + "points_memory_in_bytes": 9, + "doc_values_memory_in_bytes": 204, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 999, + "size_in_bytes": 226444, + "uncommitted_operations": 999, + "uncommitted_size_in_bytes": 226444, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "total": { + "docs": { + "count": 1998, + "deleted": 0 + }, + "store": { + "size_in_bytes": 535000 + }, + "indexing": { + "index_total": 1998, + "index_time_in_millis": 793, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + 
"is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 125829120 + }, + "refresh": { + "total": 18, + "total_time_in_millis": 518, + "external_total": 18, + "external_total_time_in_millis": 522, + "listeners": 0 + }, + "flush": { + "total": 0, + "periodic": 0, + "total_time_in_millis": 0 + }, + "warmer": { + "current": 0, + "total": 12, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 6, + "memory_in_bytes": 25698, + "terms_memory_in_bytes": 21160, + "stored_fields_memory_in_bytes": 1808, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 2304, + "points_memory_in_bytes": 18, + "doc_values_memory_in_bytes": 408, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 1998, + "size_in_bytes": 452888, + "uncommitted_operations": 1998, + "uncommitted_size_in_bytes": 452888, + "earliest_last_modified_age": 0 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + } + }, + "shards": { + "0": [ + { + "routing": { + "state": "STARTED", + "primary": true, + "node": "oqvR8I1dTpONvwRM30etww", + "relocating_node": null + }, + "docs": { + "count": 340, + "deleted": 0 + }, + "store": { + "size_in_bytes": 90564 + }, + "indexing": { + "index_total": 340, + "index_time_in_millis": 176, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + 
"total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 103, + "external_total": 4, + "external_total_time_in_millis": 105, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 32 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4301, + "terms_memory_in_bytes": 3534, + "stored_fields_memory_in_bytes": 312, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 340, + "size_in_bytes": 77158, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936870 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "13gxQDHZ96BnNkzSgEdElQ==", + "generation": 4, + "user_data": { + "local_checkpoint": "339", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "340", + "translog_uuid": "4rp02VCQRTSJXgochWk3Hg", + "history_uuid": "-od5QvNmQlero8jatbG-5w", + "sync_id": "KKglZYafSaWN_MFUbpNviA", + "translog_generation": "3", + "max_seq_no": "339" + }, + "num_docs": 340 + }, + "seq_no": { + "max_seq_no": 339, + "local_checkpoint": 339, + "global_checkpoint": 339 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + }, + { + "routing": { + "state": "STARTED", + "primary": false, + "node": "0jfDeZxuTsGblcDGa39DzQ", + "relocating_node": null + }, + "docs": { + "count": 340, + "deleted": 0 + }, + "store": { + "size_in_bytes": 90564 + }, + "indexing": { + "index_total": 340, + "index_time_in_millis": 99, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 139, + 
"external_total": 4, + "external_total_time_in_millis": 140, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 34 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4301, + "terms_memory_in_bytes": 3534, + "stored_fields_memory_in_bytes": 312, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 340, + "size_in_bytes": 77158, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936653 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "A8QO9SiMWYX000riUOApBg==", + "generation": 5, + "user_data": { + "local_checkpoint": "339", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "340", + "translog_uuid": "9kWpEKQyQ3yIUwwEp4fP8A", + "history_uuid": "-od5QvNmQlero8jatbG-5w", + "sync_id": "KKglZYafSaWN_MFUbpNviA", + "translog_generation": "3", + "max_seq_no": "339" + }, + "num_docs": 340 + }, + "seq_no": { + "max_seq_no": 339, + "local_checkpoint": 339, + "global_checkpoint": 339 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + } + ], + "1": [ + { + "routing": { + "state": "STARTED", + "primary": false, + "node": "oqvR8I1dTpONvwRM30etww", + "relocating_node": null + }, + "docs": { + "count": 352, + "deleted": 0 + }, + "store": { + "size_in_bytes": 94584 + }, + "indexing": { + "index_total": 352, + "index_time_in_millis": 66, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 104, + "external_total": 4, + "external_total_time_in_millis": 106, + "listeners": 0 + }, + "flush": { + "total": 
1, + "periodic": 0, + "total_time_in_millis": 26 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4280, + "terms_memory_in_bytes": 3529, + "stored_fields_memory_in_bytes": 296, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 352, + "size_in_bytes": 79980, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936144 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "13gxQDHZ96BnNkzSgEdEkg==", + "generation": 5, + "user_data": { + "local_checkpoint": "351", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "352", + "translog_uuid": "SjKxb5TIRqCinxWbqVBo-g", + "history_uuid": "3SAavs9KTPm-jhaioYg4UA", + "sync_id": "swZVzk6tShS0tcbBQt9AjA", + "translog_generation": "3", + "max_seq_no": "351" + }, + "num_docs": 352 + }, + "seq_no": { + "max_seq_no": 351, + "local_checkpoint": 351, + "global_checkpoint": 351 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + }, + { + "routing": { + "state": "STARTED", + "primary": true, + "node": "0jfDeZxuTsGblcDGa39DzQ", + "relocating_node": null + }, + "docs": { + "count": 352, + "deleted": 0 + }, + "store": { + "size_in_bytes": 94584 + }, + "indexing": { + "index_total": 352, + "index_time_in_millis": 154, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 74, + "external_total": 4, + "external_total_time_in_millis": 74, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 29 + }, + "warmer": { + "current": 0, + "total": 3, + 
"total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4280, + "terms_memory_in_bytes": 3529, + "stored_fields_memory_in_bytes": 296, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 352, + "size_in_bytes": 79980, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936839 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "A8QO9SiMWYX000riUOApAw==", + "generation": 4, + "user_data": { + "local_checkpoint": "351", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "352", + "translog_uuid": "GpauXMbxQpWKUYGYqQUIdQ", + "history_uuid": "3SAavs9KTPm-jhaioYg4UA", + "sync_id": "swZVzk6tShS0tcbBQt9AjA", + "translog_generation": "3", + "max_seq_no": "351" + }, + "num_docs": 352 + }, + "seq_no": { + "max_seq_no": 351, + "local_checkpoint": 351, + "global_checkpoint": 351 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + } + ], + "2": [ + { + "routing": { + "state": "STARTED", + "primary": true, + "node": "oqvR8I1dTpONvwRM30etww", + "relocating_node": null + }, + "docs": { + "count": 307, + "deleted": 0 + }, + "store": { + "size_in_bytes": 82727 + }, + "indexing": { + "index_total": 307, + "index_time_in_millis": 218, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 86, + "external_total": 4, + "external_total_time_in_millis": 87, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 33 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + 
"hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + "memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4268, + "terms_memory_in_bytes": 3517, + "stored_fields_memory_in_bytes": 296, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 307, + "size_in_bytes": 69471, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936881 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "13gxQDHZ96BnNkzSgEdElg==", + "generation": 4, + "user_data": { + "local_checkpoint": "306", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "307", + "translog_uuid": "Y0a3bdIQTD2Ir6Ex9J3gSQ", + "history_uuid": "WmsCMyRyRaGz9mnR50wYFA", + "sync_id": "nvNppgfgTp63llS8r-Pwiw", + "translog_generation": "3", + "max_seq_no": "306" + }, + "num_docs": 307 + }, + "seq_no": { + "max_seq_no": 306, + "local_checkpoint": 306, + "global_checkpoint": 306 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + }, + { + "routing": { + "state": "STARTED", + "primary": false, + "node": "0jfDeZxuTsGblcDGa39DzQ", + "relocating_node": null + }, + "docs": { + "count": 307, + "deleted": 0 + }, + "store": { + "size_in_bytes": 82727 + }, + "indexing": { + "index_total": 307, + "index_time_in_millis": 80, + "index_current": 0, + "index_failed": 0, + "delete_total": 0, + "delete_time_in_millis": 0, + "delete_current": 0, + "noop_update_total": 0, + "is_throttled": false, + "throttle_time_in_millis": 0 + }, + "get": { + "total": 0, + "time_in_millis": 0, + "exists_total": 0, + "exists_time_in_millis": 0, + "missing_total": 0, + "missing_time_in_millis": 0, + "current": 0 + }, + "search": { + "open_contexts": 0, + "query_total": 0, + "query_time_in_millis": 0, + "query_current": 0, + "fetch_total": 0, + "fetch_time_in_millis": 0, + "fetch_current": 0, + "scroll_total": 0, + "scroll_time_in_millis": 0, + "scroll_current": 0, + "suggest_total": 0, + "suggest_time_in_millis": 0, + "suggest_current": 0 + }, + "merges": { + "current": 0, + "current_docs": 0, + "current_size_in_bytes": 0, + "total": 0, + "total_time_in_millis": 0, + "total_docs": 0, + "total_size_in_bytes": 0, + "total_stopped_time_in_millis": 0, + "total_throttled_time_in_millis": 0, + "total_auto_throttle_in_bytes": 20971520 + }, + "refresh": { + "total": 6, + "total_time_in_millis": 33, + "external_total": 4, + "external_total_time_in_millis": 30, + "listeners": 0 + }, + "flush": { + "total": 1, + "periodic": 0, + "total_time_in_millis": 37 + }, + "warmer": { + "current": 0, + "total": 3, + "total_time_in_millis": 0 + }, + "query_cache": { + "memory_size_in_bytes": 0, + "total_count": 0, + "hit_count": 0, + "miss_count": 0, + "cache_size": 0, + "cache_count": 0, + "evictions": 0 + }, + "fielddata": { + 
"memory_size_in_bytes": 0, + "evictions": 0 + }, + "completion": { + "size_in_bytes": 0 + }, + "segments": { + "count": 1, + "memory_in_bytes": 4268, + "terms_memory_in_bytes": 3517, + "stored_fields_memory_in_bytes": 296, + "term_vectors_memory_in_bytes": 0, + "norms_memory_in_bytes": 384, + "points_memory_in_bytes": 3, + "doc_values_memory_in_bytes": 68, + "index_writer_memory_in_bytes": 0, + "version_map_memory_in_bytes": 0, + "fixed_bit_set_memory_in_bytes": 0, + "max_unsafe_auto_id_timestamp": -1, + "file_sizes": {} + }, + "translog": { + "operations": 307, + "size_in_bytes": 69471, + "uncommitted_operations": 0, + "uncommitted_size_in_bytes": 55, + "earliest_last_modified_age": 936696 + }, + "request_cache": { + "memory_size_in_bytes": 0, + "evictions": 0, + "hit_count": 0, + "miss_count": 0 + }, + "recovery": { + "current_as_source": 0, + "current_as_target": 0, + "throttle_time_in_millis": 0 + }, + "commit": { + "id": "A8QO9SiMWYX000riUOApBw==", + "generation": 5, + "user_data": { + "local_checkpoint": "306", + "max_unsafe_auto_id_timestamp": "-1", + "min_retained_seq_no": "307", + "translog_uuid": "s62inR7FRA2p86axtAIvgA", + "history_uuid": "WmsCMyRyRaGz9mnR50wYFA", + "sync_id": "nvNppgfgTp63llS8r-Pwiw", + "translog_generation": "3", + "max_seq_no": "306" + }, + "num_docs": 307 + }, + "seq_no": { + "max_seq_no": 306, + "local_checkpoint": 306, + "global_checkpoint": 306 + }, + "retention_leases": { + "primary_term": 1, + "version": 0, + "leases": [] + }, + "shard_path": { + "state_path": "/usr/share/elasticsearch/data/nodes/0", + "data_path": "/usr/share/elasticsearch/data/nodes/0", + "is_custom_data_path": false + } + } + ] + } + } + } +}` + +var clusterIndicesPrimaryShardsExpected = map[string]interface{}{ + "commit_generation": float64(4), + "commit_num_docs": float64(340), + "completion_size_in_bytes": float64(0), + "docs_count": float64(340), + "docs_deleted": float64(0), + "fielddata_evictions": float64(0), + "fielddata_memory_size_in_bytes": float64(0), + "flush_periodic": float64(0), + "flush_total": float64(1), + "flush_total_time_in_millis": float64(32), + "get_current": float64(0), + "get_exists_time_in_millis": float64(0), + "get_exists_total": float64(0), + "get_missing_time_in_millis": float64(0), + "get_missing_total": float64(0), + "get_time_in_millis": float64(0), + "get_total": float64(0), + "indexing_delete_current": float64(0), + "indexing_delete_time_in_millis": float64(0), + "indexing_delete_total": float64(0), + "indexing_index_current": float64(0), + "indexing_index_failed": float64(0), + "indexing_index_time_in_millis": float64(176), + "indexing_index_total": float64(340), + "indexing_noop_update_total": float64(0), + "indexing_throttle_time_in_millis": float64(0), + "merges_current": float64(0), + "merges_current_docs": float64(0), + "merges_current_size_in_bytes": float64(0), + "merges_total": float64(0), + "merges_total_auto_throttle_in_bytes": float64(2.097152e+07), + "merges_total_docs": float64(0), + "merges_total_size_in_bytes": float64(0), + "merges_total_stopped_time_in_millis": float64(0), + "merges_total_throttled_time_in_millis": float64(0), + "merges_total_time_in_millis": float64(0), + "query_cache_cache_count": float64(0), + "query_cache_cache_size": float64(0), + "query_cache_evictions": float64(0), + "query_cache_hit_count": float64(0), + "query_cache_memory_size_in_bytes": float64(0), + "query_cache_miss_count": float64(0), + "query_cache_total_count": float64(0), + "recovery_current_as_source": float64(0), + "recovery_current_as_target": 
float64(0), + "recovery_throttle_time_in_millis": float64(0), + "refresh_external_total": float64(4), + "refresh_external_total_time_in_millis": float64(105), + "refresh_listeners": float64(0), + "refresh_total": float64(6), + "refresh_total_time_in_millis": float64(103), + "request_cache_evictions": float64(0), + "request_cache_hit_count": float64(0), + "request_cache_memory_size_in_bytes": float64(0), + "request_cache_miss_count": float64(0), + "retention_leases_primary_term": float64(1), + "retention_leases_version": float64(0), + "routing_state": int(3), + "search_fetch_current": float64(0), + "search_fetch_time_in_millis": float64(0), + "search_fetch_total": float64(0), + "search_open_contexts": float64(0), + "search_query_current": float64(0), + "search_query_time_in_millis": float64(0), + "search_query_total": float64(0), + "search_scroll_current": float64(0), + "search_scroll_time_in_millis": float64(0), + "search_scroll_total": float64(0), + "search_suggest_current": float64(0), + "search_suggest_time_in_millis": float64(0), + "search_suggest_total": float64(0), + "segments_count": float64(1), + "segments_doc_values_memory_in_bytes": float64(68), + "segments_fixed_bit_set_memory_in_bytes": float64(0), + "segments_index_writer_memory_in_bytes": float64(0), + "segments_max_unsafe_auto_id_timestamp": float64(-1), + "segments_memory_in_bytes": float64(4301), + "segments_norms_memory_in_bytes": float64(384), + "segments_points_memory_in_bytes": float64(3), + "segments_stored_fields_memory_in_bytes": float64(312), + "segments_term_vectors_memory_in_bytes": float64(0), + "segments_terms_memory_in_bytes": float64(3534), + "segments_version_map_memory_in_bytes": float64(0), + "seq_no_global_checkpoint": float64(339), + "seq_no_local_checkpoint": float64(339), + "seq_no_max_seq_no": float64(339), + "store_size_in_bytes": float64(90564), + "translog_earliest_last_modified_age": float64(936870), + "translog_operations": float64(340), + "translog_size_in_bytes": float64(77158), + "translog_uncommitted_operations": float64(0), + "translog_uncommitted_size_in_bytes": float64(55), + "warmer_current": float64(0), + "warmer_total": float64(3), + "warmer_total_time_in_millis": float64(0), +} + +var clusterIndicesReplicaShardsExpected = map[string]interface{}{ + "commit_generation": float64(5), + "commit_num_docs": float64(352), + "completion_size_in_bytes": float64(0), + "docs_count": float64(352), + "docs_deleted": float64(0), + "fielddata_evictions": float64(0), + "fielddata_memory_size_in_bytes": float64(0), + "flush_periodic": float64(0), + "flush_total": float64(1), + "flush_total_time_in_millis": float64(26), + "get_current": float64(0), + "get_exists_time_in_millis": float64(0), + "get_exists_total": float64(0), + "get_missing_time_in_millis": float64(0), + "get_missing_total": float64(0), + "get_time_in_millis": float64(0), + "get_total": float64(0), + "indexing_delete_current": float64(0), + "indexing_delete_time_in_millis": float64(0), + "indexing_delete_total": float64(0), + "indexing_index_current": float64(0), + "indexing_index_failed": float64(0), + "indexing_index_time_in_millis": float64(66), + "indexing_index_total": float64(352), + "indexing_noop_update_total": float64(0), + "indexing_throttle_time_in_millis": float64(0), + "merges_current": float64(0), + "merges_current_docs": float64(0), + "merges_current_size_in_bytes": float64(0), + "merges_total": float64(0), + "merges_total_auto_throttle_in_bytes": float64(20971520), + "merges_total_docs": float64(0), + 
"merges_total_size_in_bytes": float64(0), + "merges_total_stopped_time_in_millis": float64(0), + "merges_total_throttled_time_in_millis": float64(0), + "merges_total_time_in_millis": float64(0), + "query_cache_cache_count": float64(0), + "query_cache_cache_size": float64(0), + "query_cache_evictions": float64(0), + "query_cache_hit_count": float64(0), + "query_cache_memory_size_in_bytes": float64(0), + "query_cache_miss_count": float64(0), + "query_cache_total_count": float64(0), + "recovery_current_as_source": float64(0), + "recovery_current_as_target": float64(0), + "recovery_throttle_time_in_millis": float64(0), + "refresh_external_total": float64(4), + "refresh_external_total_time_in_millis": float64(106), + "refresh_listeners": float64(0), + "refresh_total": float64(6), + "refresh_total_time_in_millis": float64(104), + "request_cache_evictions": float64(0), + "request_cache_hit_count": float64(0), + "request_cache_memory_size_in_bytes": float64(0), + "request_cache_miss_count": float64(0), + "retention_leases_primary_term": float64(1), + "retention_leases_version": float64(0), + "routing_state": int(3), + "search_fetch_current": float64(0), + "search_fetch_time_in_millis": float64(0), + "search_fetch_total": float64(0), + "search_open_contexts": float64(0), + "search_query_current": float64(0), + "search_query_time_in_millis": float64(0), + "search_query_total": float64(0), + "search_scroll_current": float64(0), + "search_scroll_time_in_millis": float64(0), + "search_scroll_total": float64(0), + "search_suggest_current": float64(0), + "search_suggest_time_in_millis": float64(0), + "search_suggest_total": float64(0), + "segments_count": float64(1), + "segments_doc_values_memory_in_bytes": float64(68), + "segments_fixed_bit_set_memory_in_bytes": float64(0), + "segments_index_writer_memory_in_bytes": float64(0), + "segments_max_unsafe_auto_id_timestamp": float64(-1), + "segments_memory_in_bytes": float64(4280), + "segments_norms_memory_in_bytes": float64(384), + "segments_points_memory_in_bytes": float64(3), + "segments_stored_fields_memory_in_bytes": float64(296), + "segments_term_vectors_memory_in_bytes": float64(0), + "segments_terms_memory_in_bytes": float64(3529), + "segments_version_map_memory_in_bytes": float64(0), + "seq_no_global_checkpoint": float64(351), + "seq_no_local_checkpoint": float64(351), + "seq_no_max_seq_no": float64(351), + "store_size_in_bytes": float64(94584), + "translog_earliest_last_modified_age": float64(936144), + "translog_operations": float64(352), + "translog_size_in_bytes": float64(79980), + "translog_uncommitted_operations": float64(0), + "translog_uncommitted_size_in_bytes": float64(55), + "warmer_current": float64(0), + "warmer_total": float64(3), + "warmer_total_time_in_millis": float64(0), +} diff --git a/plugins/inputs/ethtool/README.md b/plugins/inputs/ethtool/README.md new file mode 100644 index 000000000..3f397cdfb --- /dev/null +++ b/plugins/inputs/ethtool/README.md @@ -0,0 +1,33 @@ +# Ethtool Input Plugin + +The ethtool input plugin pulls ethernet device stats. Fields pulled will depend on the network device and driver + +### Configuration: + +```toml +# Returns ethtool statistics for given interfaces +[[inputs.ethtool]] + ## List of interfaces to pull metrics for + # interface_include = ["eth0"] + + ## List of interfaces to ignore when pulling metrics. 
+ # interface_exclude = ["eth1"] +``` + +Interfaces can be included or ignored using + +- `interface_include` +- `interface_exclude` + +Note that loopback interfaces will be automatically ignored + +### Metrics: + +Metrics are dependant on the network device and driver + +### Example Output: + +``` +ethtool,driver=igb,host=test01,interface=mgmt0 tx_queue_1_packets=280782i,rx_queue_5_csum_err=0i,tx_queue_4_restart=0i,tx_multicast=7i,tx_queue_1_bytes=39674885i,rx_queue_2_alloc_failed=0i,tx_queue_5_packets=173970i,tx_single_coll_ok=0i,rx_queue_1_drops=0i,tx_queue_2_restart=0i,tx_aborted_errors=0i,rx_queue_6_csum_err=0i,tx_queue_5_restart=0i,tx_queue_4_bytes=64810835i,tx_abort_late_coll=0i,tx_queue_4_packets=109102i,os2bmc_tx_by_bmc=0i,tx_bytes=427527435i,tx_queue_7_packets=66665i,dropped_smbus=0i,rx_queue_0_csum_err=0i,tx_flow_control_xoff=0i,rx_packets=25926536i,rx_queue_7_csum_err=0i,rx_queue_3_bytes=84326060i,rx_multicast=83771i,rx_queue_4_alloc_failed=0i,rx_queue_3_drops=0i,rx_queue_3_csum_err=0i,rx_errors=0i,tx_errors=0i,tx_queue_6_packets=183236i,rx_broadcast=24378893i,rx_queue_7_packets=88680i,tx_dropped=0i,rx_frame_errors=0i,tx_queue_3_packets=161045i,tx_packets=1257017i,rx_queue_1_csum_err=0i,tx_window_errors=0i,tx_dma_out_of_sync=0i,rx_length_errors=0i,rx_queue_5_drops=0i,tx_timeout_count=0i,rx_queue_4_csum_err=0i,rx_flow_control_xon=0i,tx_heartbeat_errors=0i,tx_flow_control_xon=0i,collisions=0i,tx_queue_0_bytes=29465801i,rx_queue_6_drops=0i,rx_queue_0_alloc_failed=0i,tx_queue_1_restart=0i,rx_queue_0_drops=0i,tx_broadcast=9i,tx_carrier_errors=0i,tx_queue_7_bytes=13777515i,tx_queue_7_restart=0i,rx_queue_5_bytes=50732006i,rx_queue_7_bytes=35744457i,tx_deferred_ok=0i,tx_multi_coll_ok=0i,rx_crc_errors=0i,rx_fifo_errors=0i,rx_queue_6_alloc_failed=0i,tx_queue_2_packets=175206i,tx_queue_0_packets=107011i,rx_queue_4_bytes=201364548i,rx_queue_6_packets=372573i,os2bmc_rx_by_host=0i,multicast=83771i,rx_queue_4_drops=0i,rx_queue_5_packets=130535i,rx_queue_6_bytes=139488035i,tx_fifo_errors=0i,tx_queue_5_bytes=84899130i,rx_queue_0_packets=24529563i,rx_queue_3_alloc_failed=0i,rx_queue_7_drops=0i,tx_queue_6_bytes=96288614i,tx_queue_2_bytes=22132949i,tx_tcp_seg_failed=0i,rx_queue_1_bytes=246703840i,rx_queue_0_bytes=1506870738i,tx_queue_0_restart=0i,rx_queue_2_bytes=111344804i,tx_tcp_seg_good=0i,tx_queue_3_restart=0i,rx_no_buffer_count=0i,rx_smbus=0i,rx_queue_1_packets=273865i,rx_over_errors=0i,os2bmc_tx_by_host=0i,rx_queue_1_alloc_failed=0i,rx_queue_7_alloc_failed=0i,rx_short_length_errors=0i,tx_hwtstamp_timeouts=0i,tx_queue_6_restart=0i,rx_queue_2_packets=207136i,tx_queue_3_bytes=70391970i,rx_queue_3_packets=112007i,rx_queue_4_packets=212177i,tx_smbus=0i,rx_long_byte_count=2480280632i,rx_queue_2_csum_err=0i,rx_missed_errors=0i,rx_bytes=2480280632i,rx_queue_5_alloc_failed=0i,rx_queue_2_drops=0i,os2bmc_rx_by_bmc=0i,rx_align_errors=0i,rx_long_length_errors=0i,rx_hwtstamp_cleared=0i,rx_flow_control_xoff=0i 1564658080000000000 +ethtool,driver=igb,host=test02,interface=mgmt0 
rx_queue_2_bytes=111344804i,tx_queue_3_bytes=70439858i,multicast=83771i,rx_broadcast=24378975i,tx_queue_0_packets=107011i,rx_queue_6_alloc_failed=0i,rx_queue_6_drops=0i,rx_hwtstamp_cleared=0i,tx_window_errors=0i,tx_tcp_seg_good=0i,rx_queue_1_drops=0i,tx_queue_1_restart=0i,rx_queue_7_csum_err=0i,rx_no_buffer_count=0i,tx_queue_1_bytes=39675245i,tx_queue_5_bytes=84899130i,tx_broadcast=9i,rx_queue_1_csum_err=0i,tx_flow_control_xoff=0i,rx_queue_6_csum_err=0i,tx_timeout_count=0i,os2bmc_tx_by_bmc=0i,rx_queue_6_packets=372577i,rx_queue_0_alloc_failed=0i,tx_flow_control_xon=0i,rx_queue_2_drops=0i,tx_queue_2_packets=175206i,rx_queue_3_csum_err=0i,tx_abort_late_coll=0i,tx_queue_5_restart=0i,tx_dropped=0i,rx_queue_2_alloc_failed=0i,tx_multi_coll_ok=0i,rx_queue_1_packets=273865i,rx_flow_control_xon=0i,tx_single_coll_ok=0i,rx_length_errors=0i,rx_queue_7_bytes=35744457i,rx_queue_4_alloc_failed=0i,rx_queue_6_bytes=139488395i,rx_queue_2_csum_err=0i,rx_long_byte_count=2480288216i,rx_queue_1_alloc_failed=0i,tx_queue_0_restart=0i,rx_queue_0_csum_err=0i,tx_queue_2_bytes=22132949i,rx_queue_5_drops=0i,tx_dma_out_of_sync=0i,rx_queue_3_drops=0i,rx_queue_4_packets=212177i,tx_queue_6_restart=0i,rx_packets=25926650i,rx_queue_7_packets=88680i,rx_frame_errors=0i,rx_queue_3_bytes=84326060i,rx_short_length_errors=0i,tx_queue_7_bytes=13777515i,rx_queue_3_alloc_failed=0i,tx_queue_6_packets=183236i,rx_queue_0_drops=0i,rx_multicast=83771i,rx_queue_2_packets=207136i,rx_queue_5_csum_err=0i,rx_queue_5_packets=130535i,rx_queue_7_alloc_failed=0i,tx_smbus=0i,tx_queue_3_packets=161081i,rx_queue_7_drops=0i,tx_queue_2_restart=0i,tx_multicast=7i,tx_fifo_errors=0i,tx_queue_3_restart=0i,rx_long_length_errors=0i,tx_queue_6_bytes=96288614i,tx_queue_1_packets=280786i,tx_tcp_seg_failed=0i,rx_align_errors=0i,tx_errors=0i,rx_crc_errors=0i,rx_queue_0_packets=24529673i,rx_flow_control_xoff=0i,tx_queue_0_bytes=29465801i,rx_over_errors=0i,rx_queue_4_drops=0i,os2bmc_rx_by_bmc=0i,rx_smbus=0i,dropped_smbus=0i,tx_hwtstamp_timeouts=0i,rx_errors=0i,tx_queue_4_packets=109102i,tx_carrier_errors=0i,tx_queue_4_bytes=64810835i,tx_queue_4_restart=0i,rx_queue_4_csum_err=0i,tx_queue_7_packets=66665i,tx_aborted_errors=0i,rx_missed_errors=0i,tx_bytes=427575843i,collisions=0i,rx_queue_1_bytes=246703840i,rx_queue_5_bytes=50732006i,rx_bytes=2480288216i,os2bmc_rx_by_host=0i,rx_queue_5_alloc_failed=0i,rx_queue_3_packets=112007i,tx_deferred_ok=0i,os2bmc_tx_by_host=0i,tx_heartbeat_errors=0i,rx_queue_0_bytes=1506877506i,tx_queue_7_restart=0i,tx_packets=1257057i,rx_queue_4_bytes=201364548i,rx_fifo_errors=0i,tx_queue_5_packets=173970i 1564658090000000000 +``` diff --git a/plugins/inputs/ethtool/ethtool.go b/plugins/inputs/ethtool/ethtool.go new file mode 100644 index 000000000..3f8f8e156 --- /dev/null +++ b/plugins/inputs/ethtool/ethtool.go @@ -0,0 +1,50 @@ +package ethtool + +import ( + "net" + + "github.com/influxdata/telegraf" +) + +type Command interface { + Init() error + DriverName(intf string) (string, error) + Interfaces() ([]net.Interface, error) + Stats(intf string) (map[string]uint64, error) +} + +type Ethtool struct { + // This is the list of interface names to include + InterfaceInclude []string `toml:"interface_include"` + + // This is the list of interface names to ignore + InterfaceExclude []string `toml:"interface_exclude"` + + Log telegraf.Logger `toml:"-"` + + // the ethtool command + command Command +} + +const ( + pluginName = "ethtool" + tagInterface = "interface" + tagDriverName = "driver" + + sampleConfig = ` + ## List of interfaces to pull metrics 
for + # interface_include = ["eth0"] + + ## List of interfaces to ignore when pulling metrics. + # interface_exclude = ["eth1"] +` +) + +func (e *Ethtool) SampleConfig() string { + return sampleConfig +} + +// Description returns a one-sentence description of the Input +func (e *Ethtool) Description() string { + return "Returns ethtool statistics for given interfaces" +} diff --git a/plugins/inputs/ethtool/ethtool_linux.go b/plugins/inputs/ethtool/ethtool_linux.go new file mode 100644 index 000000000..b8c9312cb --- /dev/null +++ b/plugins/inputs/ethtool/ethtool_linux.go @@ -0,0 +1,136 @@ +// +build linux + +package ethtool + +import ( + "net" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/pkg/errors" + "github.com/safchain/ethtool" +) + +type CommandEthtool struct { + ethtool *ethtool.Ethtool +} + +func (e *Ethtool) Gather(acc telegraf.Accumulator) error { + + // Get the list of interfaces + interfaces, err := e.command.Interfaces() + if err != nil { + acc.AddError(err) + return nil + } + + interfaceFilter, err := filter.NewIncludeExcludeFilter(e.InterfaceInclude, e.InterfaceExclude) + if err != nil { + return err + } + + // Parallelize the ethtool calls in the event of many interfaces + var wg sync.WaitGroup + + for _, iface := range interfaces { + + // Check this isn't a loopback interface and that it's matched by the filter + if (iface.Flags&net.FlagLoopback == 0) && interfaceFilter.Match(iface.Name) { + wg.Add(1) + + go func(i net.Interface) { + e.gatherEthtoolStats(i, acc) + wg.Done() + }(iface) + } + } + + // Wait for all the interfaces to finish + wg.Wait() + return nil +} + +// Initialise the Command Tool +func (e *Ethtool) Init() error { + return e.command.Init() +} + +// Gather the stats for the interface. 
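+// It is called concurrently from Gather, one goroutine per monitored interface.
+// The resulting measurement is tagged with the interface and driver name and
+// carries one field per statistic returned by ethtool; failures to look up the
+// driver or stats are reported through the accumulator rather than aborting
+// the whole gather.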
+func (e *Ethtool) gatherEthtoolStats(iface net.Interface, acc telegraf.Accumulator) { + + tags := make(map[string]string) + tags[tagInterface] = iface.Name + + driverName, err := e.command.DriverName(iface.Name) + if err != nil { + driverErr := errors.Wrapf(err, "%s driver", iface.Name) + acc.AddError(driverErr) + return + } + + tags[tagDriverName] = driverName + + fields := make(map[string]interface{}) + stats, err := e.command.Stats(iface.Name) + if err != nil { + statsErr := errors.Wrapf(err, "%s stats", iface.Name) + acc.AddError(statsErr) + return + } + + for k, v := range stats { + fields[k] = v + } + + acc.AddFields(pluginName, fields, tags) +} + +func NewCommandEthtool() *CommandEthtool { + return &CommandEthtool{} +} + +func (c *CommandEthtool) Init() error { + + if c.ethtool != nil { + return nil + } + + e, err := ethtool.NewEthtool() + if err == nil { + c.ethtool = e + } + + return err +} + +func (c *CommandEthtool) DriverName(intf string) (string, error) { + return c.ethtool.DriverName(intf) +} + +func (c *CommandEthtool) Stats(intf string) (map[string]uint64, error) { + return c.ethtool.Stats(intf) +} + +func (c *CommandEthtool) Interfaces() ([]net.Interface, error) { + + // Get the list of interfaces + interfaces, err := net.Interfaces() + if err != nil { + return nil, err + } + + return interfaces, nil +} + +func init() { + + inputs.Add(pluginName, func() telegraf.Input { + return &Ethtool{ + InterfaceInclude: []string{}, + InterfaceExclude: []string{}, + command: NewCommandEthtool(), + } + }) +} diff --git a/plugins/inputs/ethtool/ethtool_notlinux.go b/plugins/inputs/ethtool/ethtool_notlinux.go new file mode 100644 index 000000000..b022e0a46 --- /dev/null +++ b/plugins/inputs/ethtool/ethtool_notlinux.go @@ -0,0 +1,23 @@ +// +build !linux + +package ethtool + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +func (e *Ethtool) Init() error { + e.Log.Warn("Current platform is not supported") + return nil +} + +func (e *Ethtool) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add(pluginName, func() telegraf.Input { + return &Ethtool{} + }) +} diff --git a/plugins/inputs/ethtool/ethtool_test.go b/plugins/inputs/ethtool/ethtool_test.go new file mode 100644 index 000000000..d281644a5 --- /dev/null +++ b/plugins/inputs/ethtool/ethtool_test.go @@ -0,0 +1,381 @@ +// +build linux + +package ethtool + +import ( + "net" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +var command *Ethtool +var interfaceMap map[string]*InterfaceMock + +type InterfaceMock struct { + Name string + DriverName string + Stat map[string]uint64 + LoopBack bool +} + +type CommandEthtoolMock struct { + InterfaceMap map[string]*InterfaceMock +} + +func (c *CommandEthtoolMock) Init() error { + // Not required for test mock + return nil +} + +func (c *CommandEthtoolMock) DriverName(intf string) (driverName string, err error) { + i := c.InterfaceMap[intf] + if i != nil { + driverName = i.DriverName + return + } + return driverName, errors.New("interface not found") +} + +func (c *CommandEthtoolMock) Interfaces() ([]net.Interface, error) { + interfaceNames := make([]net.Interface, 0) + for k, v := range c.InterfaceMap { + + // Whether to set the flag to loopback + flag := net.FlagUp + if v.LoopBack { + flag = net.FlagLoopback + } + + // Create a dummy interface + iface := net.Interface{ + Index: 0, + MTU: 1500, + Name: k, + HardwareAddr: nil, + Flags: 
flag, + } + interfaceNames = append(interfaceNames, iface) + } + return interfaceNames, nil +} + +func (c *CommandEthtoolMock) Stats(intf string) (stat map[string]uint64, err error) { + i := c.InterfaceMap[intf] + if i != nil { + stat = i.Stat + return + } + return stat, errors.New("interface not found") +} + +func setup() { + + interfaceMap = make(map[string]*InterfaceMock) + + eth1Stat := map[string]uint64{ + "port_rx_1024_to_15xx": 25167245, + "port_rx_128_to_255": 1573526387, + "port_rx_15xx_to_jumbo": 137819058, + "port_rx_256_to_511": 772038107, + "port_rx_512_to_1023": 78294457, + "port_rx_64": 8798065, + "port_rx_65_to_127": 450348015, + "port_rx_bad": 0, + "port_rx_bad_bytes": 0, + "port_rx_bad_gtjumbo": 0, + "port_rx_broadcast": 6428250, + "port_rx_bytes": 893460472634, + "port_rx_control": 0, + "port_rx_dp_di_dropped_packets": 2772680304, + "port_rx_dp_hlb_fetch": 0, + "port_rx_dp_hlb_wait": 0, + "port_rx_dp_q_disabled_packets": 0, + "port_rx_dp_streaming_packets": 0, + "port_rx_good": 3045991334, + "port_rx_good_bytes": 893460472927, + "port_rx_gtjumbo": 0, + "port_rx_lt64": 0, + "port_rx_multicast": 1639566045, + "port_rx_nodesc_drops": 0, + "port_rx_overflow": 0, + "port_rx_packets": 3045991334, + "port_rx_pause": 0, + "port_rx_pm_discard_bb_overflow": 0, + "port_rx_pm_discard_mapping": 0, + "port_rx_pm_discard_qbb": 0, + "port_rx_pm_discard_vfifo_full": 0, + "port_rx_pm_trunc_bb_overflow": 0, + "port_rx_pm_trunc_qbb": 0, + "port_rx_pm_trunc_vfifo_full": 0, + "port_rx_unicast": 1399997040, + "port_tx_1024_to_15xx": 236, + "port_tx_128_to_255": 275090219, + "port_tx_15xx_to_jumbo": 926, + "port_tx_256_to_511": 48567221, + "port_tx_512_to_1023": 5142016, + "port_tx_64": 113903973, + "port_tx_65_to_127": 161935699, + "port_tx_broadcast": 8, + "port_tx_bytes": 94357131016, + "port_tx_control": 0, + "port_tx_lt64": 0, + "port_tx_multicast": 325891647, + "port_tx_packets": 604640290, + "port_tx_pause": 0, + "port_tx_unicast": 278748635, + "ptp_bad_syncs": 1, + "ptp_fast_syncs": 1, + "ptp_filter_matches": 0, + "ptp_good_syncs": 136151, + "ptp_invalid_sync_windows": 0, + "ptp_no_time_syncs": 1, + "ptp_non_filter_matches": 0, + "ptp_oversize_sync_windows": 53, + "ptp_rx_no_timestamp": 0, + "ptp_rx_timestamp_packets": 0, + "ptp_sync_timeouts": 1, + "ptp_timestamp_packets": 0, + "ptp_tx_timestamp_packets": 0, + "ptp_undersize_sync_windows": 3, + "rx-0.rx_packets": 55659234, + "rx-1.rx_packets": 87880538, + "rx-2.rx_packets": 26746234, + "rx-3.rx_packets": 103026471, + "rx-4.rx_packets": 0, + "rx_eth_crc_err": 0, + "rx_frm_trunc": 0, + "rx_inner_ip_hdr_chksum_err": 0, + "rx_inner_tcp_udp_chksum_err": 0, + "rx_ip_hdr_chksum_err": 0, + "rx_mcast_mismatch": 0, + "rx_merge_events": 0, + "rx_merge_packets": 0, + "rx_nodesc_trunc": 0, + "rx_noskb_drops": 0, + "rx_outer_ip_hdr_chksum_err": 0, + "rx_outer_tcp_udp_chksum_err": 0, + "rx_reset": 0, + "rx_tcp_udp_chksum_err": 0, + "rx_tobe_disc": 0, + "tx-0.tx_packets": 85843565, + "tx-1.tx_packets": 108642725, + "tx-2.tx_packets": 202596078, + "tx-3.tx_packets": 207561010, + "tx-4.tx_packets": 0, + "tx_cb_packets": 4, + "tx_merge_events": 11025, + "tx_pio_packets": 531928114, + "tx_pushes": 604643378, + "tx_tso_bursts": 0, + "tx_tso_fallbacks": 0, + "tx_tso_long_headers": 0, + } + eth1 := &InterfaceMock{"eth1", "driver1", eth1Stat, false} + interfaceMap[eth1.Name] = eth1 + + eth2Stat := map[string]uint64{ + "port_rx_1024_to_15xx": 11529312, + "port_rx_128_to_255": 1868952037, + "port_rx_15xx_to_jumbo": 130339387, + "port_rx_256_to_511": 843846270, 
+ "port_rx_512_to_1023": 173194372, + "port_rx_64": 9190374, + "port_rx_65_to_127": 507806115, + "port_rx_bad": 0, + "port_rx_bad_bytes": 0, + "port_rx_bad_gtjumbo": 0, + "port_rx_broadcast": 6648019, + "port_rx_bytes": 1007358162202, + "port_rx_control": 0, + "port_rx_dp_di_dropped_packets": 3164124639, + "port_rx_dp_hlb_fetch": 0, + "port_rx_dp_hlb_wait": 0, + "port_rx_dp_q_disabled_packets": 0, + "port_rx_dp_streaming_packets": 0, + "port_rx_good": 3544857867, + "port_rx_good_bytes": 1007358162202, + "port_rx_gtjumbo": 0, + "port_rx_lt64": 0, + "port_rx_multicast": 2231999743, + "port_rx_nodesc_drops": 0, + "port_rx_overflow": 0, + "port_rx_packets": 3544857867, + "port_rx_pause": 0, + "port_rx_pm_discard_bb_overflow": 0, + "port_rx_pm_discard_mapping": 0, + "port_rx_pm_discard_qbb": 0, + "port_rx_pm_discard_vfifo_full": 0, + "port_rx_pm_trunc_bb_overflow": 0, + "port_rx_pm_trunc_qbb": 0, + "port_rx_pm_trunc_vfifo_full": 0, + "port_rx_unicast": 1306210105, + "port_tx_1024_to_15xx": 379, + "port_tx_128_to_255": 202767251, + "port_tx_15xx_to_jumbo": 558, + "port_tx_256_to_511": 31454719, + "port_tx_512_to_1023": 6865731, + "port_tx_64": 17268276, + "port_tx_65_to_127": 272816313, + "port_tx_broadcast": 6, + "port_tx_bytes": 78071946593, + "port_tx_control": 0, + "port_tx_lt64": 0, + "port_tx_multicast": 239510586, + "port_tx_packets": 531173227, + "port_tx_pause": 0, + "port_tx_unicast": 291662635, + "ptp_bad_syncs": 0, + "ptp_fast_syncs": 0, + "ptp_filter_matches": 0, + "ptp_good_syncs": 0, + "ptp_invalid_sync_windows": 0, + "ptp_no_time_syncs": 0, + "ptp_non_filter_matches": 0, + "ptp_oversize_sync_windows": 0, + "ptp_rx_no_timestamp": 0, + "ptp_rx_timestamp_packets": 0, + "ptp_sync_timeouts": 0, + "ptp_timestamp_packets": 0, + "ptp_tx_timestamp_packets": 0, + "ptp_undersize_sync_windows": 0, + "rx-0.rx_packets": 84587075, + "rx-1.rx_packets": 74029305, + "rx-2.rx_packets": 134586471, + "rx-3.rx_packets": 87531322, + "rx-4.rx_packets": 0, + "rx_eth_crc_err": 0, + "rx_frm_trunc": 0, + "rx_inner_ip_hdr_chksum_err": 0, + "rx_inner_tcp_udp_chksum_err": 0, + "rx_ip_hdr_chksum_err": 0, + "rx_mcast_mismatch": 0, + "rx_merge_events": 0, + "rx_merge_packets": 0, + "rx_nodesc_trunc": 0, + "rx_noskb_drops": 0, + "rx_outer_ip_hdr_chksum_err": 0, + "rx_outer_tcp_udp_chksum_err": 0, + "rx_reset": 0, + "rx_tcp_udp_chksum_err": 0, + "rx_tobe_disc": 0, + "tx-0.tx_packets": 232521451, + "tx-1.tx_packets": 97876137, + "tx-2.tx_packets": 106822111, + "tx-3.tx_packets": 93955050, + "tx-4.tx_packets": 0, + "tx_cb_packets": 1, + "tx_merge_events": 8402, + "tx_pio_packets": 481040054, + "tx_pushes": 531174491, + "tx_tso_bursts": 128, + "tx_tso_fallbacks": 0, + "tx_tso_long_headers": 0, + } + eth2 := &InterfaceMock{"eth2", "driver1", eth2Stat, false} + interfaceMap[eth2.Name] = eth2 + + // dummy loopback including dummy stat to ensure that the ignore feature is working + lo0Stat := map[string]uint64{ + "dummy": 0, + } + lo0 := &InterfaceMock{"lo0", "", lo0Stat, true} + interfaceMap[lo0.Name] = lo0 + + c := &CommandEthtoolMock{interfaceMap} + command = &Ethtool{ + InterfaceInclude: []string{}, + InterfaceExclude: []string{}, + command: c, + } +} + +func toStringMapInterface(in map[string]uint64) map[string]interface{} { + var m = map[string]interface{}{} + for k, v := range in { + m[k] = v + } + return m +} + +func TestGather(t *testing.T) { + + setup() + var acc testutil.Accumulator + + err := command.Gather(&acc) + assert.NoError(t, err) + assert.Len(t, acc.Metrics, 2) + + expectedFieldsEth1 := 
toStringMapInterface(interfaceMap["eth1"].Stat) + expectedTagsEth1 := map[string]string{ + "interface": "eth1", + "driver": "driver1", + } + acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth1, expectedTagsEth1) + expectedFieldsEth2 := toStringMapInterface(interfaceMap["eth2"].Stat) + expectedTagsEth2 := map[string]string{ + "interface": "eth2", + "driver": "driver1", + } + acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) +} + +func TestGatherIncludeInterfaces(t *testing.T) { + + setup() + var acc testutil.Accumulator + + command.InterfaceInclude = append(command.InterfaceInclude, "eth1") + + err := command.Gather(&acc) + assert.NoError(t, err) + assert.Len(t, acc.Metrics, 1) + + // Should contain eth1 + expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) + expectedTagsEth1 := map[string]string{ + "interface": "eth1", + "driver": "driver1", + } + acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth1, expectedTagsEth1) + + // Should not contain eth2 + expectedFieldsEth2 := toStringMapInterface(interfaceMap["eth2"].Stat) + expectedTagsEth2 := map[string]string{ + "interface": "eth2", + "driver": "driver1", + } + acc.AssertDoesNotContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) +} + +func TestGatherIgnoreInterfaces(t *testing.T) { + + setup() + var acc testutil.Accumulator + + command.InterfaceExclude = append(command.InterfaceExclude, "eth1") + + err := command.Gather(&acc) + assert.NoError(t, err) + assert.Len(t, acc.Metrics, 1) + + // Should not contain eth1 + expectedFieldsEth1 := toStringMapInterface(interfaceMap["eth1"].Stat) + expectedTagsEth1 := map[string]string{ + "interface": "eth1", + "driver": "driver1", + } + acc.AssertDoesNotContainsTaggedFields(t, pluginName, expectedFieldsEth1, expectedTagsEth1) + + // Should contain eth2 + expectedFieldsEth2 := toStringMapInterface(interfaceMap["eth2"].Stat) + expectedTagsEth2 := map[string]string{ + "interface": "eth2", + "driver": "driver1", + } + acc.AssertContainsTaggedFields(t, pluginName, expectedFieldsEth2, expectedTagsEth2) + +} diff --git a/plugins/inputs/eventhub_consumer/README.md b/plugins/inputs/eventhub_consumer/README.md new file mode 100644 index 000000000..06c43cf31 --- /dev/null +++ b/plugins/inputs/eventhub_consumer/README.md @@ -0,0 +1,98 @@ +# Event Hub Consumer Input Plugin + +This plugin provides a consumer for use with Azure Event Hubs and Azure IoT Hub. + +### IoT Hub Setup + +The main focus for development of this plugin is Azure IoT hub: + +1. Create an Azure IoT Hub by following any of the guides provided here: https://docs.microsoft.com/en-us/azure/iot-hub/ +2. Create a device, for example a [simulated Raspberry Pi](https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-raspberry-pi-web-simulator-get-started) +3. The connection string needed for the plugin is located under *Shared access policies*, both the *iothubowner* and *service* policies should work + +### Configuration + +```toml +[[inputs.eventhub_consumer]] + ## The default behavior is to create a new Event Hub client from environment variables. 
+ ## This requires one of the following sets of environment variables to be set: + ## + ## 1) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "EVENTHUB_CONNECTION_STRING" + ## + ## 2) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "EVENTHUB_KEY_NAME" + ## - "EVENTHUB_KEY_VALUE" + + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. + ## This can either be the associated environment variable or hard coded directly. + # connection_string = "" + + ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister + # persistence_dir = "" + + ## Change the default consumer group + # consumer_group = "" + + ## By default the event hub receives all messages present on the broker, alternative modes can be set below. + ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). + ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). + # from_timestamp = + # latest = true + + ## Set a custom prefetch count for the receiver(s) + # prefetch_count = 1000 + + ## Add an epoch to the receiver(s) + # epoch = 0 + + ## Change to set a custom user agent, "telegraf" is used by default + # user_agent = "telegraf" + + ## To consume from a specific partition, set the partition_ids option. + ## An empty array will result in receiving from all partitions. + # partition_ids = ["0","1"] + + ## Max undelivered messages + # max_undelivered_messages = 1000 + + ## Set either option below to true to use a system property as timestamp. + ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. + ## It is recommended to use this setting when the data itself has no timestamp. + # enqueued_time_as_ts = true + # iot_hub_enqueued_time_as_ts = true + + ## Tags or fields to create from keys present in the application property bag. + ## These could for example be set by message enrichments in Azure IoT Hub. + # application_property_tags = [] + # application_property_fields = [] + + ## Tag or field name to use for metadata + ## By default all metadata is disabled + # sequence_number_field = "SequenceNumber" + # enqueued_time_field = "EnqueuedTime" + # offset_field = "Offset" + # partition_id_tag = "PartitionID" + # partition_key_tag = "PartitionKey" + # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" + # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" + # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" + # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" + # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + +#### Environment Variables + +[Full documentation of the available environment variables][envvar]. 
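+For reference, here is a condensed sketch of the client selection performed in `eventhub_consumer.go` (shown later in this change): a configured `connection_string` takes precedence, otherwise the client is built from the environment variables listed above. The function and variable names (`newHub`, `connectionString`, `persistenceDir`, package `example`) are illustrative only.
+
+```go
+package example
+
+import (
+	eventhub "github.com/Azure/azure-event-hubs-go/v3"
+	"github.com/Azure/azure-event-hubs-go/v3/persist"
+)
+
+// newHub is condensed from the plugin's Init(): use the connection string
+// when set, fall back to the EVENTHUB_* environment variables otherwise,
+// and enable file-based offset persistence when a persistence_dir is given.
+func newHub(connectionString, persistenceDir string) (*eventhub.Hub, error) {
+	opts := []eventhub.HubOption{}
+	if persistenceDir != "" {
+		persister, err := persist.NewFilePersister(persistenceDir)
+		if err != nil {
+			return nil, err
+		}
+		opts = append(opts, eventhub.HubWithOffsetPersistence(persister))
+	}
+	if connectionString != "" {
+		return eventhub.NewHubFromConnectionString(connectionString, opts...)
+	}
+	return eventhub.NewHubFromEnvironment(opts...)
+}
+```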
+ +[envvar]: https://github.com/Azure/azure-event-hubs-go#environment-variables diff --git a/plugins/inputs/eventhub_consumer/eventhub_consumer.go b/plugins/inputs/eventhub_consumer/eventhub_consumer.go new file mode 100644 index 000000000..72cc4c25f --- /dev/null +++ b/plugins/inputs/eventhub_consumer/eventhub_consumer.go @@ -0,0 +1,422 @@ +package eventhub + +import ( + "context" + "fmt" + "sync" + "time" + + eventhub "github.com/Azure/azure-event-hubs-go/v3" + "github.com/Azure/azure-event-hubs-go/v3/persist" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +const ( + defaultMaxUndeliveredMessages = 1000 +) + +type empty struct{} +type semaphore chan empty + +// EventHub is the top level struct for this plugin +type EventHub struct { + // Configuration + ConnectionString string `toml:"connection_string"` + PersistenceDir string `toml:"persistence_dir"` + ConsumerGroup string `toml:"consumer_group"` + FromTimestamp time.Time `toml:"from_timestamp"` + Latest bool `toml:"latest"` + PrefetchCount uint32 `toml:"prefetch_count"` + Epoch int64 `toml:"epoch"` + UserAgent string `toml:"user_agent"` + PartitionIDs []string `toml:"partition_ids"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + EnqueuedTimeAsTs bool `toml:"enqueued_time_as_ts"` + IotHubEnqueuedTimeAsTs bool `toml:"iot_hub_enqueued_time_as_ts"` + + // Metadata + ApplicationPropertyFields []string `toml:"application_property_fields"` + ApplicationPropertyTags []string `toml:"application_property_tags"` + SequenceNumberField string `toml:"sequence_number_field"` + EnqueuedTimeField string `toml:"enqueued_time_field"` + OffsetField string `toml:"offset_field"` + PartitionIDTag string `toml:"partition_id_tag"` + PartitionKeyTag string `toml:"partition_key_tag"` + IoTHubDeviceConnectionIDTag string `toml:"iot_hub_device_connection_id_tag"` + IoTHubAuthGenerationIDTag string `toml:"iot_hub_auth_generation_id_tag"` + IoTHubConnectionAuthMethodTag string `toml:"iot_hub_connection_auth_method_tag"` + IoTHubConnectionModuleIDTag string `toml:"iot_hub_connection_module_id_tag"` + IoTHubEnqueuedTimeField string `toml:"iot_hub_enqueued_time_field"` + + Log telegraf.Logger `toml:"-"` + + // Azure + hub *eventhub.Hub + cancel context.CancelFunc + wg sync.WaitGroup + + parser parsers.Parser + in chan []telegraf.Metric +} + +// SampleConfig is provided here +func (*EventHub) SampleConfig() string { + return ` + ## The default behavior is to create a new Event Hub client from environment variables. + ## This requires one of the following sets of environment variables to be set: + ## + ## 1) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "EVENTHUB_CONNECTION_STRING" + ## + ## 2) Expected Environment Variables: + ## - "EVENTHUB_NAMESPACE" + ## - "EVENTHUB_NAME" + ## - "EVENTHUB_KEY_NAME" + ## - "EVENTHUB_KEY_VALUE" + + ## Uncommenting the option below will create an Event Hub client based solely on the connection string. + ## This can either be the associated environment variable or hard coded directly. + # connection_string = "" + + ## Set persistence directory to a valid folder to use a file persister instead of an in-memory persister + # persistence_dir = "" + + ## Change the default consumer group + # consumer_group = "" + + ## By default the event hub receives all messages present on the broker, alternative modes can be set below. 
+ ## The timestamp should be in https://github.com/toml-lang/toml#offset-date-time format (RFC 3339). + ## The 3 options below only apply if no valid persister is read from memory or file (e.g. first run). + # from_timestamp = + # latest = true + + ## Set a custom prefetch count for the receiver(s) + # prefetch_count = 1000 + + ## Add an epoch to the receiver(s) + # epoch = 0 + + ## Change to set a custom user agent, "telegraf" is used by default + # user_agent = "telegraf" + + ## To consume from a specific partition, set the partition_ids option. + ## An empty array will result in receiving from all partitions. + # partition_ids = ["0","1"] + + ## Max undelivered messages + # max_undelivered_messages = 1000 + + ## Set either option below to true to use a system property as timestamp. + ## You have the choice between EnqueuedTime and IoTHubEnqueuedTime. + ## It is recommended to use this setting when the data itself has no timestamp. + # enqueued_time_as_ts = true + # iot_hub_enqueued_time_as_ts = true + + ## Tags or fields to create from keys present in the application property bag. + ## These could for example be set by message enrichments in Azure IoT Hub. + # application_property_tags = [] + # application_property_fields = [] + + ## Tag or field name to use for metadata + ## By default all metadata is disabled + # sequence_number_field = "SequenceNumber" + # enqueued_time_field = "EnqueuedTime" + # offset_field = "Offset" + # partition_id_tag = "PartitionID" + # partition_key_tag = "PartitionKey" + # iot_hub_device_connection_id_tag = "IoTHubDeviceConnectionID" + # iot_hub_auth_generation_id_tag = "IoTHubAuthGenerationID" + # iot_hub_connection_auth_method_tag = "IoTHubConnectionAuthMethod" + # iot_hub_connection_module_id_tag = "IoTHubConnectionModuleID" + # iot_hub_enqueued_time_field = "IoTHubEnqueuedTime" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + ` +} + +// Description of the plugin +func (*EventHub) Description() string { + return "Azure Event Hubs service input plugin" +} + +// SetParser sets the parser +func (e *EventHub) SetParser(parser parsers.Parser) { + e.parser = parser +} + +// Gather function is unused +func (*EventHub) Gather(telegraf.Accumulator) error { + return nil +} + +// Init the EventHub ServiceInput +func (e *EventHub) Init() (err error) { + if e.MaxUndeliveredMessages == 0 { + e.MaxUndeliveredMessages = defaultMaxUndeliveredMessages + } + + // Set hub options + hubOpts := []eventhub.HubOption{} + + if e.PersistenceDir != "" { + persister, err := persist.NewFilePersister(e.PersistenceDir) + if err != nil { + return err + } + + hubOpts = append(hubOpts, eventhub.HubWithOffsetPersistence(persister)) + } + + if e.UserAgent != "" { + hubOpts = append(hubOpts, eventhub.HubWithUserAgent(e.UserAgent)) + } else { + hubOpts = append(hubOpts, eventhub.HubWithUserAgent(internal.ProductToken())) + } + + // Create event hub connection + if e.ConnectionString != "" { + e.hub, err = eventhub.NewHubFromConnectionString(e.ConnectionString, hubOpts...) + } else { + e.hub, err = eventhub.NewHubFromEnvironment(hubOpts...) 
+ } + + return err +} + +// Start the EventHub ServiceInput +func (e *EventHub) Start(acc telegraf.Accumulator) error { + e.in = make(chan []telegraf.Metric) + + var ctx context.Context + ctx, e.cancel = context.WithCancel(context.Background()) + + // Start tracking + e.wg.Add(1) + go func() { + defer e.wg.Done() + e.startTracking(ctx, acc) + }() + + // Configure receiver options + receiveOpts, err := e.configureReceiver() + if err != nil { + return err + } + + partitions := e.PartitionIDs + + if len(e.PartitionIDs) == 0 { + runtimeinfo, err := e.hub.GetRuntimeInformation(ctx) + if err != nil { + return err + } + + partitions = runtimeinfo.PartitionIDs + } + + for _, partitionID := range partitions { + _, err = e.hub.Receive(ctx, partitionID, e.onMessage, receiveOpts...) + if err != nil { + return fmt.Errorf("creating receiver for partition %q: %v", partitionID, err) + } + } + + return nil +} + +func (e *EventHub) configureReceiver() ([]eventhub.ReceiveOption, error) { + receiveOpts := []eventhub.ReceiveOption{} + + if e.ConsumerGroup != "" { + receiveOpts = append(receiveOpts, eventhub.ReceiveWithConsumerGroup(e.ConsumerGroup)) + } + + if !e.FromTimestamp.IsZero() { + receiveOpts = append(receiveOpts, eventhub.ReceiveFromTimestamp(e.FromTimestamp)) + } else if e.Latest { + receiveOpts = append(receiveOpts, eventhub.ReceiveWithLatestOffset()) + } + + if e.PrefetchCount != 0 { + receiveOpts = append(receiveOpts, eventhub.ReceiveWithPrefetchCount(e.PrefetchCount)) + } + + if e.Epoch != 0 { + receiveOpts = append(receiveOpts, eventhub.ReceiveWithEpoch(e.Epoch)) + } + + return receiveOpts, nil +} + +// OnMessage handles an Event. When this function returns without error the +// Event is immediately accepted and the offset is updated. If an error is +// returned the Event is marked for redelivery. +func (e *EventHub) onMessage(ctx context.Context, event *eventhub.Event) error { + metrics, err := e.createMetrics(event) + if err != nil { + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + case e.in <- metrics: + return nil + } +} + +// OnDelivery returns true if a new slot has opened up in the TrackingAccumulator. +func (e *EventHub) onDelivery( + acc telegraf.TrackingAccumulator, + groups map[telegraf.TrackingID][]telegraf.Metric, + track telegraf.DeliveryInfo, +) bool { + if track.Delivered() { + delete(groups, track.ID()) + return true + } + + // The metric was already accepted when onMessage completed, so we can't + // fallback on redelivery from Event Hub. Add a new copy of the metric for + // reprocessing. + metrics, ok := groups[track.ID()] + delete(groups, track.ID()) + if !ok { + // The metrics should always be found, this message indicates a programming error. 
+ e.Log.Errorf("Could not find delivery: %d", track.ID()) + return true + } + + backup := deepCopyMetrics(metrics) + id := acc.AddTrackingMetricGroup(metrics) + groups[id] = backup + return false +} + +func (e *EventHub) startTracking(ctx context.Context, ac telegraf.Accumulator) { + acc := ac.WithTracking(e.MaxUndeliveredMessages) + sem := make(semaphore, e.MaxUndeliveredMessages) + groups := make(map[telegraf.TrackingID][]telegraf.Metric, e.MaxUndeliveredMessages) + + for { + select { + case <-ctx.Done(): + return + case track := <-acc.Delivered(): + if e.onDelivery(acc, groups, track) { + <-sem + } + case sem <- empty{}: + select { + case <-ctx.Done(): + return + case track := <-acc.Delivered(): + if e.onDelivery(acc, groups, track) { + <-sem + <-sem + } + case metrics := <-e.in: + backup := deepCopyMetrics(metrics) + id := acc.AddTrackingMetricGroup(metrics) + groups[id] = backup + } + } + } +} + +func deepCopyMetrics(in []telegraf.Metric) []telegraf.Metric { + metrics := make([]telegraf.Metric, 0, len(in)) + for _, m := range in { + metrics = append(metrics, m.Copy()) + } + return metrics +} + +// CreateMetrics returns the Metrics from the Event. +func (e *EventHub) createMetrics(event *eventhub.Event) ([]telegraf.Metric, error) { + metrics, err := e.parser.Parse(event.Data) + if err != nil { + return nil, err + } + + for i := range metrics { + for _, field := range e.ApplicationPropertyFields { + if val, ok := event.Get(field); ok { + metrics[i].AddField(field, val) + } + } + + for _, tag := range e.ApplicationPropertyTags { + if val, ok := event.Get(tag); ok { + metrics[i].AddTag(tag, fmt.Sprintf("%v", val)) + } + } + + if e.SequenceNumberField != "" { + metrics[i].AddField(e.SequenceNumberField, *event.SystemProperties.SequenceNumber) + } + + if e.EnqueuedTimeAsTs { + metrics[i].SetTime(*event.SystemProperties.EnqueuedTime) + } else if e.EnqueuedTimeField != "" { + metrics[i].AddField(e.EnqueuedTimeField, (*event.SystemProperties.EnqueuedTime).UnixNano()/int64(time.Millisecond)) + } + + if e.OffsetField != "" { + metrics[i].AddField(e.OffsetField, *event.SystemProperties.Offset) + } + + if event.SystemProperties.PartitionID != nil && e.PartitionIDTag != "" { + metrics[i].AddTag(e.PartitionIDTag, string(*event.SystemProperties.PartitionID)) + } + if event.SystemProperties.PartitionKey != nil && e.PartitionKeyTag != "" { + metrics[i].AddTag(e.PartitionKeyTag, *event.SystemProperties.PartitionKey) + } + if event.SystemProperties.IoTHubDeviceConnectionID != nil && e.IoTHubDeviceConnectionIDTag != "" { + metrics[i].AddTag(e.IoTHubDeviceConnectionIDTag, *event.SystemProperties.IoTHubDeviceConnectionID) + } + if event.SystemProperties.IoTHubAuthGenerationID != nil && e.IoTHubAuthGenerationIDTag != "" { + metrics[i].AddTag(e.IoTHubAuthGenerationIDTag, *event.SystemProperties.IoTHubAuthGenerationID) + } + if event.SystemProperties.IoTHubConnectionAuthMethod != nil && e.IoTHubConnectionAuthMethodTag != "" { + metrics[i].AddTag(e.IoTHubConnectionAuthMethodTag, *event.SystemProperties.IoTHubConnectionAuthMethod) + } + if event.SystemProperties.IoTHubConnectionModuleID != nil && e.IoTHubConnectionModuleIDTag != "" { + metrics[i].AddTag(e.IoTHubConnectionModuleIDTag, *event.SystemProperties.IoTHubConnectionModuleID) + } + if event.SystemProperties.IoTHubEnqueuedTime != nil { + if e.IotHubEnqueuedTimeAsTs { + metrics[i].SetTime(*event.SystemProperties.IoTHubEnqueuedTime) + } else if e.IoTHubEnqueuedTimeField != "" { + metrics[i].AddField(e.IoTHubEnqueuedTimeField, 
(*event.SystemProperties.IoTHubEnqueuedTime).UnixNano()/int64(time.Millisecond)) + } + } + } + + return metrics, nil +} + +// Stop the EventHub ServiceInput +func (e *EventHub) Stop() { + err := e.hub.Close(context.Background()) + if err != nil { + e.Log.Errorf("Error closing Event Hub connection: %v", err) + } + e.cancel() + e.wg.Wait() +} + +func init() { + inputs.Add("eventhub_consumer", func() telegraf.Input { + return &EventHub{} + }) +} diff --git a/plugins/inputs/exec/README.md b/plugins/inputs/exec/README.md index 788c8eec0..8ed0b5111 100644 --- a/plugins/inputs/exec/README.md +++ b/plugins/inputs/exec/README.md @@ -40,7 +40,7 @@ This script produces static values, since no timestamp is specified the values a echo 'example,tag1=a,tag2=b i=42i,j=43i,k=44i' ``` -It can be paired with the following configuration and will be ran at the `interval` of the agent. +It can be paired with the following configuration and will be run at the `interval` of the agent. ```toml [[inputs.exec]] commands = ["sh /tmp/test.sh"] @@ -50,8 +50,16 @@ It can be paired with the following configuration and will be ran at the `interv ### Common Issues: -#### Q: My script works when I run it by hand, but not when Telegraf is running as a service. +#### My script works when I run it by hand, but not when Telegraf is running as a service. This may be related to the Telegraf service running as a different user. The official packages run Telegraf as the `telegraf` user and group on Linux systems. + +#### With a PowerShell on Windows, the output of the script appears to be truncated. + +You may need to set a variable in your script to increase the number of columns +available for output: +``` +$host.UI.RawUI.BufferSize = new-object System.Management.Automation.Host.Size(1024,50) +``` diff --git a/plugins/inputs/exec/dev/telegraf.conf b/plugins/inputs/exec/dev/telegraf.conf new file mode 100644 index 000000000..04433410e --- /dev/null +++ b/plugins/inputs/exec/dev/telegraf.conf @@ -0,0 +1,26 @@ +[agent] + interval="1s" + flush_interval="1s" + +[[inputs.exec]] + timeout = "1s" + data_format = "influx" + commands = [ + "echo 'deal,computer_name=hosta message=\"stuff\" 1530654676316265790'", + "echo 'deal,computer_name=hostb message=\"stuff\" 1530654676316265790'", + ] + +[[processors.regex]] + [[processors.regex.tags]] + key = "computer_name" + pattern = "^(.*?)a$" + replacement = "${1}" + result_key = "server_name" + [[processors.regex.tags]] + key = "computer_name" + pattern = "^(.*?)b$" + replacement = "${1}" + result_key = "server_name" + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/exec/exec.go b/plugins/inputs/exec/exec.go index 9cb86c3cd..cb4420b0f 100644 --- a/plugins/inputs/exec/exec.go +++ b/plugins/inputs/exec/exec.go @@ -8,16 +8,14 @@ import ( "runtime" "strings" "sync" - "syscall" "time" - "github.com/kballard/go-shellquote" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/parsers/nagios" + "github.com/kballard/go-shellquote" ) const sampleConfig = ` @@ -51,6 +49,7 @@ type Exec struct { parser parsers.Parser runner Runner + Log telegraf.Logger `toml:"-"` } func NewExec() *Exec { @@ -61,39 +60,18 @@ func NewExec() *Exec { } type Runner interface { - Run(*Exec, string, telegraf.Accumulator) ([]byte, error) + Run(string, time.Duration) ([]byte, []byte, error) } type CommandRunner struct{} -func AddNagiosState(exitCode error, 
acc telegraf.Accumulator) error { - nagiosState := 0 - if exitCode != nil { - exiterr, ok := exitCode.(*exec.ExitError) - if ok { - status, ok := exiterr.Sys().(syscall.WaitStatus) - if ok { - nagiosState = status.ExitStatus() - } else { - return fmt.Errorf("exec: unable to get nagios plugin exit code") - } - } else { - return fmt.Errorf("exec: unable to get nagios plugin exit code") - } - } - fields := map[string]interface{}{"state": nagiosState} - acc.AddFields("nagios_state", fields, nil) - return nil -} - func (c CommandRunner) Run( - e *Exec, command string, - acc telegraf.Accumulator, -) ([]byte, error) { + timeout time.Duration, +) ([]byte, []byte, error) { split_cmd, err := shellquote.Split(command) if err != nil || len(split_cmd) == 0 { - return nil, fmt.Errorf("exec: unable to parse command, %s", err) + return nil, nil, fmt.Errorf("exec: unable to parse command, %s", err) } cmd := exec.Command(split_cmd[0], split_cmd[1:]...) @@ -105,44 +83,35 @@ func (c CommandRunner) Run( cmd.Stdout = &out cmd.Stderr = &stderr - if err := internal.RunTimeout(cmd, e.Timeout.Duration); err != nil { - switch e.parser.(type) { - case *nagios.NagiosParser: - AddNagiosState(err, acc) - default: - var errMessage = "" - if stderr.Len() > 0 { - stderr = removeCarriageReturns(stderr) - // Limit the number of bytes. - didTruncate := false - if stderr.Len() > MaxStderrBytes { - stderr.Truncate(MaxStderrBytes) - didTruncate = true - } - if i := bytes.IndexByte(stderr.Bytes(), '\n'); i > 0 { - // Only show truncation if the newline wasn't the last character. - if i < stderr.Len()-1 { - didTruncate = true - } - stderr.Truncate(i) - } - if didTruncate { - stderr.WriteString("...") - } - - errMessage = fmt.Sprintf(": %s", stderr.String()) - } - return nil, fmt.Errorf("exec: %s for command '%s'%s", err, command, errMessage) - } - } else { - switch e.parser.(type) { - case *nagios.NagiosParser: - AddNagiosState(nil, acc) - } - } + runErr := internal.RunTimeout(cmd, timeout) out = removeCarriageReturns(out) - return out.Bytes(), nil + if stderr.Len() > 0 { + stderr = removeCarriageReturns(stderr) + stderr = truncate(stderr) + } + + return out.Bytes(), stderr.Bytes(), runErr +} + +func truncate(buf bytes.Buffer) bytes.Buffer { + // Limit the number of bytes. + didTruncate := false + if buf.Len() > MaxStderrBytes { + buf.Truncate(MaxStderrBytes) + didTruncate = true + } + if i := bytes.IndexByte(buf.Bytes(), '\n'); i > 0 { + // Only show truncation if the newline wasn't the last character. 
+ if i < buf.Len()-1 { + didTruncate = true + } + buf.Truncate(i) + } + if didTruncate { + buf.WriteString("...") + } + return buf } // removeCarriageReturns removes all carriage returns from the input if the @@ -173,9 +142,11 @@ func removeCarriageReturns(b bytes.Buffer) bytes.Buffer { func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync.WaitGroup) { defer wg.Done() + _, isNagios := e.parser.(*nagios.NagiosParser) - out, err := e.runner.Run(e, command, acc) - if err != nil { + out, errbuf, runErr := e.runner.Run(command, e.Timeout.Duration) + if !isNagios && runErr != nil { + err := fmt.Errorf("exec: %s for command '%s': %s", runErr, command, string(errbuf)) acc.AddError(err) return } @@ -183,11 +154,19 @@ func (e *Exec) ProcessCommand(command string, acc telegraf.Accumulator, wg *sync metrics, err := e.parser.Parse(out) if err != nil { acc.AddError(err) - } else { - for _, metric := range metrics { - acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) + return + } + + if isNagios { + metrics, err = nagios.TryAddState(runErr, metrics) + if err != nil { + e.Log.Errorf("Failed to add nagios state: %s", err) } } + + for _, m := range metrics { + acc.AddMetric(m) + } } func (e *Exec) SampleConfig() string { @@ -249,6 +228,10 @@ func (e *Exec) Gather(acc telegraf.Accumulator) error { return nil } +func (e *Exec) Init() error { + return nil +} + func init() { inputs.Add("exec", func() telegraf.Input { return NewExec() diff --git a/plugins/inputs/exec/exec_test.go b/plugins/inputs/exec/exec_test.go index c7c181b17..d0fcc71f6 100644 --- a/plugins/inputs/exec/exec_test.go +++ b/plugins/inputs/exec/exec_test.go @@ -5,10 +5,9 @@ import ( "fmt" "runtime" "testing" + "time" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -74,28 +73,31 @@ var crTests = []CarriageReturnTest{ } type runnerMock struct { - out []byte - err error + out []byte + errout []byte + err error } -func newRunnerMock(out []byte, err error) Runner { +func newRunnerMock(out []byte, errout []byte, err error) Runner { return &runnerMock{ - out: out, - err: err, + out: out, + errout: errout, + err: err, } } -func (r runnerMock) Run(e *Exec, command string, acc telegraf.Accumulator) ([]byte, error) { - if r.err != nil { - return nil, r.err - } - return r.out, nil +func (r runnerMock) Run(command string, _ time.Duration) ([]byte, []byte, error) { + return r.out, r.errout, r.err } func TestExec(t *testing.T) { - parser, _ := parsers.NewJSONParser("exec", []string{}, nil) + parser, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "exec", + }) e := &Exec{ - runner: newRunnerMock([]byte(validJson), nil), + Log: testutil.Logger{}, + runner: newRunnerMock([]byte(validJson), nil, nil), Commands: []string{"testcommand arg1"}, parser: parser, } @@ -119,9 +121,13 @@ func TestExec(t *testing.T) { } func TestExecMalformed(t *testing.T) { - parser, _ := parsers.NewJSONParser("exec", []string{}, nil) + parser, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "exec", + }) e := &Exec{ - runner: newRunnerMock([]byte(malformedJson), nil), + Log: testutil.Logger{}, + runner: newRunnerMock([]byte(malformedJson), nil, nil), Commands: []string{"badcommand arg1"}, parser: parser, } @@ -132,9 +138,13 @@ func TestExecMalformed(t *testing.T) { } func TestCommandError(t *testing.T) { - parser, _ := 
parsers.NewJSONParser("exec", []string{}, nil) + parser, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "exec", + }) e := &Exec{ - runner: newRunnerMock(nil, fmt.Errorf("exit status code 1")), + Log: testutil.Logger{}, + runner: newRunnerMock(nil, nil, fmt.Errorf("exit status code 1")), Commands: []string{"badcommand"}, parser: parser, } @@ -192,6 +202,66 @@ func TestExecCommandWithoutGlobAndPath(t *testing.T) { acc.AssertContainsFields(t, "metric", fields) } +func TestTruncate(t *testing.T) { + tests := []struct { + name string + bufF func() *bytes.Buffer + expF func() *bytes.Buffer + }{ + { + name: "should not truncate", + bufF: func() *bytes.Buffer { + var b bytes.Buffer + b.WriteString("hello world") + return &b + }, + expF: func() *bytes.Buffer { + var b bytes.Buffer + b.WriteString("hello world") + return &b + }, + }, + { + name: "should truncate up to the new line", + bufF: func() *bytes.Buffer { + var b bytes.Buffer + b.WriteString("hello world\nand all the people") + return &b + }, + expF: func() *bytes.Buffer { + var b bytes.Buffer + b.WriteString("hello world...") + return &b + }, + }, + { + name: "should truncate to the MaxStderrBytes", + bufF: func() *bytes.Buffer { + var b bytes.Buffer + for i := 0; i < 2*MaxStderrBytes; i++ { + b.WriteByte('b') + } + return &b + }, + expF: func() *bytes.Buffer { + var b bytes.Buffer + for i := 0; i < MaxStderrBytes; i++ { + b.WriteByte('b') + } + b.WriteString("...") + return &b + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res := truncate(*tt.bufF()) + require.Equal(t, tt.expF().Bytes(), res.Bytes()) + }) + } +} + func TestRemoveCarriageReturns(t *testing.T) { if runtime.GOOS == "windows" { // Test that all carriage returns are removed diff --git a/plugins/inputs/execd/README.md b/plugins/inputs/execd/README.md new file mode 100644 index 000000000..f8709c8be --- /dev/null +++ b/plugins/inputs/execd/README.md @@ -0,0 +1,119 @@ +# Execd Input Plugin + +The `execd` plugin runs an external program as a long-running daemon. +The programs must output metrics in any one of the accepted +[Input Data Formats](input_formats) on the process's STDOUT, and is expected to +stay running. If you'd instead like the process to collect metrics and then exit, +check out the [inputs.exec](exec_plugin) plugin. + +The `signal` can be configured to send a signal the running daemon on each +collection interval. + +Program output on standard error is mirrored to the telegraf log. + +### Configuration: + +```toml +[[inputs.execd]] + ## Program to run as daemon + command = ["telegraf-smartctl", "-d", "/dev/sda"] + + ## Define how the process is signaled on each collection interval. + ## Valid values are: + ## "none" : Do not signal anything. (Recommended for service inputs) + ## The process must output metrics by itself. + ## "STDIN" : Send a newline on STDIN. (Recommended for gather inputs) + ## "SIGHUP" : Send a HUP signal. Not available on Windows. (not recommended) + ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. + ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. + signal = "none" + + ## Delay before the process is restarted after an unexpected termination + restart_delay = "10s" + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + +### Example + +##### Daemon written in bash using STDIN signaling + +```bash +#!/bin/bash + +counter=0 + +while IFS= read -r LINE; do + echo "counter_bash count=${counter}" + let counter=counter+1 +done +``` + +```toml +[[inputs.execd]] + command = ["plugins/inputs/execd/examples/count.sh"] + signal = "STDIN" +``` + +##### Go daemon using SIGHUP + +```go +package main + +import ( + "fmt" + "os" + "os/signal" + "syscall" +) + +func main() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + + counter := 0 + + for { + <-c + + fmt.Printf("counter_go count=%d\n", counter) + counter++ + } +} + +``` + +```toml +[[inputs.execd]] + command = ["plugins/inputs/execd/examples/count.go.exe"] + signal = "SIGHUP" +``` + +##### Ruby daemon running standalone + +```ruby +#!/usr/bin/env ruby + +counter = 0 + +loop do + puts "counter_ruby count=#{counter}" + STDOUT.flush + + counter += 1 + sleep 1 +end +``` + +```toml +[[inputs.execd]] + command = ["plugins/inputs/execd/examples/count.rb"] + signal = "none" +``` + +[input_formats]: https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +[exec_plugin]: https://github.com/influxdata/telegraf/blob/master/plugins/inputs/exec/README.md diff --git a/plugins/inputs/execd/examples/count.go b/plugins/inputs/execd/examples/count.go new file mode 100644 index 000000000..d5e4a12e1 --- /dev/null +++ b/plugins/inputs/execd/examples/count.go @@ -0,0 +1,24 @@ +package main + +// Example using HUP signaling + +import ( + "fmt" + "os" + "os/signal" + "syscall" +) + +func main() { + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + + counter := 0 + + for { + <-c + + fmt.Printf("counter_go count=%d\n", counter) + counter++ + } +} diff --git a/plugins/inputs/execd/examples/count.rb b/plugins/inputs/execd/examples/count.rb new file mode 100755 index 000000000..6b60fbc17 --- /dev/null +++ b/plugins/inputs/execd/examples/count.rb @@ -0,0 +1,21 @@ +#!/usr/bin/env ruby + +## Example in Ruby not using any signaling + +counter = 0 + +def time_ns_str(t) + ns = t.nsec.to_s + (9 - ns.size).times do + ns = "0" + ns # left pad + end + t.to_i.to_s + ns +end + +loop do + puts "counter_ruby count=#{counter} #{time_ns_str(Time.now)}" + STDOUT.flush + counter += 1 + + sleep 1 +end diff --git a/plugins/inputs/execd/examples/count.sh b/plugins/inputs/execd/examples/count.sh new file mode 100755 index 000000000..bbbe8619c --- /dev/null +++ b/plugins/inputs/execd/examples/count.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +## Example in bash using STDIN signaling + +counter=0 + +while read LINE; do + echo "counter_bash count=${counter}" + counter=$((counter+1)) +done + +trap "echo terminate 1>&2" EXIT \ No newline at end of file diff --git a/plugins/inputs/execd/execd.go b/plugins/inputs/execd/execd.go new file mode 100644 index 000000000..ca9e589d9 --- /dev/null +++ b/plugins/inputs/execd/execd.go @@ -0,0 +1,267 @@ +package execd + +import ( + "bufio" + "context" + "fmt" + "io" + "log" + "os/exec" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/influx" +) + +const sampleConfig = ` + ## Program to run as daemon + command = ["telegraf-smartctl", 
"-d", "/dev/sda"] + + ## Define how the process is signaled on each collection interval. + ## Valid values are: + ## "none" : Do not signal anything. + ## The process must output metrics by itself. + ## "STDIN" : Send a newline on STDIN. + ## "SIGHUP" : Send a HUP signal. Not available on Windows. + ## "SIGUSR1" : Send a USR1 signal. Not available on Windows. + ## "SIGUSR2" : Send a USR2 signal. Not available on Windows. + signal = "none" + + ## Delay before the process is restarted after an unexpected termination + restart_delay = "10s" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +type Execd struct { + Command []string + Signal string + RestartDelay config.Duration + + acc telegraf.Accumulator + cmd *exec.Cmd + parser parsers.Parser + stdin io.WriteCloser + stdout io.ReadCloser + stderr io.ReadCloser + cancel context.CancelFunc + mainLoopWg sync.WaitGroup +} + +func (e *Execd) SampleConfig() string { + return sampleConfig +} + +func (e *Execd) Description() string { + return "Run executable as long-running input plugin" +} + +func (e *Execd) SetParser(parser parsers.Parser) { + e.parser = parser +} + +func (e *Execd) Start(acc telegraf.Accumulator) error { + e.acc = acc + + if len(e.Command) == 0 { + return fmt.Errorf("FATAL no command specified") + } + + e.mainLoopWg.Add(1) + + ctx, cancel := context.WithCancel(context.Background()) + e.cancel = cancel + + if err := e.cmdStart(); err != nil { + return err + } + + go func() { + if err := e.cmdLoop(ctx); err != nil { + log.Printf("Process quit with message: %s", err.Error()) + } + e.mainLoopWg.Done() + }() + + return nil +} + +func (e *Execd) Stop() { + // don't try to stop before all stream readers have started. + e.cancel() + e.mainLoopWg.Wait() +} + +// cmdLoop watches an already running process, restarting it when appropriate. +func (e *Execd) cmdLoop(ctx context.Context) error { + for { + // Use a buffered channel to ensure goroutine below can exit + // if `ctx.Done` is selected and nothing reads on `done` anymore + done := make(chan error, 1) + go func() { + done <- e.cmdWait() + }() + + select { + case <-ctx.Done(): + if e.stdin != nil { + e.stdin.Close() + gracefulStop(e.cmd, 5*time.Second) + } + return nil + case err := <-done: + log.Printf("Process %s terminated: %s", e.Command, err) + if isQuitting(ctx) { + return err + } + } + + log.Printf("Restarting in %s...", time.Duration(e.RestartDelay)) + + select { + case <-ctx.Done(): + return nil + case <-time.After(time.Duration(e.RestartDelay)): + // Continue the loop and restart the process + if err := e.cmdStart(); err != nil { + return err + } + } + } +} + +func isQuitting(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +func (e *Execd) cmdStart() (err error) { + if len(e.Command) > 1 { + e.cmd = exec.Command(e.Command[0], e.Command[1:]...) 
+ } else { + e.cmd = exec.Command(e.Command[0]) + } + + e.stdin, err = e.cmd.StdinPipe() + if err != nil { + return fmt.Errorf("Error opening stdin pipe: %s", err) + } + + e.stdout, err = e.cmd.StdoutPipe() + if err != nil { + return fmt.Errorf("Error opening stdout pipe: %s", err) + } + + e.stderr, err = e.cmd.StderrPipe() + if err != nil { + return fmt.Errorf("Error opening stderr pipe: %s", err) + } + + log.Printf("Starting process: %s", e.Command) + + err = e.cmd.Start() + if err != nil { + return fmt.Errorf("Error starting process: %s", err) + } + + return nil +} + +func (e *Execd) cmdWait() error { + var wg sync.WaitGroup + wg.Add(2) + + go func() { + e.cmdReadOut(e.stdout) + wg.Done() + }() + + go func() { + e.cmdReadErr(e.stderr) + wg.Done() + }() + + wg.Wait() + return e.cmd.Wait() +} + +func (e *Execd) cmdReadOut(out io.Reader) { + if _, isInfluxParser := e.parser.(*influx.Parser); isInfluxParser { + // work around the lack of built-in streaming parser. :( + e.cmdReadOutStream(out) + return + } + + scanner := bufio.NewScanner(out) + + for scanner.Scan() { + metrics, err := e.parser.Parse(scanner.Bytes()) + if err != nil { + e.acc.AddError(fmt.Errorf("Parse error: %s", err)) + } + + for _, metric := range metrics { + e.acc.AddMetric(metric) + } + } + + if err := scanner.Err(); err != nil { + e.acc.AddError(fmt.Errorf("Error reading stdout: %s", err)) + } +} + +func (e *Execd) cmdReadOutStream(out io.Reader) { + parser := influx.NewStreamParser(out) + + for { + metric, err := parser.Next() + if err != nil { + if err == influx.EOF { + break // stream ended + } + if parseErr, isParseError := err.(*influx.ParseError); isParseError { + // parse error. + e.acc.AddError(parseErr) + continue + } + // some non-recoverable error? + e.acc.AddError(err) + return + } + + e.acc.AddMetric(metric) + } +} + +func (e *Execd) cmdReadErr(out io.Reader) { + scanner := bufio.NewScanner(out) + + for scanner.Scan() { + log.Printf("stderr: %q", scanner.Text()) + } + + if err := scanner.Err(); err != nil { + e.acc.AddError(fmt.Errorf("Error reading stderr: %s", err)) + } +} + +func init() { + inputs.Add("execd", func() telegraf.Input { + return &Execd{ + Signal: "none", + RestartDelay: config.Duration(10 * time.Second), + } + }) +} diff --git a/plugins/inputs/execd/execd_posix.go b/plugins/inputs/execd/execd_posix.go new file mode 100644 index 000000000..cc3a8e8bb --- /dev/null +++ b/plugins/inputs/execd/execd_posix.go @@ -0,0 +1,49 @@ +// +build !windows + +package execd + +import ( + "fmt" + "io" + "os" + "os/exec" + "syscall" + "time" + + "github.com/influxdata/telegraf" +) + +func (e *Execd) Gather(acc telegraf.Accumulator) error { + if e.cmd == nil || e.cmd.Process == nil { + return nil + } + + switch e.Signal { + case "SIGHUP": + e.cmd.Process.Signal(syscall.SIGHUP) + case "SIGUSR1": + e.cmd.Process.Signal(syscall.SIGUSR1) + case "SIGUSR2": + e.cmd.Process.Signal(syscall.SIGUSR2) + case "STDIN": + if osStdin, ok := e.stdin.(*os.File); ok { + osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)) + } + if _, err := io.WriteString(e.stdin, "\n"); err != nil { + return fmt.Errorf("Error writing to stdin: %s", err) + } + case "none": + default: + return fmt.Errorf("invalid signal: %s", e.Signal) + } + + return nil +} + +func gracefulStop(cmd *exec.Cmd, timeout time.Duration) { + cmd.Process.Signal(syscall.SIGTERM) + go func() { + <-time.NewTimer(timeout).C + cmd.Process.Kill() + }() +} diff --git a/plugins/inputs/execd/execd_test.go b/plugins/inputs/execd/execd_test.go new file mode 100644 index 
000000000..52c0a214b --- /dev/null +++ b/plugins/inputs/execd/execd_test.go @@ -0,0 +1,133 @@ +// +build !windows + +package execd + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/config" + "github.com/influxdata/telegraf/models" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf/plugins/parsers" + + "github.com/influxdata/telegraf" +) + +func TestExternalInputWorks(t *testing.T) { + jsonParser, err := parsers.NewInfluxParser() + require.NoError(t, err) + + e := &Execd{ + Command: []string{shell(), fileShellScriptPath()}, + RestartDelay: config.Duration(5 * time.Second), + parser: jsonParser, + Signal: "STDIN", + } + + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + acc := agent.NewAccumulator(&TestMetricMaker{}, metrics) + + require.NoError(t, e.Start(acc)) + require.NoError(t, e.Gather(acc)) + + // grab a metric and make sure it's a thing + m := readChanWithTimeout(t, metrics, 10*time.Second) + + e.Stop() + + require.Equal(t, "counter_bash", m.Name()) + val, ok := m.GetField("count") + require.True(t, ok) + require.Equal(t, float64(0), val) + // test that a later gather will not panic + e.Gather(acc) +} + +func TestParsesLinesContainingNewline(t *testing.T) { + parser, err := parsers.NewInfluxParser() + require.NoError(t, err) + + metrics := make(chan telegraf.Metric, 10) + defer close(metrics) + acc := agent.NewAccumulator(&TestMetricMaker{}, metrics) + + e := &Execd{ + Command: []string{shell(), fileShellScriptPath()}, + RestartDelay: config.Duration(5 * time.Second), + parser: parser, + Signal: "STDIN", + acc: acc, + } + + cases := []struct { + Name string + Value string + }{ + { + Name: "no-newline", + Value: "my message", + }, { + Name: "newline", + Value: "my\nmessage", + }, + } + + for _, test := range cases { + t.Run(test.Name, func(t *testing.T) { + line := fmt.Sprintf("event message=\"%v\" 1587128639239000000", test.Value) + + e.cmdReadOut(strings.NewReader(line)) + + m := readChanWithTimeout(t, metrics, 1*time.Second) + + require.Equal(t, "event", m.Name()) + val, ok := m.GetField("message") + require.True(t, ok) + require.Equal(t, test.Value, val) + }) + } +} + +func readChanWithTimeout(t *testing.T, metrics chan telegraf.Metric, timeout time.Duration) telegraf.Metric { + to := time.NewTimer(timeout) + defer to.Stop() + select { + case m := <-metrics: + return m + case <-to.C: + require.FailNow(t, "timeout waiting for metric") + } + return nil +} + +func fileShellScriptPath() string { + return "./examples/count.sh" +} + +func shell() string { + return "sh" +} + +type TestMetricMaker struct{} + +func (tm *TestMetricMaker) Name() string { + return "TestPlugin" +} + +func (tm *TestMetricMaker) LogName() string { + return tm.Name() +} + +func (tm *TestMetricMaker) MakeMetric(metric telegraf.Metric) telegraf.Metric { + return metric +} + +func (tm *TestMetricMaker) Log() telegraf.Logger { + return models.NewLogger("TestPlugin", "test", "") +} diff --git a/plugins/inputs/execd/execd_windows.go b/plugins/inputs/execd/execd_windows.go new file mode 100644 index 000000000..82935d4ac --- /dev/null +++ b/plugins/inputs/execd/execd_windows.go @@ -0,0 +1,38 @@ +// +build windows + +package execd + +import ( + "fmt" + "io" + "os" + "os/exec" + "time" + + "github.com/influxdata/telegraf" +) + +func (e *Execd) Gather(acc telegraf.Accumulator) error { + if e.cmd == nil || e.cmd.Process == nil { + return nil + } + + switch e.Signal { + case "STDIN": + if osStdin, ok := 
e.stdin.(*os.File); ok { + osStdin.SetWriteDeadline(time.Now().Add(1 * time.Second)) + } + if _, err := io.WriteString(e.stdin, "\n"); err != nil { + return fmt.Errorf("Error writing to stdin: %s", err) + } + case "none": + default: + return fmt.Errorf("invalid signal: %s", e.Signal) + } + + return nil +} + +func gracefulStop(cmd *exec.Cmd, timeout time.Duration) { + cmd.Process.Kill() +} diff --git a/plugins/inputs/execd/shim/README.md b/plugins/inputs/execd/shim/README.md new file mode 100644 index 000000000..3bdb69f92 --- /dev/null +++ b/plugins/inputs/execd/shim/README.md @@ -0,0 +1,48 @@ +# Telegraf Execd Go Shim + +The goal of this _shim_ is to make it trivial to extract an internal input plugin +out to a stand-alone repo for the purpose of compiling it as a separate app and +running it from the inputs.execd plugin. + +The execd-shim is still experimental and the interface may change in the future. +Especially as the concept expands to processors, aggregators, and outputs. + +## Steps to externalize a plugin + +1. Move the project to an external repo, optionally preserving the + _plugins/inputs/plugin_name_ folder structure. For an example of what this might + look at, take a look at [ssoroka/rand](https://github.com/ssoroka/rand) or + [danielnelson/telegraf-plugins](https://github.com/danielnelson/telegraf-plugins) +1. Copy [main.go](./example/cmd/main.go) into your project under the cmd folder. + This will be the entrypoint to the plugin when run as a stand-alone program, and + it will call the shim code for you to make that happen. +1. Edit the main.go file to import your plugin. Within Telegraf this would have + been done in an all.go file, but here we don't split the two apart, and the change + just goes in the top of main.go. If you skip this step, your plugin will do nothing. +1. Optionally add a [plugin.conf](./example/cmd/plugin.conf) for configuration + specific to your plugin. Note that this config file **must be separate from the + rest of the config for Telegraf, and must not be in a shared directory where + Telegraf is expecting to load all configs**. If Telegraf reads this config file + it will not know which plugin it relates to. + +## Steps to build and run your plugin + +1. Build the cmd/main.go. For my rand project this looks like `go build -o rand cmd/main.go` +1. Test out the binary if you haven't done this yet. eg `./rand -config plugin.conf` + Depending on your polling settings and whether you implemented a service plugin or + an input gathering plugin, you may see data right away, or you may have to hit enter + first, or wait for your poll duration to elapse, but the metrics will be written to + STDOUT. Ctrl-C to end your test. +1. Configure Telegraf to call your new plugin binary. eg: + +``` +[[inputs.execd]] + command = ["/path/to/rand", "-config", "/path/to/plugin.conf"] + signal = "none" +``` + +## Congratulations! + +You've done it! Consider publishing your plugin to github and open a Pull Request +back to the Telegraf repo letting us know about the availability of your +[external plugin](https://github.com/influxdata/telegraf/blob/master/EXTERNAL_PLUGINS.md). 
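+As a rough sketch of what the plugin side can look like, the code that `main.go` imports is just an ordinary Telegraf input that registers itself in `init()`. The `mypluginname.MyPlugin` and `value_name` names simply follow the placeholders used in the example `main.go` and `plugin.conf` that follow; they are not a published plugin.
+
+```go
+package mypluginname
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// MyPlugin is a minimal gathering input: the shim calls Gather on every poll
+// interval and writes the resulting metrics to STDOUT for inputs.execd.
+type MyPlugin struct {
+	ValueName string `toml:"value_name"`
+	count     int64
+}
+
+func (p *MyPlugin) Description() string {
+	return "a demo input that counts how often it has been polled"
+}
+
+func (p *MyPlugin) SampleConfig() string {
+	return `
+[[inputs.my_plugin_name]]
+  value_name = "value"
+`
+}
+
+func (p *MyPlugin) Gather(acc telegraf.Accumulator) error {
+	p.count++
+	acc.AddFields("my_plugin_name", map[string]interface{}{p.ValueName: p.count}, nil)
+	return nil
+}
+
+func init() {
+	// Registering the input is what makes it visible to the shim (and to
+	// the [[inputs.my_plugin_name]] section of plugin.conf); importing the
+	// package from main.go triggers this init().
+	inputs.Add("my_plugin_name", func() telegraf.Input {
+		return &MyPlugin{ValueName: "value"}
+	})
+}
+```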
\ No newline at end of file diff --git a/plugins/inputs/execd/shim/example/cmd/main.go b/plugins/inputs/execd/shim/example/cmd/main.go new file mode 100644 index 000000000..bf8bd50d8 --- /dev/null +++ b/plugins/inputs/execd/shim/example/cmd/main.go @@ -0,0 +1,60 @@ +package main + +import ( + "flag" + "fmt" + "os" + "time" + + // TODO: import your plugins + // _ "github.com/my_github_user/my_plugin_repo/plugins/inputs/mypluginname" + + "github.com/influxdata/telegraf/plugins/inputs/execd/shim" +) + +var pollInterval = flag.Duration("poll_interval", 1*time.Second, "how often to send metrics") +var pollIntervalDisabled = flag.Bool("poll_interval_disabled", false, "set to true to disable polling") +var configFile = flag.String("config", "", "path to the config file for this plugin") +var err error + +// This is designed to be simple; just change the import above and you're good. +// +// However, if you want to do all your config in code, you can do so like this: +// +// // initialize your plugin with any settings you want +// myInput := &mypluginname.MyPlugin{ +// DefaultSettingHere: 3, +// } +// +// shim := shim.New() +// +// shim.AddInput(myInput) +// +// // now call shim.Run() as below. +// +func main() { + // parse command line options + flag.Parse() + if *pollIntervalDisabled { + *pollInterval = shim.PollIntervalDisabled + } + + // create the shim. This is what will run your plugins. + shim := shim.New() + + // If no config is specified, all imported plugins are loaded + // with their default settings; otherwise only the plugins the + // config asks for are loaded, with their settings read from + // the config toml file. + err = shim.LoadConfig(configFile) + if err != nil { + fmt.Fprintf(os.Stderr, "Err loading input: %s\n", err) + os.Exit(1) + } + + // run the input plugin(s) until stdin closes or we receive a termination signal + if err := shim.Run(*pollInterval); err != nil { + fmt.Fprintf(os.Stderr, "Err: %s\n", err) + os.Exit(1) + } +} diff --git a/plugins/inputs/execd/shim/example/cmd/plugin.conf b/plugins/inputs/execd/shim/example/cmd/plugin.conf new file mode 100644 index 000000000..53f89a559 --- /dev/null +++ b/plugins/inputs/execd/shim/example/cmd/plugin.conf @@ -0,0 +1,2 @@ +[[inputs.my_plugin_name]] + value_name = "value" diff --git a/plugins/inputs/execd/shim/goshim.go b/plugins/inputs/execd/shim/goshim.go new file mode 100644 index 000000000..4c1589b48 --- /dev/null +++ b/plugins/inputs/execd/shim/goshim.go @@ -0,0 +1,319 @@ +package shim + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/signal" + "strings" + "sync" + "syscall" + "time" + + "github.com/BurntSushi/toml" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/agent" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/serializers/influx" +) + +type empty struct{} + +var ( + stdout io.Writer = os.Stdout + stdin io.Reader = os.Stdin + forever = 100 * 365 * 24 * time.Hour + envVarEscaper = strings.NewReplacer( + `"`, `\"`, + `\`, `\\`, + ) +) + +const ( + // PollIntervalDisabled is used to indicate that you want to disable polling, + // as opposed to duration 0 meaning poll constantly. + PollIntervalDisabled = time.Duration(0) +) + +// Shim allows you to wrap your inputs and run them as if they were part of Telegraf, +// except built externally. 
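+// A Shim tracks the wrapped inputs, one prompt channel per input used to
+// trigger an out-of-schedule gather, and a shared channel on which the
+// accumulators deliver their metrics for serialization to stdout.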
+type Shim struct { + Inputs []telegraf.Input + gatherPromptChans []chan empty + metricCh chan telegraf.Metric +} + +// New creates a new shim interface +func New() *Shim { + return &Shim{} +} + +// AddInput adds the input to the shim. Later calls to Run() will run this input. +func (s *Shim) AddInput(input telegraf.Input) error { + if p, ok := input.(telegraf.Initializer); ok { + err := p.Init() + if err != nil { + return fmt.Errorf("failed to init input: %s", err) + } + } + + s.Inputs = append(s.Inputs, input) + return nil +} + +// AddInputs adds multiple inputs to the shim. Later calls to Run() will run these. +func (s *Shim) AddInputs(newInputs []telegraf.Input) error { + for _, inp := range newInputs { + if err := s.AddInput(inp); err != nil { + return err + } + } + return nil +} + +// Run the input plugins.. +func (s *Shim) Run(pollInterval time.Duration) error { + // context is used only to close the stdin reader. everything else cascades + // from that point and closes cleanly when it's done. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s.metricCh = make(chan telegraf.Metric, 1) + + wg := sync.WaitGroup{} + quit := make(chan os.Signal, 1) + signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) + + collectMetricsPrompt := make(chan os.Signal, 1) + listenForCollectMetricsSignals(ctx, collectMetricsPrompt) + + serializer := influx.NewSerializer() + + for _, input := range s.Inputs { + wrappedInput := inputShim{Input: input} + + acc := agent.NewAccumulator(wrappedInput, s.metricCh) + acc.SetPrecision(time.Nanosecond) + + if serviceInput, ok := input.(telegraf.ServiceInput); ok { + if err := serviceInput.Start(acc); err != nil { + return fmt.Errorf("failed to start input: %s", err) + } + } + gatherPromptCh := make(chan empty, 1) + s.gatherPromptChans = append(s.gatherPromptChans, gatherPromptCh) + wg.Add(1) + go func(input telegraf.Input) { + startGathering(ctx, input, acc, gatherPromptCh, pollInterval) + if serviceInput, ok := input.(telegraf.ServiceInput); ok { + serviceInput.Stop() + } + close(gatherPromptCh) + wg.Done() + }(input) + } + + go s.stdinCollectMetricsPrompt(ctx, cancel, collectMetricsPrompt) + go s.closeMetricChannelWhenInputsFinish(&wg) + +loop: + for { + select { + case <-quit: // user-triggered quit + // cancel, but keep looping until the metric channel closes. + cancel() + case _, open := <-collectMetricsPrompt: + if !open { // stdin-close-triggered quit + cancel() + continue + } + s.collectMetrics(ctx) + case m, open := <-s.metricCh: + if !open { + break loop + } + b, err := serializer.Serialize(m) + if err != nil { + return fmt.Errorf("failed to serialize metric: %s", err) + } + // Write this to stdout + fmt.Fprint(stdout, string(b)) + } + } + + return nil +} + +func hasQuit(ctx context.Context) bool { + select { + case <-ctx.Done(): + return true + default: + return false + } +} + +func (s *Shim) stdinCollectMetricsPrompt(ctx context.Context, cancel context.CancelFunc, collectMetricsPrompt chan<- os.Signal) { + defer func() { + cancel() + close(collectMetricsPrompt) + }() + + scanner := bufio.NewScanner(stdin) + // for every line read from stdin, make sure we're not supposed to quit, + // then push a message on to the collectMetricsPrompt + for scanner.Scan() { + // first check if we should quit + if hasQuit(ctx) { + return + } + + // now push a non-blocking message to trigger metric collection. 
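+		// (if a prompt is already pending, this request is simply dropped;
+		// see pushCollectMetricsRequest)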
+ pushCollectMetricsRequest(collectMetricsPrompt) + } +} + +// pushCollectMetricsRequest pushes a non-blocking (nil) message to the +// collectMetricsPrompt channel to trigger metric collection. +// The channel is defined with a buffer of 1, so while it's full, subsequent +// requests are discarded. +func pushCollectMetricsRequest(collectMetricsPrompt chan<- os.Signal) { + select { + case collectMetricsPrompt <- nil: + default: + } +} + +func (s *Shim) collectMetrics(ctx context.Context) { + if hasQuit(ctx) { + return + } + for i := 0; i < len(s.gatherPromptChans); i++ { + // push a message out to each channel to collect metrics. don't block. + select { + case s.gatherPromptChans[i] <- empty{}: + default: + } + } +} + +func startGathering(ctx context.Context, input telegraf.Input, acc telegraf.Accumulator, gatherPromptCh <-chan empty, pollInterval time.Duration) { + if pollInterval == PollIntervalDisabled { + return // don't poll + } + t := time.NewTicker(pollInterval) + defer t.Stop() + for { + // give priority to stopping. + if hasQuit(ctx) { + return + } + // see what's up + select { + case <-ctx.Done(): + return + case _, open := <-gatherPromptCh: + if !open { + // stdin has closed. + return + } + if err := input.Gather(acc); err != nil { + fmt.Fprintf(os.Stderr, "failed to gather metrics: %s", err) + } + case <-t.C: + if err := input.Gather(acc); err != nil { + fmt.Fprintf(os.Stderr, "failed to gather metrics: %s", err) + } + } + } +} + +// LoadConfig loads and adds the inputs to the shim +func (s *Shim) LoadConfig(filePath *string) error { + loadedInputs, err := LoadConfig(filePath) + if err != nil { + return err + } + return s.AddInputs(loadedInputs) +} + +// DefaultImportedPlugins defaults to whatever plugins happen to be loaded and +// have registered themselves with the registry. This makes loading plugins +// without having to define a config dead easy. +func DefaultImportedPlugins() (i []telegraf.Input, e error) { + for _, inputCreatorFunc := range inputs.Inputs { + i = append(i, inputCreatorFunc()) + } + return i, nil +} + +// LoadConfig loads the config and returns inputs that later need to be loaded. 
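+// If filePath is nil or empty, every plugin registered via an import is
+// returned with its default settings instead of reading a config file.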
+func LoadConfig(filePath *string) ([]telegraf.Input, error) { + if filePath == nil || *filePath == "" { + return DefaultImportedPlugins() + } + + b, err := ioutil.ReadFile(*filePath) + if err != nil { + return nil, err + } + + s := expandEnvVars(b) + + conf := struct { + Inputs map[string][]toml.Primitive + }{} + + md, err := toml.Decode(s, &conf) + if err != nil { + return nil, err + } + + loadedInputs, err := loadConfigIntoInputs(md, conf.Inputs) + + if len(md.Undecoded()) > 0 { + fmt.Fprintf(stdout, "Some plugins were loaded but not used: %q\n", md.Undecoded()) + } + return loadedInputs, err +} + +func expandEnvVars(contents []byte) string { + return os.Expand(string(contents), getEnv) +} + +func getEnv(key string) string { + v := os.Getenv(key) + + return envVarEscaper.Replace(v) +} + +func loadConfigIntoInputs(md toml.MetaData, inputConfigs map[string][]toml.Primitive) ([]telegraf.Input, error) { + renderedInputs := []telegraf.Input{} + + for name, primitives := range inputConfigs { + inputCreator, ok := inputs.Inputs[name] + if !ok { + return nil, errors.New("unknown input " + name) + } + + for _, primitive := range primitives { + inp := inputCreator() + // Parse specific configuration + if err := md.PrimitiveDecode(primitive, inp); err != nil { + return nil, err + } + + renderedInputs = append(renderedInputs, inp) + } + } + return renderedInputs, nil +} + +func (s *Shim) closeMetricChannelWhenInputsFinish(wg *sync.WaitGroup) { + wg.Wait() + close(s.metricCh) +} diff --git a/plugins/inputs/execd/shim/goshim_posix.go b/plugins/inputs/execd/shim/goshim_posix.go new file mode 100644 index 000000000..4e4a04f14 --- /dev/null +++ b/plugins/inputs/execd/shim/goshim_posix.go @@ -0,0 +1,23 @@ +// +build !windows + +package shim + +import ( + "context" + "os" + "os/signal" + "syscall" +) + +func listenForCollectMetricsSignals(ctx context.Context, collectMetricsPrompt chan os.Signal) { + // just listen to all the signals. + signal.Notify(collectMetricsPrompt, syscall.SIGHUP, syscall.SIGUSR1, syscall.SIGUSR2) + + go func() { + select { + case <-ctx.Done(): + // context done. stop to signals to avoid pushing messages to a closed channel + signal.Stop(collectMetricsPrompt) + } + }() +} diff --git a/plugins/inputs/execd/shim/goshim_windows.go b/plugins/inputs/execd/shim/goshim_windows.go new file mode 100644 index 000000000..317f8a2f3 --- /dev/null +++ b/plugins/inputs/execd/shim/goshim_windows.go @@ -0,0 +1,22 @@ +// +build windows + +package shim + +import ( + "context" + "os" + "os/signal" + "syscall" +) + +func listenForCollectMetricsSignals(ctx context.Context, collectMetricsPrompt chan os.Signal) { + signal.Notify(collectMetricsPrompt, syscall.SIGHUP) + + go func() { + select { + case <-ctx.Done(): + // context done. stop to signals to avoid pushing messages to a closed channel + signal.Stop(collectMetricsPrompt) + } + }() +} diff --git a/plugins/inputs/execd/shim/input.go b/plugins/inputs/execd/shim/input.go new file mode 100644 index 000000000..6dff9cd7f --- /dev/null +++ b/plugins/inputs/execd/shim/input.go @@ -0,0 +1,20 @@ +package shim + +import "github.com/influxdata/telegraf" + +// inputShim implements the MetricMaker interface. +type inputShim struct { + Input telegraf.Input +} + +func (i inputShim) LogName() string { + return "" +} + +func (i inputShim) MakeMetric(m telegraf.Metric) telegraf.Metric { + return m // don't need to do anything to it. 
+} + +func (i inputShim) Log() telegraf.Logger { + return nil +} diff --git a/plugins/inputs/execd/shim/shim_posix_test.go b/plugins/inputs/execd/shim/shim_posix_test.go new file mode 100644 index 000000000..de549cc3c --- /dev/null +++ b/plugins/inputs/execd/shim/shim_posix_test.go @@ -0,0 +1,70 @@ +// +build !windows + +package shim + +import ( + "bufio" + "context" + "io" + "os" + "runtime" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestShimUSR1SignalingWorks(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip() + return + } + stdinReader, stdinWriter := io.Pipe() + stdoutReader, stdoutWriter := io.Pipe() + + stdin = stdinReader + stdout = stdoutWriter + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + metricProcessed, exited := runInputPlugin(t, 20*time.Minute) + + // signal USR1 to yourself. + pid := os.Getpid() + process, err := os.FindProcess(pid) + require.NoError(t, err) + + go func() { + // On slow machines this signal can fire before the service comes up. + // rather than depend on accurate sleep times, we'll just retry sending + // the signal every so often until it goes through. + for { + select { + case <-ctx.Done(): + return // test is done + default: + // test isn't done, keep going. + process.Signal(syscall.SIGUSR1) + time.Sleep(200 * time.Millisecond) + } + } + }() + + timeout := time.NewTimer(10 * time.Second) + + select { + case <-metricProcessed: + case <-timeout.C: + require.Fail(t, "Timeout waiting for metric to arrive") + } + cancel() + + r := bufio.NewReader(stdoutReader) + out, err := r.ReadString('\n') + require.NoError(t, err) + require.Equal(t, "measurement,tag=tag field=1i 1234000005678\n", out) + + stdinWriter.Close() + <-exited +} diff --git a/plugins/inputs/execd/shim/shim_test.go b/plugins/inputs/execd/shim/shim_test.go new file mode 100644 index 000000000..5fd79895f --- /dev/null +++ b/plugins/inputs/execd/shim/shim_test.go @@ -0,0 +1,174 @@ +package shim + +import ( + "bufio" + "bytes" + "io" + "os" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +func TestShimWorks(t *testing.T) { + stdoutBytes := bytes.NewBufferString("") + stdout = stdoutBytes + + stdin, _ = io.Pipe() // hold the stdin pipe open + + timeout := time.NewTimer(10 * time.Second) + metricProcessed, _ := runInputPlugin(t, 10*time.Millisecond) + + select { + case <-metricProcessed: + case <-timeout.C: + require.Fail(t, "Timeout waiting for metric to arrive") + } + for stdoutBytes.Len() == 0 { + select { + case <-timeout.C: + require.Fail(t, "Timeout waiting to read metric from stdout") + return + default: + time.Sleep(10 * time.Millisecond) + } + } + + out := string(stdoutBytes.Bytes()) + require.Contains(t, out, "\n") + metricLine := strings.Split(out, "\n")[0] + require.Equal(t, "measurement,tag=tag field=1i 1234000005678", metricLine) +} + +func TestShimStdinSignalingWorks(t *testing.T) { + stdinReader, stdinWriter := io.Pipe() + stdoutReader, stdoutWriter := io.Pipe() + + stdin = stdinReader + stdout = stdoutWriter + + timeout := time.NewTimer(10 * time.Second) + metricProcessed, exited := runInputPlugin(t, 40*time.Second) + + stdinWriter.Write([]byte("\n")) + + select { + case <-metricProcessed: + case <-timeout.C: + require.Fail(t, "Timeout waiting for metric to arrive") + } + + r := bufio.NewReader(stdoutReader) + out, err := r.ReadString('\n') + require.NoError(t, err) + require.Equal(t, 
"measurement,tag=tag field=1i 1234000005678\n", out) + + stdinWriter.Close() + // check that it exits cleanly + <-exited +} + +func runInputPlugin(t *testing.T, interval time.Duration) (metricProcessed chan bool, exited chan bool) { + metricProcessed = make(chan bool) + exited = make(chan bool) + inp := &testInput{ + metricProcessed: metricProcessed, + } + + shim := New() + shim.AddInput(inp) + go func() { + err := shim.Run(interval) + require.NoError(t, err) + exited <- true + }() + return metricProcessed, exited +} + +type testInput struct { + metricProcessed chan bool +} + +func (i *testInput) SampleConfig() string { + return "" +} + +func (i *testInput) Description() string { + return "" +} + +func (i *testInput) Gather(acc telegraf.Accumulator) error { + acc.AddFields("measurement", + map[string]interface{}{ + "field": 1, + }, + map[string]string{ + "tag": "tag", + }, time.Unix(1234, 5678)) + i.metricProcessed <- true + return nil +} + +func (i *testInput) Start(acc telegraf.Accumulator) error { + return nil +} + +func (i *testInput) Stop() { +} + +func TestLoadConfig(t *testing.T) { + os.Setenv("SECRET_TOKEN", "xxxxxxxxxx") + os.Setenv("SECRET_VALUE", `test"\test`) + + inputs.Add("test", func() telegraf.Input { + return &serviceInput{} + }) + + c := "./testdata/plugin.conf" + inputs, err := LoadConfig(&c) + require.NoError(t, err) + + inp := inputs[0].(*serviceInput) + + require.Equal(t, "awesome name", inp.ServiceName) + require.Equal(t, "xxxxxxxxxx", inp.SecretToken) + require.Equal(t, `test"\test`, inp.SecretValue) +} + +type serviceInput struct { + ServiceName string `toml:"service_name"` + SecretToken string `toml:"secret_token"` + SecretValue string `toml:"secret_value"` +} + +func (i *serviceInput) SampleConfig() string { + return "" +} + +func (i *serviceInput) Description() string { + return "" +} + +func (i *serviceInput) Gather(acc telegraf.Accumulator) error { + acc.AddFields("measurement", + map[string]interface{}{ + "field": 1, + }, + map[string]string{ + "tag": "tag", + }, time.Unix(1234, 5678)) + + return nil +} + +func (i *serviceInput) Start(acc telegraf.Accumulator) error { + return nil +} + +func (i *serviceInput) Stop() { +} diff --git a/plugins/inputs/execd/shim/testdata/plugin.conf b/plugins/inputs/execd/shim/testdata/plugin.conf new file mode 100644 index 000000000..78dbb33a9 --- /dev/null +++ b/plugins/inputs/execd/shim/testdata/plugin.conf @@ -0,0 +1,4 @@ +[[inputs.test]] + service_name = "awesome name" + secret_token = "${SECRET_TOKEN}" + secret_value = "$SECRET_VALUE" diff --git a/plugins/inputs/fail2ban/README.md b/plugins/inputs/fail2ban/README.md index b0f6666bb..1762bbaf2 100644 --- a/plugins/inputs/fail2ban/README.md +++ b/plugins/inputs/fail2ban/README.md @@ -1,42 +1,54 @@ # Fail2ban Input Plugin -The fail2ban plugin gathers the count of failed and banned ip addresses using [fail2ban](https://www.fail2ban.org). +The fail2ban plugin gathers the count of failed and banned ip addresses using +[fail2ban](https://www.fail2ban.org). This plugin runs the `fail2ban-client` command which generally requires root access. Acquiring the required permissions can be done using several methods: -- Use sudo run fail2ban-client. +- [Use sudo](#using-sudo) run fail2ban-client. - Run telegraf as root. 
(not recommended) -### Using sudo +### Configuration -You may edit your sudo configuration with the following: - -``` sudo -telegraf ALL=(root) NOEXEC: NOPASSWD: /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status * -``` - -### Configuration: - -``` toml +```toml # Read metrics from fail2ban. [[inputs.fail2ban]] ## Use sudo to run fail2ban-client use_sudo = false ``` -### Measurements & Fields: +### Using sudo + +Make sure to set `use_sudo = true` in your configuration file. + +You will also need to update your sudoers file. It is recommended to modify a +file in the `/etc/sudoers.d` directory using `visudo`: + +```bash +$ sudo visudo -f /etc/sudoers.d/telegraf +``` + +Add the following lines to the file. These commands allow the `telegraf` user +to call `fail2ban-client` without needing to provide a password and disable +logging of the call in the auth.log. Consult `man 8 visudo` and `man 5 +sudoers` for details. +``` +Cmnd_Alias FAIL2BAN = /usr/bin/fail2ban-client status, /usr/bin/fail2ban-client status * +telegraf ALL=(root) NOEXEC: NOPASSWD: FAIL2BAN +Defaults!FAIL2BAN !logfile, !syslog, !pam_session +``` + +### Metrics - fail2ban - - failed (integer, count) - - banned (integer, count) + - tags: + - jail + - fields: + - failed (integer, count) + - banned (integer, count) -### Tags: - -- All measurements have the following tags: - - jail - -### Example Output: +### Example Output ``` # fail2ban-client status sshd diff --git a/plugins/inputs/fibaro/README.md b/plugins/inputs/fibaro/README.md index 68fda0586..54c203102 100644 --- a/plugins/inputs/fibaro/README.md +++ b/plugins/inputs/fibaro/README.md @@ -30,6 +30,7 @@ Those values could be true (1) or false (0) for switches, percentage for dimmers - name (device name) - type (device type) - fields: + - batteryLevel (float, when available from device) - energy (float, when available from device) - power (float, when available from device) - value (float) @@ -52,4 +53,5 @@ fibaro,deviceId=220,host=vm1,name=CO2\ (ppm),room=Salon,section=Pièces\ commune fibaro,deviceId=221,host=vm1,name=Humidité\ (%),room=Salon,section=Pièces\ communes,type=com.fibaro.humiditySensor value=61 1529996807000000000 fibaro,deviceId=222,host=vm1,name=Pression\ (mb),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=1013.7 1529996807000000000 fibaro,deviceId=223,host=vm1,name=Bruit\ (db),room=Salon,section=Pièces\ communes,type=com.fibaro.multilevelSensor value=44 1529996807000000000 +fibaro,deviceId=248,host=vm1,name=Température,room=Garage,section=Extérieur,type=com.fibaro.temperatureSensor batteryLevel=85,value=10.8 1529996807000000000 ``` diff --git a/plugins/inputs/fibaro/fibaro.go b/plugins/inputs/fibaro/fibaro.go index 6eacb3ee6..62889cc8d 100644 --- a/plugins/inputs/fibaro/fibaro.go +++ b/plugins/inputs/fibaro/fibaro.go @@ -5,12 +5,15 @@ import ( "fmt" "net/http" "strconv" + "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +const defaultTimeout = 5 * time.Second + const sampleConfig = ` ## Required Fibaro controller address/hostname. 
## Note: at the time of writing this plugin, Fibaro only implemented http - no https available @@ -28,13 +31,13 @@ const description = "Read devices value(s) from a Fibaro controller" // Fibaro contains connection information type Fibaro struct { - URL string + URL string `toml:"url"` // HTTP Basic Auth Credentials - Username string - Password string + Username string `toml:"username"` + Password string `toml:"password"` - Timeout internal.Duration + Timeout internal.Duration `toml:"timeout"` client *http.Client } @@ -66,11 +69,12 @@ type Devices struct { Type string `json:"type"` Enabled bool `json:"enabled"` Properties struct { - Dead interface{} `json:"dead"` - Energy interface{} `json:"energy"` - Power interface{} `json:"power"` - Value interface{} `json:"value"` - Value2 interface{} `json:"value2"` + BatteryLevel *string `json:"batteryLevel"` + Dead string `json:"dead"` + Energy *string `json:"energy"` + Power *string `json:"power"` + Value interface{} `json:"value"` + Value2 *string `json:"value2"` } `json:"properties"` } @@ -94,6 +98,7 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { if err != nil { return err } + defer resp.Body.Close() if resp.StatusCode != http.StatusOK { err = fmt.Errorf("Response from url \"%s\" has status code %d (%s), expected %d (%s)", @@ -105,8 +110,6 @@ func (f *Fibaro) getJSON(path string, dataStruct interface{}) error { return err } - defer resp.Body.Close() - dec := json.NewDecoder(resp.Body) err = dec.Decode(&dataStruct) if err != nil { @@ -172,14 +175,20 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { } fields := make(map[string]interface{}) + if device.Properties.BatteryLevel != nil { + if fValue, err := strconv.ParseFloat(*device.Properties.BatteryLevel, 64); err == nil { + fields["batteryLevel"] = fValue + } + } + if device.Properties.Energy != nil { - if fValue, err := strconv.ParseFloat(device.Properties.Energy.(string), 64); err == nil { + if fValue, err := strconv.ParseFloat(*device.Properties.Energy, 64); err == nil { fields["energy"] = fValue } } if device.Properties.Power != nil { - if fValue, err := strconv.ParseFloat(device.Properties.Power.(string), 64); err == nil { + if fValue, err := strconv.ParseFloat(*device.Properties.Power, 64); err == nil { fields["power"] = fValue } } @@ -199,7 +208,7 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { } if device.Properties.Value2 != nil { - if fValue, err := strconv.ParseFloat(device.Properties.Value2.(string), 64); err == nil { + if fValue, err := strconv.ParseFloat(*device.Properties.Value2, 64); err == nil { fields["value2"] = fValue } } @@ -212,6 +221,8 @@ func (f *Fibaro) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("fibaro", func() telegraf.Input { - return &Fibaro{} + return &Fibaro{ + Timeout: internal.Duration{Duration: defaultTimeout}, + } }) } diff --git a/plugins/inputs/fibaro/fibaro_test.go b/plugins/inputs/fibaro/fibaro_test.go index a58ad7c31..32a1447e3 100644 --- a/plugins/inputs/fibaro/fibaro_test.go +++ b/plugins/inputs/fibaro/fibaro_test.go @@ -107,6 +107,7 @@ const devicesJSON = ` "type": "com.fibaro.temperatureSensor", "enabled": true, "properties": { + "batteryLevel": "100", "dead": "false", "value": "22.80" }, @@ -196,7 +197,7 @@ func TestJSONSuccess(t *testing.T) { // Ensure fields / values are correct - Device 4 tags = map[string]string{"deviceId": "4", "section": "Section 3", "room": "Room 4", "name": "Device 4", "type": "com.fibaro.temperatureSensor"} - fields = map[string]interface{}{"value": 
float64(22.8)} + fields = map[string]interface{}{"batteryLevel": float64(100), "value": float64(22.8)} acc.AssertContainsTaggedFields(t, "fibaro", fields, tags) // Ensure fields / values are correct - Device 5 diff --git a/plugins/inputs/file/README.md b/plugins/inputs/file/README.md new file mode 100644 index 000000000..ef0fb90b0 --- /dev/null +++ b/plugins/inputs/file/README.md @@ -0,0 +1,29 @@ +# File Input Plugin + +The file plugin parses the **complete** contents of a file **every interval** using +the selected [input data format][]. + +**Note:** If you wish to parse only newly appended lines use the [tail][] input +plugin instead. + +### Configuration: + +```toml +[[inputs.file]] + ## Files to parse each interval. Accept standard unix glob matching rules, + ## as well as ** to match recursive files and directories. + files = ["/tmp/metrics.out"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. + # file_tag = "" +``` + +[input data format]: /docs/DATA_FORMATS_INPUT.md +[tail]: /plugins/inputs/tail diff --git a/plugins/inputs/file/dev/docker-compose.yml b/plugins/inputs/file/dev/docker-compose.yml new file mode 100644 index 000000000..efce389f7 --- /dev/null +++ b/plugins/inputs/file/dev/docker-compose.yml @@ -0,0 +1,13 @@ +version: '3' + +services: + telegraf: + image: glinton/scratch + volumes: + - ./telegraf.conf:/telegraf.conf + - ../../../../telegraf:/telegraf + - ./dev/json_a.log:/var/log/test.log + entrypoint: + - /telegraf + - --config + - /telegraf.conf diff --git a/plugins/inputs/file/dev/telegraf.conf b/plugins/inputs/file/dev/telegraf.conf new file mode 100644 index 000000000..8cc0fb85d --- /dev/null +++ b/plugins/inputs/file/dev/telegraf.conf @@ -0,0 +1,7 @@ +[[inputs.file]] + files = ["/var/log/test.log"] + data_format = "json" + name_override = "json_file" + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/file/dev/testfiles/grok_a.log b/plugins/inputs/file/dev/testfiles/grok_a.log new file mode 100644 index 000000000..5295fcb75 --- /dev/null +++ b/plugins/inputs/file/dev/testfiles/grok_a.log @@ -0,0 +1,2 @@ +127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 +128.0.0.1 user-identifier tony [10/Oct/2000:13:55:36 -0800] "GET /apache_pb.gif HTTP/1.0" 300 45 \ No newline at end of file diff --git a/plugins/inputs/file/dev/testfiles/json_a.log b/plugins/inputs/file/dev/testfiles/json_a.log new file mode 100644 index 000000000..609c40a09 --- /dev/null +++ b/plugins/inputs/file/dev/testfiles/json_a.log @@ -0,0 +1,14 @@ +{ + "parent": { + "child": 3.0, + "ignored_child": "hi" + }, + "ignored_null": null, + "integer": 4, + "list": [3, 4], + "ignored_parent": { + "another_ignored_null": null, + "ignored_string": "hello, world!" 
+ }, + "another_list": [4] +} diff --git a/plugins/inputs/file/file.go b/plugins/inputs/file/file.go new file mode 100644 index 000000000..fe2a840fa --- /dev/null +++ b/plugins/inputs/file/file.go @@ -0,0 +1,103 @@ +package file + +import ( + "fmt" + "io/ioutil" + "path/filepath" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/globpath" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +type File struct { + Files []string `toml:"files"` + FileTag string `toml:"file_tag"` + parser parsers.Parser + + filenames []string +} + +const sampleConfig = ` + ## Files to parse each interval. Accept standard unix glob matching rules, + ## as well as ** to match recursive files and directories. + files = ["/tmp/metrics.out"] + + ## The dataformat to be read from files + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Name a tag containing the name of the file the data was parsed from. Leave empty + ## to disable. + # file_tag = "" +` + +// SampleConfig returns the default configuration of the Input +func (f *File) SampleConfig() string { + return sampleConfig +} + +func (f *File) Description() string { + return "Parse a complete file each interval" +} + +func (f *File) Gather(acc telegraf.Accumulator) error { + err := f.refreshFilePaths() + if err != nil { + return err + } + for _, k := range f.filenames { + metrics, err := f.readMetric(k) + if err != nil { + return err + } + + for _, m := range metrics { + if f.FileTag != "" { + m.AddTag(f.FileTag, filepath.Base(k)) + } + acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + } + } + return nil +} + +func (f *File) SetParser(p parsers.Parser) { + f.parser = p +} + +func (f *File) refreshFilePaths() error { + var allFiles []string + for _, file := range f.Files { + g, err := globpath.Compile(file) + if err != nil { + return fmt.Errorf("could not compile glob %v: %v", file, err) + } + files := g.Match() + if len(files) <= 0 { + return fmt.Errorf("could not find file: %v", file) + } + allFiles = append(allFiles, files...) + } + + f.filenames = allFiles + return nil +} + +func (f *File) readMetric(filename string) ([]telegraf.Metric, error) { + fileContents, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("E! 
Error file: %v could not be read, %s", filename, err) + } + return f.parser.Parse(fileContents) + +} + +func init() { + inputs.Add("file", func() telegraf.Input { + return &File{} + }) +} diff --git a/plugins/inputs/file/file_test.go b/plugins/inputs/file/file_test.go new file mode 100644 index 000000000..19341fc08 --- /dev/null +++ b/plugins/inputs/file/file_test.go @@ -0,0 +1,89 @@ +package file + +import ( + "os" + "path/filepath" + "testing" + + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRefreshFilePaths(t *testing.T) { + wd, err := os.Getwd() + r := File{ + Files: []string{filepath.Join(wd, "dev/testfiles/**.log")}, + } + + err = r.refreshFilePaths() + require.NoError(t, err) + assert.Equal(t, 2, len(r.filenames)) +} + +func TestFileTag(t *testing.T) { + acc := testutil.Accumulator{} + wd, err := os.Getwd() + require.NoError(t, err) + r := File{ + Files: []string{filepath.Join(wd, "dev/testfiles/json_a.log")}, + FileTag: "filename", + } + + parserConfig := parsers.Config{ + DataFormat: "json", + } + nParser, err := parsers.NewParser(&parserConfig) + assert.NoError(t, err) + r.parser = nParser + + err = r.Gather(&acc) + require.NoError(t, err) + + for _, m := range acc.Metrics { + for key, value := range m.Tags { + assert.Equal(t, r.FileTag, key) + assert.Equal(t, filepath.Base(r.Files[0]), value) + } + } +} + +func TestJSONParserCompile(t *testing.T) { + var acc testutil.Accumulator + wd, _ := os.Getwd() + r := File{ + Files: []string{filepath.Join(wd, "dev/testfiles/json_a.log")}, + } + parserConfig := parsers.Config{ + DataFormat: "json", + TagKeys: []string{"parent_ignored_child"}, + } + nParser, err := parsers.NewParser(&parserConfig) + assert.NoError(t, err) + r.parser = nParser + + r.Gather(&acc) + assert.Equal(t, map[string]string{"parent_ignored_child": "hi"}, acc.Metrics[0].Tags) + assert.Equal(t, 5, len(acc.Metrics[0].Fields)) +} + +func TestGrokParser(t *testing.T) { + wd, _ := os.Getwd() + var acc testutil.Accumulator + r := File{ + Files: []string{filepath.Join(wd, "dev/testfiles/grok_a.log")}, + } + + parserConfig := parsers.Config{ + DataFormat: "grok", + GrokPatterns: []string{"%{COMMON_LOG_FORMAT}"}, + } + + nParser, err := parsers.NewParser(&parserConfig) + r.parser = nParser + assert.NoError(t, err) + + err = r.Gather(&acc) + assert.Equal(t, len(acc.Metrics), 2) +} diff --git a/plugins/inputs/filecount/README.md b/plugins/inputs/filecount/README.md new file mode 100644 index 000000000..81fc75908 --- /dev/null +++ b/plugins/inputs/filecount/README.md @@ -0,0 +1,59 @@ +# Filecount Input Plugin + +Reports the number and total size of files in specified directories. + +### Configuration: + +```toml +[[inputs.filecount]] + ## Directory to gather stats about. + ## deprecated in 1.9; use the directories option + # directory = "/var/cache/apt/archives" + + ## Directories to gather stats about. + ## This accepts standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk", e.g.: + ## /var/log/** -> recursively find all directories in /var/log and count files in each directory + ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory + ## /var/log -> count all files in /var/log and all of its subdirectories + directories = ["/var/cache/apt", "/tmp"] + + ## Only count files that match the name pattern. Defaults to "*". 
+ name = "*" + + ## Count files in subdirectories. Defaults to true. + recursive = true + + ## Only count regular files. Defaults to true. + regular_only = true + + ## Follow all symlinks while walking the directory tree. Defaults to false. + follow_symlinks = false + + ## Only count files that are at least this size. If size is + ## a negative number, only count files that are smaller than the + ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... + ## Without quotes and units, interpreted as size in bytes. + size = "0B" + + ## Only count files that have not been touched for at least this + ## duration. If mtime is negative, only count files that have been + ## touched in this duration. Defaults to "0s". + mtime = "0s" +``` + +### Metrics + +- filecount + - tags: + - directory (the directory path) + - fields: + - count (integer) + - size_bytes (integer) + +### Example Output: + +``` +filecount,directory=/var/cache/apt count=7i,size_bytes=7438336i 1530034445000000000 +filecount,directory=/tmp count=17i,size_bytes=28934786i 1530034445000000000 +``` diff --git a/plugins/inputs/filecount/filecount.go b/plugins/inputs/filecount/filecount.go new file mode 100644 index 000000000..30815541c --- /dev/null +++ b/plugins/inputs/filecount/filecount.go @@ -0,0 +1,317 @@ +package filecount + +import ( + "os" + "path/filepath" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/globpath" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/karrick/godirwalk" + "github.com/pkg/errors" +) + +const sampleConfig = ` + ## Directory to gather stats about. + ## deprecated in 1.9; use the directories option + # directory = "/var/cache/apt/archives" + + ## Directories to gather stats about. + ## This accepts standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk", e.g.: + ## /var/log/** -> recursively find all directories in /var/log and count files in each directory + ## /var/log/*/* -> find all directories with a parent dir in /var/log and count files in each directory + ## /var/log -> count all files in /var/log and all of its subdirectories + directories = ["/var/cache/apt/archives"] + + ## Only count files that match the name pattern. Defaults to "*". + name = "*.deb" + + ## Count files in subdirectories. Defaults to true. + recursive = false + + ## Only count regular files. Defaults to true. + regular_only = true + + ## Follow all symlinks while walking the directory tree. Defaults to false. + follow_symlinks = false + + ## Only count files that are at least this size. If size is + ## a negative number, only count files that are smaller than the + ## absolute value of size. Acceptable units are B, KiB, MiB, KB, ... + ## Without quotes and units, interpreted as size in bytes. + size = "0B" + + ## Only count files that have not been touched for at least this + ## duration. If mtime is negative, only count files that have been + ## touched in this duration. Defaults to "0s". 
+ mtime = "0s" +` + +type FileCount struct { + Directory string // deprecated in 1.9 + Directories []string + Name string + Recursive bool + RegularOnly bool + FollowSymlinks bool + Size internal.Size + MTime internal.Duration `toml:"mtime"` + fileFilters []fileFilterFunc + globPaths []globpath.GlobPath + Fs fileSystem + Log telegraf.Logger +} + +func (_ *FileCount) Description() string { + return "Count files in a directory" +} + +func (_ *FileCount) SampleConfig() string { return sampleConfig } + +type fileFilterFunc func(os.FileInfo) (bool, error) + +func rejectNilFilters(filters []fileFilterFunc) []fileFilterFunc { + filtered := make([]fileFilterFunc, 0, len(filters)) + for _, f := range filters { + if f != nil { + filtered = append(filtered, f) + } + } + return filtered +} + +func (fc *FileCount) nameFilter() fileFilterFunc { + if fc.Name == "*" { + return nil + } + + return func(f os.FileInfo) (bool, error) { + match, err := filepath.Match(fc.Name, f.Name()) + if err != nil { + return false, err + } + return match, nil + } +} + +func (fc *FileCount) regularOnlyFilter() fileFilterFunc { + if !fc.RegularOnly { + return nil + } + + return func(f os.FileInfo) (bool, error) { + return f.Mode().IsRegular(), nil + } +} + +func (fc *FileCount) sizeFilter() fileFilterFunc { + if fc.Size.Size == 0 { + return nil + } + + return func(f os.FileInfo) (bool, error) { + if !f.Mode().IsRegular() { + return false, nil + } + if fc.Size.Size < 0 { + return f.Size() < -fc.Size.Size, nil + } + return f.Size() >= fc.Size.Size, nil + } +} + +func (fc *FileCount) mtimeFilter() fileFilterFunc { + if fc.MTime.Duration == 0 { + return nil + } + + return func(f os.FileInfo) (bool, error) { + age := absDuration(fc.MTime.Duration) + mtime := time.Now().Add(-age) + if fc.MTime.Duration < 0 { + return f.ModTime().After(mtime), nil + } + return f.ModTime().Before(mtime), nil + } +} + +func absDuration(x time.Duration) time.Duration { + if x < 0 { + return -x + } + return x +} + +func (fc *FileCount) initFileFilters() { + filters := []fileFilterFunc{ + fc.nameFilter(), + fc.regularOnlyFilter(), + fc.sizeFilter(), + fc.mtimeFilter(), + } + fc.fileFilters = rejectNilFilters(filters) +} + +func (fc *FileCount) count(acc telegraf.Accumulator, basedir string, glob globpath.GlobPath) { + childCount := make(map[string]int64) + childSize := make(map[string]int64) + + walkFn := func(path string, de *godirwalk.Dirent) error { + rel, err := filepath.Rel(basedir, path) + if err == nil && rel == "." 
{ + return nil + } + file, err := fc.Fs.Stat(path) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + match, err := fc.filter(file) + if err != nil { + acc.AddError(err) + return nil + } + if match { + parent := filepath.Dir(path) + childCount[parent]++ + childSize[parent] += file.Size() + } + if file.IsDir() && !fc.Recursive && !glob.HasSuperMeta { + return filepath.SkipDir + } + return nil + } + + postChildrenFn := func(path string, de *godirwalk.Dirent) error { + if glob.MatchString(path) { + gauge := map[string]interface{}{ + "count": childCount[path], + "size_bytes": childSize[path], + } + acc.AddGauge("filecount", gauge, + map[string]string{ + "directory": path, + }) + } + parent := filepath.Dir(path) + if fc.Recursive { + childCount[parent] += childCount[path] + childSize[parent] += childSize[path] + } + delete(childCount, path) + delete(childSize, path) + return nil + } + + err := godirwalk.Walk(basedir, &godirwalk.Options{ + Callback: walkFn, + PostChildrenCallback: postChildrenFn, + Unsorted: true, + FollowSymbolicLinks: fc.FollowSymlinks, + ErrorCallback: func(osPathname string, err error) godirwalk.ErrorAction { + if os.IsPermission(errors.Cause(err)) { + fc.Log.Debug(err) + return godirwalk.SkipNode + } + return godirwalk.Halt + }, + }) + if err != nil { + acc.AddError(err) + } +} + +func (fc *FileCount) filter(file os.FileInfo) (bool, error) { + if fc.fileFilters == nil { + fc.initFileFilters() + } + + for _, fileFilter := range fc.fileFilters { + match, err := fileFilter(file) + if err != nil { + return false, err + } + if !match { + return false, nil + } + } + + return true, nil +} + +func (fc *FileCount) Gather(acc telegraf.Accumulator) error { + if fc.globPaths == nil { + fc.initGlobPaths(acc) + } + + for _, glob := range fc.globPaths { + for _, dir := range fc.onlyDirectories(glob.GetRoots()) { + fc.count(acc, dir, glob) + } + } + + return nil +} + +func (fc *FileCount) onlyDirectories(directories []string) []string { + out := make([]string, 0) + for _, path := range directories { + info, err := fc.Fs.Stat(path) + if err == nil && info.IsDir() { + out = append(out, path) + } + } + return out +} + +func (fc *FileCount) getDirs() []string { + dirs := make([]string, len(fc.Directories)) + for i, dir := range fc.Directories { + dirs[i] = filepath.Clean(dir) + } + + if fc.Directory != "" { + dirs = append(dirs, filepath.Clean(fc.Directory)) + } + + return dirs +} + +func (fc *FileCount) initGlobPaths(acc telegraf.Accumulator) { + fc.globPaths = []globpath.GlobPath{} + for _, directory := range fc.getDirs() { + glob, err := globpath.Compile(directory) + if err != nil { + acc.AddError(err) + } else { + fc.globPaths = append(fc.globPaths, *glob) + } + } + +} + +func NewFileCount() *FileCount { + return &FileCount{ + Directory: "", + Directories: []string{}, + Name: "*", + Recursive: true, + RegularOnly: true, + FollowSymlinks: false, + Size: internal.Size{Size: 0}, + MTime: internal.Duration{Duration: 0}, + fileFilters: nil, + Fs: osFS{}, + } +} + +func init() { + inputs.Add("filecount", func() telegraf.Input { + return NewFileCount() + }) +} diff --git a/plugins/inputs/filecount/filecount_test.go b/plugins/inputs/filecount/filecount_test.go new file mode 100644 index 000000000..568ee07b5 --- /dev/null +++ b/plugins/inputs/filecount/filecount_test.go @@ -0,0 +1,238 @@ +package filecount + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + 
"github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestNoFilters(t *testing.T) { + fc := getNoFilterFileCount() + matches := []string{"foo", "bar", "baz", "qux", + "subdir/", "subdir/quux", "subdir/quuz", + "subdir/nested2", "subdir/nested2/qux"} + fileCountEquals(t, fc, len(matches), 5096) +} + +func TestNoFiltersOnChildDir(t *testing.T) { + fc := getNoFilterFileCount() + fc.Directories = []string{getTestdataDir() + "/*"} + matches := []string{"subdir/quux", "subdir/quuz", + "subdir/nested2/qux", "subdir/nested2"} + + tags := map[string]string{"directory": getTestdataDir() + "/subdir"} + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(600))) +} + +func TestNoRecursiveButSuperMeta(t *testing.T) { + fc := getNoFilterFileCount() + fc.Recursive = false + fc.Directories = []string{getTestdataDir() + "/**"} + matches := []string{"subdir/quux", "subdir/quuz", "subdir/nested2"} + + tags := map[string]string{"directory": getTestdataDir() + "/subdir"} + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(200))) +} + +func TestNameFilter(t *testing.T) { + fc := getNoFilterFileCount() + fc.Name = "ba*" + matches := []string{"bar", "baz"} + fileCountEquals(t, fc, len(matches), 0) +} + +func TestNonRecursive(t *testing.T) { + fc := getNoFilterFileCount() + fc.Recursive = false + matches := []string{"foo", "bar", "baz", "qux", "subdir"} + + fileCountEquals(t, fc, len(matches), 4496) +} + +func TestDoubleAndSimpleStar(t *testing.T) { + fc := getNoFilterFileCount() + fc.Directories = []string{getTestdataDir() + "/**/*"} + matches := []string{"qux"} + + tags := map[string]string{"directory": getTestdataDir() + "/subdir/nested2"} + + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + + require.True(t, acc.HasPoint("filecount", tags, "count", int64(len(matches)))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(400))) +} + +func TestRegularOnlyFilter(t *testing.T) { + fc := getNoFilterFileCount() + fc.RegularOnly = true + matches := []string{ + "foo", "bar", "baz", "qux", "subdir/quux", "subdir/quuz", + "subdir/nested2/qux"} + + fileCountEquals(t, fc, len(matches), 800) +} + +func TestSizeFilter(t *testing.T) { + fc := getNoFilterFileCount() + fc.Size = internal.Size{Size: -100} + matches := []string{"foo", "bar", "baz", + "subdir/quux", "subdir/quuz"} + fileCountEquals(t, fc, len(matches), 0) + + fc.Size = internal.Size{Size: 100} + matches = []string{"qux", "subdir/nested2//qux"} + + fileCountEquals(t, fc, len(matches), 800) +} + +func TestMTimeFilter(t *testing.T) { + mtime := time.Date(2011, time.December, 14, 18, 25, 5, 0, time.UTC) + fileAge := time.Since(mtime) - (60 * time.Second) + + fc := getNoFilterFileCount() + fc.MTime = internal.Duration{Duration: -fileAge} + matches := []string{"foo", "bar", "qux", + "subdir/", "subdir/quux", "subdir/quuz", + "subdir/nested2", "subdir/nested2/qux"} + + fileCountEquals(t, fc, len(matches), 5096) + + fc.MTime = internal.Duration{Duration: fileAge} + matches = []string{"baz"} + fileCountEquals(t, fc, len(matches), 0) +} + +// The library dependency karrick/godirwalk completely abstracts out the +// behavior of the FollowSymlinks plugin input option. 
However, it should at +// least behave identically when enabled on a filesystem with no symlinks. +func TestFollowSymlinks(t *testing.T) { + fc := getNoFilterFileCount() + fc.FollowSymlinks = true + matches := []string{"foo", "bar", "baz", "qux", + "subdir/", "subdir/quux", "subdir/quuz", + "subdir/nested2", "subdir/nested2/qux"} + + fileCountEquals(t, fc, len(matches), 5096) +} + +// Paths with a trailing slash will not exactly match paths produced during the +// walk as these paths are cleaned before being returned from godirwalk. #6329 +func TestDirectoryWithTrailingSlash(t *testing.T) { + plugin := &FileCount{ + Directories: []string{getTestdataDir() + string(filepath.Separator)}, + Name: "*", + Recursive: true, + Fs: getFakeFileSystem(getTestdataDir()), + } + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "filecount", + map[string]string{ + "directory": getTestdataDir(), + }, + map[string]interface{}{ + "count": 9, + "size_bytes": 5096, + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func getNoFilterFileCount() FileCount { + return FileCount{ + Log: testutil.Logger{}, + Directories: []string{getTestdataDir()}, + Name: "*", + Recursive: true, + RegularOnly: false, + Size: internal.Size{Size: 0}, + MTime: internal.Duration{Duration: 0}, + fileFilters: nil, + Fs: getFakeFileSystem(getTestdataDir()), + } +} + +func getTestdataDir() string { + dir, err := os.Getwd() + if err != nil { + // if we cannot even establish the test directory, further progress is meaningless + panic(err) + } + + var chunks []string + var testDirectory string + + if runtime.GOOS == "windows" { + chunks = strings.Split(dir, "\\") + testDirectory = strings.Join(chunks[:], "\\") + "\\testdata" + } else { + chunks = strings.Split(dir, "/") + testDirectory = strings.Join(chunks[:], "/") + "/testdata" + } + return testDirectory +} + +func getFakeFileSystem(basePath string) fakeFileSystem { + // create our desired "filesystem" object, complete with an internal map allowing our funcs to return meta data as requested + + mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) + olderMtime := time.Date(2010, time.December, 14, 18, 25, 5, 0, time.UTC) + + // set file permissions + var fmask uint32 = 0666 + var dmask uint32 = 0666 + + // set directory bit + dmask |= (1 << uint(32-1)) + + // create a lookup map for getting "files" from the "filesystem" + fileList := map[string]fakeFileInfo{ + basePath: {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, + basePath + "/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, + basePath + "/bar": {name: "bar", filemode: uint32(fmask), modtime: mtime}, + basePath + "/baz": {name: "baz", filemode: uint32(fmask), modtime: olderMtime}, + basePath + "/qux": {name: "qux", size: int64(400), filemode: uint32(fmask), modtime: mtime}, + basePath + "/subdir": {name: "subdir", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, + basePath + "/subdir/quux": {name: "quux", filemode: uint32(fmask), modtime: mtime}, + basePath + "/subdir/quuz": {name: "quuz", filemode: uint32(fmask), modtime: mtime}, + basePath + "/subdir/nested2": {name: "nested2", size: int64(200), filemode: uint32(dmask), modtime: mtime, isdir: true}, + basePath + "/subdir/nested2/qux": {name: "qux", filemode: uint32(fmask), modtime: mtime, size: 
int64(400)}, + } + + fs := fakeFileSystem{files: fileList} + return fs + +} + +func fileCountEquals(t *testing.T, fc FileCount, expectedCount int, expectedSize int) { + tags := map[string]string{"directory": getTestdataDir()} + acc := testutil.Accumulator{} + acc.GatherError(fc.Gather) + require.True(t, acc.HasPoint("filecount", tags, "count", int64(expectedCount))) + require.True(t, acc.HasPoint("filecount", tags, "size_bytes", int64(expectedSize))) +} diff --git a/plugins/inputs/filecount/filesystem_helpers.go b/plugins/inputs/filecount/filesystem_helpers.go new file mode 100644 index 000000000..2bd6c0951 --- /dev/null +++ b/plugins/inputs/filecount/filesystem_helpers.go @@ -0,0 +1,73 @@ +package filecount + +import ( + "errors" + "io" + "os" + "time" +) + +/* + The code below is lifted from numerous articles and originates from Andrew Gerrand's 10 things you (probably) don't know about Go. + it allows for mocking a filesystem; this allows for consistent testing of this code across platforms (directory sizes reported + differently by different platforms, for example), while preserving the rest of the functionality as-is, without modification. +*/ + +type fileSystem interface { + Open(name string) (file, error) + Stat(name string) (os.FileInfo, error) +} + +type file interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + Stat() (os.FileInfo, error) +} + +// osFS implements fileSystem using the local disk +type osFS struct{} + +func (osFS) Open(name string) (file, error) { return os.Open(name) } +func (osFS) Stat(name string) (os.FileInfo, error) { return os.Stat(name) } + +/* + The following are for mocking the filesystem - this allows us to mock Stat() files. This means that we can set file attributes, and know that they + will be the same regardless of the platform sitting underneath our tests (directory sizes vary see https://github.com/influxdata/telegraf/issues/6011) + + NOTE: still need the on-disk file structure to mirror this because the 3rd party library ("github.com/karrick/godirwalk") uses its own + walk functions, that we cannot mock from here. 
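+    Open() is therefore left unimplemented by the fake filesystem and simply returns an error if it is ever called.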
+*/ + +type fakeFileSystem struct { + files map[string]fakeFileInfo +} + +type fakeFileInfo struct { + name string + size int64 + filemode uint32 + modtime time.Time + isdir bool + sys interface{} +} + +func (f fakeFileInfo) Name() string { return f.name } +func (f fakeFileInfo) Size() int64 { return f.size } +func (f fakeFileInfo) Mode() os.FileMode { return os.FileMode(f.filemode) } +func (f fakeFileInfo) ModTime() time.Time { return f.modtime } +func (f fakeFileInfo) IsDir() bool { return f.isdir } +func (f fakeFileInfo) Sys() interface{} { return f.sys } + +func (f fakeFileSystem) Open(name string) (file, error) { + return nil, &os.PathError{Op: "Open", Path: name, Err: errors.New("Not implemented by fake filesystem")} +} + +func (f fakeFileSystem) Stat(name string) (os.FileInfo, error) { + if fakeInfo, found := f.files[name]; found { + return fakeInfo, nil + } + return nil, &os.PathError{Op: "Stat", Path: name, Err: errors.New("No such file or directory")} + +} diff --git a/plugins/inputs/filecount/filesystem_helpers_test.go b/plugins/inputs/filecount/filesystem_helpers_test.go new file mode 100644 index 000000000..4e7d16e16 --- /dev/null +++ b/plugins/inputs/filecount/filesystem_helpers_test.go @@ -0,0 +1,90 @@ +package filecount + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestMTime(t *testing.T) { + //this is the time our foo file should have + mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) + + fs := getTestFileSystem() + fileInfo, err := fs.Stat("/testdata/foo") + require.Nil(t, err) + require.Equal(t, mtime, fileInfo.ModTime()) +} + +func TestSize(t *testing.T) { + //this is the time our foo file should have + size := int64(4096) + fs := getTestFileSystem() + fileInfo, err := fs.Stat("/testdata") + require.Nil(t, err) + require.Equal(t, size, fileInfo.Size()) +} + +func TestIsDir(t *testing.T) { + //this is the time our foo file should have + dir := true + fs := getTestFileSystem() + fileInfo, err := fs.Stat("/testdata") + require.Nil(t, err) + require.Equal(t, dir, fileInfo.IsDir()) +} + +func TestRealFS(t *testing.T) { + //test that the default (non-test) empty FS causes expected behaviour + var fs fileSystem = osFS{} + //the following file exists on disk - and not in our fake fs + fileInfo, err := fs.Stat(getTestdataDir() + "/qux") + require.Nil(t, err) + require.Equal(t, false, fileInfo.IsDir()) + require.Equal(t, int64(446), fileInfo.Size()) + + // now swap out real, for fake filesystem + fs = getTestFileSystem() + // now, the same test as above will return an error as the file doesn't exist in our fake fs + expectedError := "Stat " + getTestdataDir() + "/qux: No such file or directory" + fileInfo, err = fs.Stat(getTestdataDir() + "/qux") + require.Equal(t, expectedError, err.Error()) + // and verify that what we DO expect to find, we do + fileInfo, err = fs.Stat("/testdata/foo") + require.Nil(t, err) +} + +func getTestFileSystem() fakeFileSystem { + /* + create our desired "filesystem" object, complete with an internal map allowing our funcs to return meta data as requested + + type FileInfo interface { + Name() string // base name of the file + Size() int64 // length in bytes of file + Mode() FileMode // file mode bits + ModTime() time.Time // modification time + IsDir() bool // returns bool indicating if a Dir or not + Sys() interface{} // underlying data source. 
always nil (in this case) + } + + */ + + mtime := time.Date(2015, time.December, 14, 18, 25, 5, 0, time.UTC) + + // set file permissions + var fmask uint32 = 0666 + var dmask uint32 = 0666 + + // set directory bit + dmask |= (1 << uint(32-1)) + + fileList := map[string]fakeFileInfo{ + "/testdata": {name: "testdata", size: int64(4096), filemode: uint32(dmask), modtime: mtime, isdir: true}, + "/testdata/foo": {name: "foo", filemode: uint32(fmask), modtime: mtime}, + } + + fs := fakeFileSystem{files: fileList} + return fs + +} diff --git a/plugins/inputs/filecount/testdata/bar b/plugins/inputs/filecount/testdata/bar new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/filecount/testdata/baz b/plugins/inputs/filecount/testdata/baz new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/filecount/testdata/foo b/plugins/inputs/filecount/testdata/foo new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/filecount/testdata/qux b/plugins/inputs/filecount/testdata/qux new file mode 100644 index 000000000..c7288f23d --- /dev/null +++ b/plugins/inputs/filecount/testdata/qux @@ -0,0 +1,7 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do +eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad +minim veniam, quis nostrud exercitation ullamco laboris nisi ut +aliquip ex ea commodo consequat. Duis aute irure dolor in +reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla +pariatur. Excepteur sint occaecat cupidatat non proident, sunt in +culpa qui officia deserunt mollit anim id est laborum. diff --git a/plugins/inputs/filecount/testdata/subdir/nested2/qux b/plugins/inputs/filecount/testdata/subdir/nested2/qux new file mode 100644 index 000000000..c7288f23d --- /dev/null +++ b/plugins/inputs/filecount/testdata/subdir/nested2/qux @@ -0,0 +1,7 @@ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do +eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad +minim veniam, quis nostrud exercitation ullamco laboris nisi ut +aliquip ex ea commodo consequat. Duis aute irure dolor in +reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla +pariatur. Excepteur sint occaecat cupidatat non proident, sunt in +culpa qui officia deserunt mollit anim id est laborum. diff --git a/plugins/inputs/filecount/testdata/subdir/quux b/plugins/inputs/filecount/testdata/subdir/quux new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/filecount/testdata/subdir/quuz b/plugins/inputs/filecount/testdata/subdir/quuz new file mode 100644 index 000000000..e69de29bb diff --git a/plugins/inputs/filestat/README.md b/plugins/inputs/filestat/README.md index 79eec6c71..840cafb53 100644 --- a/plugins/inputs/filestat/README.md +++ b/plugins/inputs/filestat/README.md @@ -11,6 +11,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats. ## These accept standard unix glob matching rules, but with the addition of ## ** as a "super asterisk". See https://github.com/gobwas/glob. files = ["/etc/telegraf/telegraf.conf", "/var/log/**.log"] + ## If true, read the entire file and calculate an md5 checksum. md5 = false ``` @@ -20,7 +21,7 @@ The filestat plugin gathers metrics about file existence, size, and other stats. 
- filestat - exists (int, 0 | 1) - size_bytes (int, bytes) - - modification_time (int, unixtime) + - modification_time (int, unix time nanoseconds) - md5 (optional, string) ### Tags: diff --git a/plugins/inputs/filestat/filestat.go b/plugins/inputs/filestat/filestat.go index 762eaa420..bf8ea6c16 100644 --- a/plugins/inputs/filestat/filestat.go +++ b/plugins/inputs/filestat/filestat.go @@ -4,7 +4,6 @@ import ( "crypto/md5" "fmt" "io" - "log" "os" "github.com/influxdata/telegraf" @@ -23,6 +22,7 @@ const sampleConfig = ` ## See https://github.com/gobwas/glob for more examples ## files = ["/var/log/**.log"] + ## If true, read the entire file and calculate an md5 checksum. md5 = false ` @@ -31,6 +31,8 @@ type FileStat struct { Md5 bool Files []string + Log telegraf.Logger + // maps full file paths to globmatch obj globs map[string]*globpath.GlobPath } @@ -41,11 +43,11 @@ func NewFileStat() *FileStat { } } -func (_ *FileStat) Description() string { +func (*FileStat) Description() string { return "Read stats about given file(s)" } -func (_ *FileStat) SampleConfig() string { return sampleConfig } +func (*FileStat) SampleConfig() string { return sampleConfig } func (f *FileStat) Gather(acc telegraf.Accumulator) error { var err error @@ -73,16 +75,20 @@ func (f *FileStat) Gather(acc telegraf.Accumulator) error { continue } - for fileName, fileInfo := range files { + for _, fileName := range files { tags := map[string]string{ "file": fileName, } fields := map[string]interface{}{ "exists": int64(1), } + fileInfo, err := os.Stat(fileName) + if os.IsNotExist(err) { + fields["exists"] = int64(0) + } if fileInfo == nil { - log.Printf("E! Unable to get info for file [%s], possible permissions issue", + f.Log.Errorf("Unable to get info for file %q, possible permissions issue", fileName) } else { fields["size_bytes"] = fileInfo.Size() diff --git a/plugins/inputs/filestat/filestat_test.go b/plugins/inputs/filestat/filestat_test.go index 7fdf6cde8..a38d3b0aa 100644 --- a/plugins/inputs/filestat/filestat_test.go +++ b/plugins/inputs/filestat/filestat_test.go @@ -14,6 +14,7 @@ import ( func TestGatherNoMd5(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Files = []string{ dir + "log1.log", dir + "log2.log", @@ -44,6 +45,7 @@ func TestGatherNoMd5(t *testing.T) { func TestGatherExplicitFiles(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ dir + "log1.log", @@ -77,6 +79,7 @@ func TestGatherExplicitFiles(t *testing.T) { func TestGatherGlob(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ dir + "*.log", @@ -103,6 +106,7 @@ func TestGatherGlob(t *testing.T) { func TestGatherSuperAsterisk(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Md5 = true fs.Files = []string{ dir + "**", @@ -136,6 +140,7 @@ func TestGatherSuperAsterisk(t *testing.T) { func TestModificationTime(t *testing.T) { dir := getTestdataDir() fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Files = []string{ dir + "log1.log", } @@ -153,6 +158,7 @@ func TestModificationTime(t *testing.T) { func TestNoModificationTime(t *testing.T) { fs := NewFileStat() + fs.Log = testutil.Logger{} fs.Files = []string{ "/non/existant/file", } diff --git a/plugins/inputs/fireboard/README.md b/plugins/inputs/fireboard/README.md new file mode 100644 index 000000000..7e1f351fa --- /dev/null +++ b/plugins/inputs/fireboard/README.md @@ -0,0 
+1,58 @@
+# Fireboard Input Plugin
+
+The fireboard plugin gathers real-time temperature data from Fireboard
+thermometers. In order to use this input plugin, you'll need to sign up to use
+the [Fireboard REST API](https://docs.fireboard.io/reference/restapi.html).
+
+### Configuration
+
+```toml
+[[inputs.fireboard]]
+  ## Specify auth token for your account
+  auth_token = "invalidAuthToken"
+  ## You can override the fireboard server URL if necessary
+  # url = "https://fireboard.io/api/v1/devices.json"
+  ## You can set a different http_timeout if you need to
+  # http_timeout = "4s"
+```
+
+#### auth_token
+
+In lieu of requiring a username and password, this plugin requires an
+authentication token that you can generate using the [Fireboard REST
+API](https://docs.fireboard.io/reference/restapi.html#Authentication).
+
+#### url
+
+While there should be no reason to override the URL, the option is available
+in case Fireboard changes their site, etc.
+
+#### http_timeout
+
+If you need to increase the HTTP timeout, you can do so here. Set the value as
+a duration string, for example "4s". The default is four (4) seconds.
+
+### Metrics
+
+The Fireboard REST API docs have good examples of the data that is available;
+currently this input only returns the real-time temperatures. Temperature
+values are included if they are less than a minute old.
+
+- fireboard
+  - tags:
+    - channel
+    - scale (Celcius, Fahrenheit)
+    - title (name of the Fireboard)
+    - uuid (UUID of the Fireboard)
+  - fields:
+    - temperature (float, unit)
+
+### Example Output
+
+This section shows example output in Line Protocol format. You can often use
+`telegraf --input-filter fireboard --test` or use the `file` output to get
+this information.
+
+```
+fireboard,channel=2,host=patas-mbp,scale=Fahrenheit,title=telegraf-FireBoard,uuid=b55e766c-b308-49b5-93a4-df89fe31efd0 temperature=78.2 1561690040000000000
+```
diff --git a/plugins/inputs/fireboard/fireboard.go b/plugins/inputs/fireboard/fireboard.go
new file mode 100644
index 000000000..a92930aae
--- /dev/null
+++ b/plugins/inputs/fireboard/fireboard.go
@@ -0,0 +1,157 @@
+package fireboard
+
+import (
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "strconv"
+    "time"
+
+    "github.com/influxdata/telegraf"
+    "github.com/influxdata/telegraf/internal"
+    "github.com/influxdata/telegraf/plugins/inputs"
+)
+
+// Fireboard gathers statistics from the fireboard.io servers
+type Fireboard struct {
+    AuthToken string `toml:"auth_token"`
+    URL string `toml:"url"`
+    HTTPTimeout internal.Duration `toml:"http_timeout"`
+
+    client *http.Client
+}
+
+// NewFireboard returns a new instance of Fireboard with a default http client
+func NewFireboard() *Fireboard {
+    tr := &http.Transport{ResponseHeaderTimeout: time.Duration(3 * time.Second)}
+    client := &http.Client{
+        Transport: tr,
+        Timeout: time.Duration(4 * time.Second),
+    }
+    return &Fireboard{client: client}
+}
+
+// RTT represents the real-time temperature data that is received from Fireboard
+type RTT struct {
+    Temp float64 `json:"temp"`
+    Channel int64 `json:"channel"`
+    Degreetype int `json:"degreetype"`
+    Created string `json:"created"`
+}
+
+type fireboardStats struct {
+    Title string `json:"title"`
+    UUID string `json:"uuid"`
+    Latesttemps []RTT `json:"latest_temps"`
+}
+
+// Default sample configuration for the fireboard plugin.
+const sampleConfig = `
+  ## Specify auth token for your account
+  auth_token = "invalidAuthToken"
+  ## You can override the fireboard server URL if necessary
+  # url = "https://fireboard.io/api/v1/devices.json"
+  ## You can set a different http_timeout if you need to.
+  ## You should set a string using a number and time indicator,
+  ## for example "12s" for 12 seconds.
+  # http_timeout = "4s"
+`
+
+// SampleConfig returns a sample configuration for the plugin
+func (r *Fireboard) SampleConfig() string {
+    return sampleConfig
+}
+
+// Description returns a description of the plugin
+func (r *Fireboard) Description() string {
+    return "Read real time temps from fireboard.io servers"
+}
+
+// Init validates the configuration and applies defaults
+func (r *Fireboard) Init() error {
+
+    if len(r.AuthToken) == 0 {
+        return fmt.Errorf("You must specify an authToken")
+    }
+    if len(r.URL) == 0 {
+        r.URL = "https://fireboard.io/api/v1/devices.json"
+    }
+    // Have a default timeout of 4s
+    if r.HTTPTimeout.Duration == 0 {
+        r.HTTPTimeout.Duration = time.Second * 4
+    }
+
+    r.client.Timeout = r.HTTPTimeout.Duration
+
+    return nil
+}
+
+// Gather reads the latest temperatures from the configured fireboard server
+func (r *Fireboard) Gather(acc telegraf.Accumulator) error {
+
+    // Perform the GET request to the fireboard servers
+    req, err := http.NewRequest("GET", r.URL, nil)
+    if err != nil {
+        return err
+    }
+    req.Header.Set("Authorization", "Token "+r.AuthToken)
+    resp, err := r.client.Do(req)
+    if err != nil {
+        return err
+    }
+    defer resp.Body.Close()
+
+    // Successful responses will always return status code 200
+    if resp.StatusCode != http.StatusOK {
+        if resp.StatusCode == http.StatusForbidden {
+            return fmt.Errorf("fireboard server responded with %d [Forbidden], verify your authToken", resp.StatusCode)
+        }
+        return fmt.Errorf("fireboard responded with unexpected status code %d", resp.StatusCode)
+    }
+    // Decode the response JSON into a new stats struct
+    var stats []fireboardStats
+    if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
+        return fmt.Errorf("unable to decode fireboard response: %s", err)
+    }
+    // Range over all devices, gathering stats for each one.
+ for _, s := range stats { + r.gatherTemps(s, acc) + } + return nil +} + +// Return text description of degree type (scale) +func scale(n int) string { + switch n { + case 1: + return "Celcius" + case 2: + return "Fahrenheit" + default: + return "" + } +} + +// Gathers stats from a single device, adding them to the accumulator +func (r *Fireboard) gatherTemps(s fireboardStats, acc telegraf.Accumulator) { + // Construct lookup for scale values + + for _, t := range s.Latesttemps { + tags := map[string]string{ + "title": s.Title, + "uuid": s.UUID, + "channel": strconv.FormatInt(t.Channel, 10), + "scale": scale(t.Degreetype), + } + fields := map[string]interface{}{ + "temperature": t.Temp, + } + acc.AddFields("fireboard", fields, tags) + } +} + +func init() { + inputs.Add("fireboard", func() telegraf.Input { + return NewFireboard() + }) +} diff --git a/plugins/inputs/fireboard/fireboard_test.go b/plugins/inputs/fireboard/fireboard_test.go new file mode 100644 index 000000000..a5e93a453 --- /dev/null +++ b/plugins/inputs/fireboard/fireboard_test.go @@ -0,0 +1,74 @@ +package fireboard + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestFireboard(t *testing.T) { + // Create a test server with the const response JSON + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, response) + })) + defer ts.Close() + + // Parse the URL of the test server, used to verify the expected host + u, err := url.Parse(ts.URL) + require.NoError(t, err) + + // Create a new fb instance with our given test server + fireboard := NewFireboard() + fireboard.AuthToken = "b4bb6e6a7b6231acb9f71b304edb2274693d8849" + fireboard.URL = u.String() + + // Create a test accumulator + acc := &testutil.Accumulator{} + + // Gather data from the test server + err = fireboard.Gather(acc) + require.NoError(t, err) + + // Expect the correct values for all known keys + expectFields := map[string]interface{}{ + "temperature": float64(79.9), + } + // Expect the correct values for all tags + expectTags := map[string]string{ + "title": "telegraf-FireBoard", + "uuid": "b55e766c-b308-49b5-93a4-df89fe31efd0", + "channel": strconv.FormatInt(1, 10), + "scale": "Fahrenheit", + } + + acc.AssertContainsTaggedFields(t, "fireboard", expectFields, expectTags) +} + +var response = ` +[{ + "id": 99999, + "title": "telegraf-FireBoard", + "created": "2019-03-23T16:48:32.152010Z", + "uuid": "b55e766c-b308-49b5-93a4-df89fe31efd0", + "hardware_id": "XXXXXXXXX", + "latest_temps": [ + { + "temp": 79.9, + "channel": 1, + "degreetype": 2, + "created": "2019-06-25T06:07:10Z" + } + ], + "last_templog": "2019-06-25T06:06:40Z", + "model": "FBX11E", + "channel_count": 6, + "degreetype": 2 + }] +` diff --git a/plugins/inputs/fluentd/README.md b/plugins/inputs/fluentd/README.md index e46428417..3fabbddb7 100644 --- a/plugins/inputs/fluentd/README.md +++ b/plugins/inputs/fluentd/README.md @@ -1,12 +1,12 @@ # Fluentd Input Plugin -The fluentd plugin gathers metrics from plugin endpoint provided by [in_monitor plugin](http://docs.fluentd.org/v0.12/articles/monitoring). +The fluentd plugin gathers metrics from plugin endpoint provided by [in_monitor plugin](https://docs.fluentd.org/input/monitor_agent). This plugin understands data provided by /api/plugin.json resource (/api/config.json is not covered). 
-You might need to adjust your fluentd configuration, in order to reduce series cardinality in case whene your fluentd restarts frequently. Every time when fluentd starts, `plugin_id` value is given a new random value. -According to [fluentd documentation](http://docs.fluentd.org/v0.12/articles/config-file), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`. +You might need to adjust your fluentd configuration, in order to reduce series cardinality in case your fluentd restarts frequently. Every time fluentd starts, `plugin_id` value is given a new random value. +According to [fluentd documentation](https://docs.fluentd.org/configuration/config-file#common-plugin-parameter), you are able to add `@id` parameter for each plugin to avoid this behaviour and define custom `plugin_id`. -example configuratio with `@id` parameter for http plugin: +example configuration with `@id` parameter for http plugin: ``` @type http @@ -36,7 +36,7 @@ example configuratio with `@id` parameter for http plugin: ### Measurements & Fields: -Fields may vary depends on type of the plugin +Fields may vary depending on the plugin type - fluentd - retry_count (float, unit) diff --git a/plugins/inputs/fluentd/fluentd.go b/plugins/inputs/fluentd/fluentd.go index c99960740..7d4a0cd5e 100644 --- a/plugins/inputs/fluentd/fluentd.go +++ b/plugins/inputs/fluentd/fluentd.go @@ -53,7 +53,7 @@ type pluginData struct { // parse JSON from fluentd Endpoint // Parameters: -// data: unprocessed json recivied from endpoint +// data: unprocessed json received from endpoint // // Returns: // pluginData: slice that contains parsed plugins @@ -76,7 +76,7 @@ func parse(data []byte) (datapointArray []pluginData, err error) { // Description - display description func (h *Fluentd) Description() string { return description } -// SampleConfig - generate configuretion +// SampleConfig - generate configuration func (h *Fluentd) SampleConfig() string { return sampleConfig } // Gather - Main code responsible for gathering, processing and creating metrics diff --git a/plugins/inputs/github/README.md b/plugins/inputs/github/README.md new file mode 100644 index 000000000..46127082e --- /dev/null +++ b/plugins/inputs/github/README.md @@ -0,0 +1,64 @@ +# GitHub Input Plugin + +Gather repository information from [GitHub][] hosted repositories. + +**Note:** Telegraf also contains the [webhook][] input which can be used as an +alternative method for collecting repository information. + +### Configuration + +```toml +[[inputs.github]] + ## List of repositories to monitor + repositories = [ + "influxdata/telegraf", + "influxdata/influxdb" + ] + + ## Github API access token. Unauthenticated requests are limited to 60 per hour. + # access_token = "" + + ## Github API enterprise url. Github Enterprise accounts must specify their base url. + # enterprise_base_url = "" + + ## Timeout for HTTP requests. 
+ # http_timeout = "5s" +``` + +### Metrics + +- github_repository + - tags: + - name - The repository name + - owner - The owner of the repository + - language - The primary language of the repository + - license - The license set for the repository + - fields: + - forks (int) + - open_issues (int) + - networks (int) + - size (int) + - subscribers (int) + - stars (int) + - watchers (int) + +When the [internal][] input is enabled: + ++ internal_github + - tags: + - access_token - An obfuscated reference to the configured access token or "Unauthenticated" + - fields: + - limit - How many requests you are limited to (per hour) + - remaining - How many requests you have remaining (per hour) + - blocks - How many requests have been blocked due to rate limit + +### Example Output + +``` +github_repository,language=Go,license=MIT\ License,name=telegraf,owner=influxdata forks=2679i,networks=2679i,open_issues=794i,size=23263i,stars=7091i,subscribers=316i,watchers=7091i 1563901372000000000 +internal_github,access_token=Unauthenticated rate_limit_remaining=59i,rate_limit_limit=60i,rate_limit_blocks=0i 1552653551000000000 +``` + +[GitHub]: https://www.github.com +[internal]: /plugins/inputs/internal +[webhook]: /plugins/inputs/webhooks/github diff --git a/plugins/inputs/github/github.go b/plugins/inputs/github/github.go new file mode 100644 index 000000000..3e5597707 --- /dev/null +++ b/plugins/inputs/github/github.go @@ -0,0 +1,200 @@ +package github + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" + "time" + + "github.com/google/go-github/github" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/selfstat" + "golang.org/x/oauth2" +) + +// GitHub - plugin main structure +type GitHub struct { + Repositories []string `toml:"repositories"` + AccessToken string `toml:"access_token"` + EnterpriseBaseURL string `toml:"enterprise_base_url"` + HTTPTimeout internal.Duration `toml:"http_timeout"` + githubClient *github.Client + + obfuscatedToken string + + RateLimit selfstat.Stat + RateLimitErrors selfstat.Stat + RateRemaining selfstat.Stat +} + +const sampleConfig = ` + ## List of repositories to monitor. + repositories = [ + "influxdata/telegraf", + "influxdata/influxdb" + ] + + ## Github API access token. Unauthenticated requests are limited to 60 per hour. + # access_token = "" + + ## Github API enterprise url. Github Enterprise accounts must specify their base url. + # enterprise_base_url = "" + + ## Timeout for HTTP requests. + # http_timeout = "5s" +` + +// SampleConfig returns sample configuration for this plugin. +func (g *GitHub) SampleConfig() string { + return sampleConfig +} + +// Description returns the plugin description. +func (g *GitHub) Description() string { + return "Gather repository information from GitHub hosted repositories." +} + +// Create GitHub Client +func (g *GitHub) createGitHubClient(ctx context.Context) (*github.Client, error) { + httpClient := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + Timeout: g.HTTPTimeout.Duration, + } + + g.obfuscatedToken = "Unauthenticated" + + if g.AccessToken != "" { + tokenSource := oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: g.AccessToken}, + ) + oauthClient := oauth2.NewClient(ctx, tokenSource) + ctx = context.WithValue(ctx, oauth2.HTTPClient, oauthClient) + + g.obfuscatedToken = g.AccessToken[0:4] + "..." 
+ g.AccessToken[len(g.AccessToken)-3:] + + return g.newGithubClient(oauthClient) + } + + return g.newGithubClient(httpClient) +} + +func (g *GitHub) newGithubClient(httpClient *http.Client) (*github.Client, error) { + if g.EnterpriseBaseURL != "" { + return github.NewEnterpriseClient(g.EnterpriseBaseURL, "", httpClient) + } + return github.NewClient(httpClient), nil +} + +// Gather GitHub Metrics +func (g *GitHub) Gather(acc telegraf.Accumulator) error { + ctx := context.Background() + + if g.githubClient == nil { + githubClient, err := g.createGitHubClient(ctx) + + if err != nil { + return err + } + + g.githubClient = githubClient + + tokenTags := map[string]string{ + "access_token": g.obfuscatedToken, + } + + g.RateLimitErrors = selfstat.Register("github", "rate_limit_blocks", tokenTags) + g.RateLimit = selfstat.Register("github", "rate_limit_limit", tokenTags) + g.RateRemaining = selfstat.Register("github", "rate_limit_remaining", tokenTags) + } + + var wg sync.WaitGroup + wg.Add(len(g.Repositories)) + + for _, repository := range g.Repositories { + go func(repositoryName string, acc telegraf.Accumulator) { + defer wg.Done() + + owner, repository, err := splitRepositoryName(repositoryName) + if err != nil { + acc.AddError(err) + return + } + + repositoryInfo, response, err := g.githubClient.Repositories.Get(ctx, owner, repository) + + if _, ok := err.(*github.RateLimitError); ok { + g.RateLimitErrors.Incr(1) + } + + if err != nil { + acc.AddError(err) + return + } + + g.RateLimit.Set(int64(response.Rate.Limit)) + g.RateRemaining.Set(int64(response.Rate.Remaining)) + + now := time.Now() + tags := getTags(repositoryInfo) + fields := getFields(repositoryInfo) + + acc.AddFields("github_repository", fields, tags, now) + }(repository, acc) + } + + wg.Wait() + return nil +} + +func splitRepositoryName(repositoryName string) (string, string, error) { + splits := strings.SplitN(repositoryName, "/", 2) + + if len(splits) != 2 { + return "", "", fmt.Errorf("%v is not of format 'owner/repository'", repositoryName) + } + + return splits[0], splits[1], nil +} + +func getLicense(rI *github.Repository) string { + if licenseName := rI.GetLicense().GetName(); licenseName != "" { + return licenseName + } + + return "None" +} + +func getTags(repositoryInfo *github.Repository) map[string]string { + return map[string]string{ + "owner": repositoryInfo.GetOwner().GetLogin(), + "name": repositoryInfo.GetName(), + "language": repositoryInfo.GetLanguage(), + "license": getLicense(repositoryInfo), + } +} + +func getFields(repositoryInfo *github.Repository) map[string]interface{} { + return map[string]interface{}{ + "stars": repositoryInfo.GetStargazersCount(), + "subscribers": repositoryInfo.GetSubscribersCount(), + "watchers": repositoryInfo.GetWatchersCount(), + "networks": repositoryInfo.GetNetworkCount(), + "forks": repositoryInfo.GetForksCount(), + "open_issues": repositoryInfo.GetOpenIssuesCount(), + "size": repositoryInfo.GetSize(), + } +} + +func init() { + inputs.Add("github", func() telegraf.Input { + return &GitHub{ + HTTPTimeout: internal.Duration{Duration: time.Second * 5}, + } + }) +} diff --git a/plugins/inputs/github/github_test.go b/plugins/inputs/github/github_test.go new file mode 100644 index 000000000..3c346b7f8 --- /dev/null +++ b/plugins/inputs/github/github_test.go @@ -0,0 +1,140 @@ +package github + +import ( + "net/http" + "reflect" + "testing" + + gh "github.com/google/go-github/github" + "github.com/stretchr/testify/require" +) + +func TestNewGithubClient(t *testing.T) { + httpClient := 
&http.Client{} + g := &GitHub{} + client, err := g.newGithubClient(httpClient) + require.Nil(t, err) + require.Contains(t, client.BaseURL.String(), "api.github.com") + g.EnterpriseBaseURL = "api.example.com/" + enterpriseClient, err := g.newGithubClient(httpClient) + require.Nil(t, err) + require.Contains(t, enterpriseClient.BaseURL.String(), "api.example.com") +} + +func TestSplitRepositoryNameWithWorkingExample(t *testing.T) { + var validRepositoryNames = []struct { + fullName string + owner string + repository string + }{ + {"influxdata/telegraf", "influxdata", "telegraf"}, + {"influxdata/influxdb", "influxdata", "influxdb"}, + {"rawkode/saltstack-dotfiles", "rawkode", "saltstack-dotfiles"}, + } + + for _, tt := range validRepositoryNames { + t.Run(tt.fullName, func(t *testing.T) { + owner, repository, _ := splitRepositoryName(tt.fullName) + + require.Equal(t, tt.owner, owner) + require.Equal(t, tt.repository, repository) + }) + } +} + +func TestSplitRepositoryNameWithNoSlash(t *testing.T) { + var invalidRepositoryNames = []string{ + "influxdata-influxdb", + } + + for _, tt := range invalidRepositoryNames { + t.Run(tt, func(t *testing.T) { + _, _, err := splitRepositoryName(tt) + + require.NotNil(t, err) + }) + } +} + +func TestGetLicenseWhenExists(t *testing.T) { + licenseName := "MIT" + license := gh.License{Name: &licenseName} + repository := gh.Repository{License: &license} + + getLicenseReturn := getLicense(&repository) + + require.Equal(t, "MIT", getLicenseReturn) +} + +func TestGetLicenseWhenMissing(t *testing.T) { + repository := gh.Repository{} + + getLicenseReturn := getLicense(&repository) + + require.Equal(t, "None", getLicenseReturn) +} + +func TestGetTags(t *testing.T) { + licenseName := "MIT" + license := gh.License{Name: &licenseName} + + ownerName := "influxdata" + owner := gh.User{Login: &ownerName} + + fullName := "influxdata/influxdb" + repositoryName := "influxdb" + + language := "Go" + + repository := gh.Repository{ + FullName: &fullName, + Name: &repositoryName, + License: &license, + Owner: &owner, + Language: &language, + } + + getTagsReturn := getTags(&repository) + + correctTagsReturn := map[string]string{ + "owner": ownerName, + "name": repositoryName, + "language": language, + "license": licenseName, + } + + require.Equal(t, true, reflect.DeepEqual(getTagsReturn, correctTagsReturn)) +} + +func TestGetFields(t *testing.T) { + stars := 1 + forks := 2 + openIssues := 3 + size := 4 + subscribers := 5 + watchers := 6 + + repository := gh.Repository{ + StargazersCount: &stars, + ForksCount: &forks, + OpenIssuesCount: &openIssues, + Size: &size, + NetworkCount: &forks, + SubscribersCount: &subscribers, + WatchersCount: &watchers, + } + + getFieldsReturn := getFields(&repository) + + correctFieldReturn := make(map[string]interface{}) + + correctFieldReturn["stars"] = 1 + correctFieldReturn["forks"] = 2 + correctFieldReturn["networks"] = 2 + correctFieldReturn["open_issues"] = 3 + correctFieldReturn["size"] = 4 + correctFieldReturn["subscribers"] = 5 + correctFieldReturn["watchers"] = 6 + + require.Equal(t, true, reflect.DeepEqual(getFieldsReturn, correctFieldReturn)) +} diff --git a/plugins/inputs/graylog/README.md b/plugins/inputs/graylog/README.md index 6ab4a70c4..acb191f8b 100644 --- a/plugins/inputs/graylog/README.md +++ b/plugins/inputs/graylog/README.md @@ -7,7 +7,7 @@ Plugin currently support two type of end points:- - multiple (Ex http://[graylog-server-ip]:12900/system/metrics/multiple) - namespace (Ex 
http://[graylog-server-ip]:12900/system/metrics/namespace/{namespace}) -End Point can be a mixe of one multiple end point and several namespaces end points +End Point can be a mix of one multiple end point and several namespaces end points Note: if namespace end point specified metrics array will be ignored for that call. @@ -33,7 +33,7 @@ Note: if namespace end point specified metrics array will be ignored for that ca ## Metrics list ## List of metrics can be found on Graylog webservice documentation. - ## Or by hitting the the web service api at: + ## Or by hitting the web service api at: ## http://[graylog-host]:12900/system/metrics metrics = [ "jvm.cl.loaded", diff --git a/plugins/inputs/graylog/graylog.go b/plugins/inputs/graylog/graylog.go index 8e580480d..4309c6481 100644 --- a/plugins/inputs/graylog/graylog.go +++ b/plugins/inputs/graylog/graylog.go @@ -47,7 +47,7 @@ type HTTPClient interface { // req: HTTP request object // // Returns: - // http.Response: HTTP respons object + // http.Response: HTTP response object // error : Any error that may have occurred MakeRequest(req *http.Request) (*http.Response, error) @@ -235,6 +235,9 @@ func (h *GrayLog) sendRequest(serverURL string) (string, float64, error) { if err != nil { return "", -1, fmt.Errorf("Invalid server URL \"%s\"", serverURL) } + // Add X-Requested-By header + headers["X-Requested-By"] = "Telegraf" + if strings.Contains(requestURL.String(), "multiple") { m := &Messagebody{Metrics: h.Metrics} http_body, err := json.Marshal(m) diff --git a/plugins/inputs/graylog/graylog_test.go b/plugins/inputs/graylog/graylog_test.go index a5088cf7d..f8008f1d9 100644 --- a/plugins/inputs/graylog/graylog_test.go +++ b/plugins/inputs/graylog/graylog_test.go @@ -135,7 +135,7 @@ func (c *mockHTTPClient) HTTPClient() *http.Client { // *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client func genMockGrayLog(response string, statusCode int) []*GrayLog { return []*GrayLog{ - &GrayLog{ + { client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ "http://localhost:12900/system/metrics/multiple", diff --git a/plugins/inputs/haproxy/README.md b/plugins/inputs/haproxy/README.md index 35b59524d..86fbb986b 100644 --- a/plugins/inputs/haproxy/README.md +++ b/plugins/inputs/haproxy/README.md @@ -15,6 +15,10 @@ or [HTTP statistics page](https://cbonte.github.io/haproxy-dconv/1.9/management. ## Make sure you specify the complete path to the stats endpoint ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats servers = ["http://myhaproxy.com:1936/haproxy?stats"] diff --git a/plugins/inputs/haproxy/haproxy.go b/plugins/inputs/haproxy/haproxy.go index 19087a978..7179540d7 100644 --- a/plugins/inputs/haproxy/haproxy.go +++ b/plugins/inputs/haproxy/haproxy.go @@ -23,6 +23,8 @@ import ( type haproxy struct { Servers []string KeepFieldNames bool + Username string + Password string tls.ClientConfig client *http.Client @@ -37,6 +39,10 @@ var sampleConfig = ` ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats servers = ["http://myhaproxy.com:1936/haproxy?stats"] + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + ## You can also use local socket with standard wildcard globbing. 
## Server address not starting with 'http' will be treated as a possible ## socket, so both examples below are valid. @@ -163,12 +169,19 @@ func (g *haproxy) gatherServer(addr string, acc telegraf.Accumulator) error { if u.User != nil { p, _ := u.User.Password() req.SetBasicAuth(u.User.Username(), p) + u.User = &url.Userinfo{} + addr = u.String() + } + + if g.Username != "" || g.Password != "" { + req.SetBasicAuth(g.Username, g.Password) } res, err := g.client.Do(req) if err != nil { return fmt.Errorf("Unable to connect to haproxy server '%s': %s", addr, err) } + defer res.Body.Close() if res.StatusCode != 200 { return fmt.Errorf("Unable to get valid stat result from '%s', http response code : %d", addr, res.StatusCode) diff --git a/plugins/inputs/haproxy/haproxy_test.go b/plugins/inputs/haproxy/haproxy_test.go index 27a197304..e05031f19 100644 --- a/plugins/inputs/haproxy/haproxy_test.go +++ b/plugins/inputs/haproxy/haproxy_test.go @@ -248,30 +248,30 @@ func HaproxyGetFieldValues() map[string]interface{} { "http_response.4xx": uint64(140), "http_response.5xx": uint64(0), "http_response.other": uint64(0), - "iid": uint64(4), - "last_chk": "OK", - "lastchg": uint64(1036557), - "lastsess": int64(1342), - "lbtot": uint64(9481), - "mode": "http", - "pid": uint64(1), - "qcur": uint64(0), - "qmax": uint64(0), - "qtime": uint64(1268), - "rate": uint64(0), - "rate_max": uint64(2), - "rtime": uint64(2908), - "sid": uint64(1), - "scur": uint64(0), - "slim": uint64(2), - "smax": uint64(2), - "srv_abort": uint64(0), - "status": "UP", - "stot": uint64(14539), - "ttime": uint64(4500), - "weight": uint64(1), - "wredis": uint64(0), - "wretr": uint64(0), + "iid": uint64(4), + "last_chk": "OK", + "lastchg": uint64(1036557), + "lastsess": int64(1342), + "lbtot": uint64(9481), + "mode": "http", + "pid": uint64(1), + "qcur": uint64(0), + "qmax": uint64(0), + "qtime": uint64(1268), + "rate": uint64(0), + "rate_max": uint64(2), + "rtime": uint64(2908), + "sid": uint64(1), + "scur": uint64(0), + "slim": uint64(2), + "smax": uint64(2), + "srv_abort": uint64(0), + "status": "UP", + "stot": uint64(14539), + "ttime": uint64(4500), + "weight": uint64(1), + "wredis": uint64(0), + "wretr": uint64(0), } return fields } diff --git a/plugins/inputs/hddtemp/README.md b/plugins/inputs/hddtemp/README.md index 3bafb4f21..d2d3e4f13 100644 --- a/plugins/inputs/hddtemp/README.md +++ b/plugins/inputs/hddtemp/README.md @@ -1,43 +1,41 @@ -# Hddtemp Input Plugin +# HDDtemp Input Plugin -This plugin reads data from hddtemp daemon +This plugin reads data from hddtemp daemon. -## Requirements +Hddtemp should be installed and its daemon running. -Hddtemp should be installed and its daemon running - -## Configuration +### Configuration ```toml [[inputs.hddtemp]] -## By default, telegraf gathers temps data from all disks detected by the -## hddtemp. -## -## Only collect temps from the selected disks. -## -## A * as the device name will return the temperature values of all disks. -## -# address = "127.0.0.1:7634" -# devices = ["sda", "*"] + ## By default, telegraf gathers temps data from all disks detected by the + ## hddtemp. + ## + ## Only collect temps from the selected disks. + ## + ## A * as the device name will return the temperature values of all disks. 
+ ## + # address = "127.0.0.1:7634" + # devices = ["sda", "*"] ``` -## Measurements +### Metrics - hddtemp - - temperature - -Tags: -- device -- model -- unit -- status + - tags: + - device + - model + - unit + - status + - source + - fields: + - temperature - -## Example output +### Example output ``` -> hddtemp,unit=C,status=,host=server1,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000 -> hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=38i 148165564700000000 -> hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,status=,host=server1 temperature=36i 1481655647000000000 +hddtemp,source=server1,unit=C,status=,device=sdb,model=WDC\ WD740GD-00FLA1 temperature=43i 1481655647000000000 +hddtemp,device=sdc,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=38i 148165564700000000 +hddtemp,device=sdd,model=SAMSUNG\ HD103UI,unit=C,source=server1,status= temperature=36i 1481655647000000000 ``` diff --git a/plugins/inputs/hddtemp/hddtemp.go b/plugins/inputs/hddtemp/hddtemp.go index dd4622df4..0f084ac21 100644 --- a/plugins/inputs/hddtemp/hddtemp.go +++ b/plugins/inputs/hddtemp/hddtemp.go @@ -1,6 +1,8 @@ package hddtemp import ( + "net" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" gohddtemp "github.com/influxdata/telegraf/plugins/inputs/hddtemp/go-hddtemp" @@ -42,8 +44,12 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error { if h.fetcher == nil { h.fetcher = gohddtemp.New() } - disks, err := h.fetcher.Fetch(h.Address) + source, _, err := net.SplitHostPort(h.Address) + if err != nil { + source = h.Address + } + disks, err := h.fetcher.Fetch(h.Address) if err != nil { return err } @@ -56,6 +62,7 @@ func (h *HDDTemp) Gather(acc telegraf.Accumulator) error { "model": disk.Model, "unit": disk.Unit, "status": disk.Status, + "source": source, } fields := map[string]interface{}{ diff --git a/plugins/inputs/hddtemp/hddtemp_test.go b/plugins/inputs/hddtemp/hddtemp_test.go index 37dfef7d6..f299c2ac6 100644 --- a/plugins/inputs/hddtemp/hddtemp_test.go +++ b/plugins/inputs/hddtemp/hddtemp_test.go @@ -14,13 +14,13 @@ type mockFetcher struct { func (h *mockFetcher) Fetch(address string) ([]hddtemp.Disk, error) { return []hddtemp.Disk{ - hddtemp.Disk{ + { DeviceName: "Disk1", Model: "Model1", Temperature: 13, Unit: "C", }, - hddtemp.Disk{ + { DeviceName: "Disk2", Model: "Model2", Temperature: 14, @@ -36,6 +36,7 @@ func newMockFetcher() *mockFetcher { func TestFetch(t *testing.T) { hddtemp := &HDDTemp{ fetcher: newMockFetcher(), + Address: "localhost", Devices: []string{"*"}, } @@ -58,6 +59,7 @@ func TestFetch(t *testing.T) { "model": "Model1", "unit": "C", "status": "", + "source": "localhost", }, }, { @@ -69,6 +71,7 @@ func TestFetch(t *testing.T) { "model": "Model2", "unit": "C", "status": "", + "source": "localhost", }, }, } diff --git a/plugins/inputs/http/README.md b/plugins/inputs/http/README.md index 25d3d2b2d..59abd8256 100644 --- a/plugins/inputs/http/README.md +++ b/plugins/inputs/http/README.md @@ -19,6 +19,17 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ## Optional HTTP headers # headers = {"X-Special-Header" = "Special-Value"} + ## HTTP entity-body to send with POST/PUT requests. + # body = "" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. 
+ # content_encoding = "identity" + + ## Optional file with Bearer token + ## file content is added as an Authorization header + # bearer_token = "/path/to/file" + ## Optional HTTP Basic Auth Credentials # username = "username" # password = "pa$$word" @@ -33,6 +44,9 @@ The HTTP input plugin collects metrics from one or more HTTP(S) endpoints. The ## Amount of time allowed to complete the HTTP request # timeout = "5s" + ## List of success status codes + # success_status_codes = [200] + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/inputs/http/http.go b/plugins/inputs/http/http.go index c9c3460be..8290a6f66 100644 --- a/plugins/inputs/http/http.go +++ b/plugins/inputs/http/http.go @@ -1,8 +1,8 @@ package http import ( - "errors" "fmt" + "io" "io/ioutil" "net/http" "strings" @@ -17,17 +17,24 @@ import ( ) type HTTP struct { - URLs []string `toml:"urls"` - Method string + URLs []string `toml:"urls"` + Method string `toml:"method"` + Body string `toml:"body"` + ContentEncoding string `toml:"content_encoding"` - Headers map[string]string + Headers map[string]string `toml:"headers"` // HTTP Basic Auth Credentials - Username string - Password string + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig - Timeout internal.Duration + // Absolute path to file with Bearer token + BearerToken string `toml:"bearer_token"` + + SuccessStatusCodes []int `toml:"success_status_codes"` + + Timeout internal.Duration `toml:"timeout"` client *http.Client @@ -48,12 +55,20 @@ var sampleConfig = ` ## Optional HTTP headers # headers = {"X-Special-Header" = "Special-Value"} + ## Optional file with Bearer token + ## file content is added as an Authorization header + # bearer_token = "/path/to/file" + ## Optional HTTP Basic Auth Credentials # username = "username" # password = "pa$$word" - ## Tag all metrics with the url - # tag_url = true + ## HTTP entity-body to send with POST/PUT requests. + # body = "" + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -65,6 +80,9 @@ var sampleConfig = ` ## Amount of time allowed to complete the HTTP request # timeout = "5s" + ## List of success status codes + # success_status_codes = [200] + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -82,27 +100,30 @@ func (*HTTP) Description() string { return "Read formatted metrics from one or more HTTP endpoints" } +func (h *HTTP) Init() error { + tlsCfg, err := h.ClientConfig.TLSConfig() + if err != nil { + return err + } + + h.client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: h.Timeout.Duration, + } + + // Set default as [200] + if len(h.SuccessStatusCodes) == 0 { + h.SuccessStatusCodes = []int{200} + } + return nil +} + // Gather takes in an accumulator and adds the metrics that the Input // gathers. 
This is called every "interval" func (h *HTTP) Gather(acc telegraf.Accumulator) error { - if h.parser == nil { - return errors.New("Parser is not set") - } - - if h.client == nil { - tlsCfg, err := h.ClientConfig.TLSConfig() - if err != nil { - return err - } - h.client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - Proxy: http.ProxyFromEnvironment, - }, - Timeout: h.Timeout.Duration, - } - } - var wg sync.WaitGroup for _, u := range h.URLs { wg.Add(1) @@ -135,10 +156,29 @@ func (h *HTTP) gatherURL( acc telegraf.Accumulator, url string, ) error { - request, err := http.NewRequest(h.Method, url, nil) + body, err := makeRequestBodyReader(h.ContentEncoding, h.Body) if err != nil { return err } + defer body.Close() + + request, err := http.NewRequest(h.Method, url, body) + if err != nil { + return err + } + + if h.BearerToken != "" { + token, err := ioutil.ReadFile(h.BearerToken) + if err != nil { + return err + } + bearer := "Bearer " + strings.Trim(string(token), "\n") + request.Header.Set("Authorization", bearer) + } + + if h.ContentEncoding == "gzip" { + request.Header.Set("Content-Encoding", "gzip") + } for k, v := range h.Headers { if strings.ToLower(k) == "host" { @@ -158,12 +198,19 @@ func (h *HTTP) gatherURL( } defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("Received status code %d (%s), expected %d (%s)", + responseHasSuccessCode := false + for _, statusCode := range h.SuccessStatusCodes { + if resp.StatusCode == statusCode { + responseHasSuccessCode = true + break + } + } + + if !responseHasSuccessCode { + return fmt.Errorf("received status code %d (%s), expected any value out of %v", resp.StatusCode, http.StatusText(resp.StatusCode), - http.StatusOK, - http.StatusText(http.StatusOK)) + h.SuccessStatusCodes) } b, err := ioutil.ReadAll(resp.Body) @@ -186,6 +233,18 @@ func (h *HTTP) gatherURL( return nil } +func makeRequestBodyReader(contentEncoding, body string) (io.ReadCloser, error) { + var reader io.Reader = strings.NewReader(body) + if contentEncoding == "gzip" { + rc, err := internal.CompressWithGzip(reader) + if err != nil { + return nil, err + } + return rc, nil + } + return ioutil.NopCloser(reader), nil +} + func init() { inputs.Add("http", func() telegraf.Input { return &HTTP{ diff --git a/plugins/inputs/http/http_test.go b/plugins/inputs/http/http_test.go index 486edabc9..993eda732 100644 --- a/plugins/inputs/http/http_test.go +++ b/plugins/inputs/http/http_test.go @@ -1,6 +1,9 @@ package http_test import ( + "compress/gzip" + "fmt" + "io/ioutil" "net/http" "net/http/httptest" "testing" @@ -26,10 +29,15 @@ func TestHTTPwithJSONFormat(t *testing.T) { URLs: []string{url}, } metricName := "metricName" - p, _ := parsers.NewJSONParser(metricName, nil, nil) + + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) plugin.SetParser(p) var acc testutil.Accumulator + plugin.Init() require.NoError(t, acc.GatherError(plugin.Gather)) require.Len(t, acc.Metrics, 1) @@ -63,11 +71,15 @@ func TestHTTPHeaders(t *testing.T) { URLs: []string{url}, Headers: map[string]string{header: headerValue}, } - metricName := "metricName" - p, _ := parsers.NewJSONParser(metricName, nil, nil) + + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) plugin.SetParser(p) var acc testutil.Accumulator + plugin.Init() require.NoError(t, acc.GatherError(plugin.Gather)) } @@ -83,13 +95,41 @@ func TestInvalidStatusCode(t *testing.T) { } metricName := "metricName" 
- p, _ := parsers.NewJSONParser(metricName, nil, nil) + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: metricName, + }) plugin.SetParser(p) var acc testutil.Accumulator + plugin.Init() require.Error(t, acc.GatherError(plugin.Gather)) } +func TestSuccessStatusCodes(t *testing.T) { + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusAccepted) + })) + defer fakeServer.Close() + + url := fakeServer.URL + "/endpoint" + plugin := &plugin.HTTP{ + URLs: []string{url}, + SuccessStatusCodes: []int{200, 202}, + } + + metricName := "metricName" + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: metricName, + }) + plugin.SetParser(p) + + var acc testutil.Accumulator + plugin.Init() + require.NoError(t, acc.GatherError(plugin.Gather)) +} + func TestMethod(t *testing.T) { fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.Method == "POST" { @@ -105,35 +145,110 @@ func TestMethod(t *testing.T) { Method: "POST", } - metricName := "metricName" - p, _ := parsers.NewJSONParser(metricName, nil, nil) + p, _ := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "metricName", + }) plugin.SetParser(p) var acc testutil.Accumulator + plugin.Init() require.NoError(t, acc.GatherError(plugin.Gather)) } -func TestParserNotSet(t *testing.T) { - fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/endpoint" { - _, _ = w.Write([]byte(simpleJSON)) - } else { - w.WriteHeader(http.StatusNotFound) - } - })) - defer fakeServer.Close() - - url := fakeServer.URL + "/endpoint" - plugin := &plugin.HTTP{ - URLs: []string{url}, - } - - var acc testutil.Accumulator - require.Error(t, acc.GatherError(plugin.Gather)) -} - const simpleJSON = ` { "a": 1.2 } ` + +func TestBodyAndContentEncoding(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + url := fmt.Sprintf("http://%s", ts.Listener.Addr().String()) + + tests := []struct { + name string + plugin *plugin.HTTP + queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + }{ + { + name: "no body", + plugin: &plugin.HTTP{ + Method: "POST", + URLs: []string{url}, + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Equal(t, []byte(""), body) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "post body", + plugin: &plugin.HTTP{ + URLs: []string{url}, + Method: "POST", + Body: "test", + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Equal(t, []byte("test"), body) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "get method body is sent", + plugin: &plugin.HTTP{ + URLs: []string{url}, + Method: "GET", + Body: "test", + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Equal(t, []byte("test"), body) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "gzip encoding", + plugin: &plugin.HTTP{ + URLs: []string{url}, + Method: "GET", + Body: "test", + ContentEncoding: "gzip", + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.Header.Get("Content-Encoding"), "gzip") + + gr, err := 
gzip.NewReader(r.Body) + require.NoError(t, err) + body, err := ioutil.ReadAll(gr) + require.NoError(t, err) + require.Equal(t, []byte("test"), body) + w.WriteHeader(http.StatusOK) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tt.queryHandlerFunc(t, w, r) + }) + + parser, err := parsers.NewParser(&parsers.Config{DataFormat: "influx"}) + require.NoError(t, err) + + tt.plugin.SetParser(parser) + + var acc testutil.Accumulator + tt.plugin.Init() + err = tt.plugin.Gather(&acc) + require.NoError(t, err) + }) + } +} diff --git a/plugins/inputs/http_listener/README.md b/plugins/inputs/http_listener/README.md deleted file mode 100644 index f1ff71f0a..000000000 --- a/plugins/inputs/http_listener/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# HTTP listener service input plugin - -The HTTP listener is a service input plugin that listens for messages sent via HTTP POST. -The plugin expects messages in the InfluxDB line-protocol ONLY, other Telegraf input data formats are not supported. -The intent of the plugin is to allow Telegraf to serve as a proxy/router for the `/write` endpoint of the InfluxDB HTTP API. - -The `/write` endpoint supports the `precision` query parameter and can be set to one of `ns`, `u`, `ms`, `s`, `m`, `h`. All other parameters are ignored and defer to the output plugins configuration. - -When chaining Telegraf instances using this plugin, CREATE DATABASE requests receive a 200 OK response with message body `{"results":[]}` but they are not relayed. The output configuration of the Telegraf instance which ultimately submits data to InfluxDB determines the destination database. - -Enable TLS by specifying the file names of a service TLS certificate and key. - -Enable mutually authenticated TLS and authorize client connections by signing certificate authority by including a list of allowed CA certificate file names in ````tls_allowed_cacerts````. - -Enable basic HTTP authentication of clients by specifying a username and password to check for. These credentials will be received from the client _as plain text_ if TLS is not configured. - -See: [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#influx). - -**Example:** -``` -curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' -``` - -### Configuration: - -This is a sample configuration for the plugin. - -```toml -# # Influx HTTP write listener -[[inputs.http_listener]] - ## Address and port to host HTTP listener on - service_address = ":8186" - - ## timeouts - read_timeout = "10s" - write_timeout = "10s" - - ## HTTPS - tls_cert= "/etc/telegraf/cert.pem" - tls_key = "/etc/telegraf/key.pem" - - ## MTLS - tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Basic authentication - basic_username = "foobar" - basic_password = "barfoo" -``` diff --git a/plugins/inputs/http_listener/bufferpool.go b/plugins/inputs/http_listener/bufferpool.go deleted file mode 100644 index 00a93652d..000000000 --- a/plugins/inputs/http_listener/bufferpool.go +++ /dev/null @@ -1,43 +0,0 @@ -package http_listener - -import ( - "sync/atomic" -) - -type pool struct { - buffers chan []byte - size int - - created int64 -} - -// NewPool returns a new pool object. 
-// n is the number of buffers -// bufSize is the size (in bytes) of each buffer -func NewPool(n, bufSize int) *pool { - return &pool{ - buffers: make(chan []byte, n), - size: bufSize, - } -} - -func (p *pool) get() []byte { - select { - case b := <-p.buffers: - return b - default: - atomic.AddInt64(&p.created, 1) - return make([]byte, p.size) - } -} - -func (p *pool) put(b []byte) { - select { - case p.buffers <- b: - default: - } -} - -func (p *pool) ncreated() int64 { - return atomic.LoadInt64(&p.created) -} diff --git a/plugins/inputs/http_listener/http_listener.go b/plugins/inputs/http_listener/http_listener.go deleted file mode 100644 index 6415ebc9f..000000000 --- a/plugins/inputs/http_listener/http_listener.go +++ /dev/null @@ -1,418 +0,0 @@ -package http_listener - -import ( - "bytes" - "compress/gzip" - "crypto/subtle" - "crypto/tls" - "io" - "log" - "net" - "net/http" - "sync" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - tlsint "github.com/influxdata/telegraf/internal/tls" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/parsers/influx" - "github.com/influxdata/telegraf/selfstat" -) - -const ( - // DEFAULT_MAX_BODY_SIZE is the default maximum request body size, in bytes. - // if the request body is over this size, we will return an HTTP 413 error. - // 500 MB - DEFAULT_MAX_BODY_SIZE = 500 * 1024 * 1024 - - // MAX_LINE_SIZE is the maximum size, in bytes, that can be allocated for - // a single InfluxDB point. - // 64 KB - DEFAULT_MAX_LINE_SIZE = 64 * 1024 -) - -type TimeFunc func() time.Time - -type HTTPListener struct { - ServiceAddress string - ReadTimeout internal.Duration - WriteTimeout internal.Duration - MaxBodySize int64 - MaxLineSize int - Port int - - tlsint.ServerConfig - - BasicUsername string - BasicPassword string - - TimeFunc - - mu sync.Mutex - wg sync.WaitGroup - - listener net.Listener - - handler *influx.MetricHandler - parser *influx.Parser - acc telegraf.Accumulator - pool *pool - - BytesRecv selfstat.Stat - RequestsServed selfstat.Stat - WritesServed selfstat.Stat - QueriesServed selfstat.Stat - PingsServed selfstat.Stat - RequestsRecv selfstat.Stat - WritesRecv selfstat.Stat - QueriesRecv selfstat.Stat - PingsRecv selfstat.Stat - NotFoundsServed selfstat.Stat - BuffersCreated selfstat.Stat - AuthFailures selfstat.Stat -} - -const sampleConfig = ` - ## Address and port to host HTTP listener on - service_address = ":8186" - - ## maximum duration before timing out read of the request - read_timeout = "10s" - ## maximum duration before timing out write of the response - write_timeout = "10s" - - ## Maximum allowed http request body size in bytes. - ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) - max_body_size = 0 - - ## Maximum line size allowed to be sent in bytes. - ## 0 means to use the default of 65536 bytes (64 kibibytes) - max_line_size = 0 - - ## Set one or more allowed client CA certificate file names to - ## enable mutually authenticated TLS connections - tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - - ## Add service certificate and key - tls_cert = "/etc/telegraf/cert.pem" - tls_key = "/etc/telegraf/key.pem" - - ## Optional username and password to accept for HTTP basic authentication. - ## You probably want to make sure you have TLS configured above for this. 
- # basic_username = "foobar" - # basic_password = "barfoo" -` - -func (h *HTTPListener) SampleConfig() string { - return sampleConfig -} - -func (h *HTTPListener) Description() string { - return "Influx HTTP write listener" -} - -func (h *HTTPListener) Gather(_ telegraf.Accumulator) error { - h.BuffersCreated.Set(h.pool.ncreated()) - return nil -} - -// Start starts the http listener service. -func (h *HTTPListener) Start(acc telegraf.Accumulator) error { - h.mu.Lock() - defer h.mu.Unlock() - - tags := map[string]string{ - "address": h.ServiceAddress, - } - h.BytesRecv = selfstat.Register("http_listener", "bytes_received", tags) - h.RequestsServed = selfstat.Register("http_listener", "requests_served", tags) - h.WritesServed = selfstat.Register("http_listener", "writes_served", tags) - h.QueriesServed = selfstat.Register("http_listener", "queries_served", tags) - h.PingsServed = selfstat.Register("http_listener", "pings_served", tags) - h.RequestsRecv = selfstat.Register("http_listener", "requests_received", tags) - h.WritesRecv = selfstat.Register("http_listener", "writes_received", tags) - h.QueriesRecv = selfstat.Register("http_listener", "queries_received", tags) - h.PingsRecv = selfstat.Register("http_listener", "pings_received", tags) - h.NotFoundsServed = selfstat.Register("http_listener", "not_founds_served", tags) - h.BuffersCreated = selfstat.Register("http_listener", "buffers_created", tags) - h.AuthFailures = selfstat.Register("http_listener", "auth_failures", tags) - - if h.MaxBodySize == 0 { - h.MaxBodySize = DEFAULT_MAX_BODY_SIZE - } - if h.MaxLineSize == 0 { - h.MaxLineSize = DEFAULT_MAX_LINE_SIZE - } - - if h.ReadTimeout.Duration < time.Second { - h.ReadTimeout.Duration = time.Second * 10 - } - if h.WriteTimeout.Duration < time.Second { - h.WriteTimeout.Duration = time.Second * 10 - } - - h.acc = acc - h.pool = NewPool(200, h.MaxLineSize) - - tlsConf, err := h.ServerConfig.TLSConfig() - if err != nil { - return err - } - - server := &http.Server{ - Addr: h.ServiceAddress, - Handler: h, - ReadTimeout: h.ReadTimeout.Duration, - WriteTimeout: h.WriteTimeout.Duration, - TLSConfig: tlsConf, - } - - var listener net.Listener - if tlsConf != nil { - listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf) - } else { - listener, err = net.Listen("tcp", h.ServiceAddress) - } - if err != nil { - return err - } - h.listener = listener - h.Port = listener.Addr().(*net.TCPAddr).Port - - h.handler = influx.NewMetricHandler() - h.parser = influx.NewParser(h.handler) - - h.wg.Add(1) - go func() { - defer h.wg.Done() - server.Serve(h.listener) - }() - - log.Printf("I! Started HTTP listener service on %s\n", h.ServiceAddress) - - return nil -} - -// Stop cleans up all resources -func (h *HTTPListener) Stop() { - h.mu.Lock() - defer h.mu.Unlock() - - h.listener.Close() - h.wg.Wait() - - log.Println("I! 
Stopped HTTP listener service on ", h.ServiceAddress) -} - -func (h *HTTPListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { - h.RequestsRecv.Incr(1) - defer h.RequestsServed.Incr(1) - switch req.URL.Path { - case "/write": - h.WritesRecv.Incr(1) - defer h.WritesServed.Incr(1) - h.AuthenticateIfSet(h.serveWrite, res, req) - case "/query": - h.QueriesRecv.Incr(1) - defer h.QueriesServed.Incr(1) - // Deliver a dummy response to the query endpoint, as some InfluxDB - // clients test endpoint availability with a query - h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) { - res.Header().Set("Content-Type", "application/json") - res.Header().Set("X-Influxdb-Version", "1.0") - res.WriteHeader(http.StatusOK) - res.Write([]byte("{\"results\":[]}")) - }, res, req) - case "/ping": - h.PingsRecv.Incr(1) - defer h.PingsServed.Incr(1) - // respond to ping requests - h.AuthenticateIfSet(func(res http.ResponseWriter, req *http.Request) { - res.WriteHeader(http.StatusNoContent) - }, res, req) - default: - defer h.NotFoundsServed.Incr(1) - // Don't know how to respond to calls to other endpoints - h.AuthenticateIfSet(http.NotFound, res, req) - } -} - -func (h *HTTPListener) serveWrite(res http.ResponseWriter, req *http.Request) { - // Check that the content length is not too large for us to handle. - if req.ContentLength > h.MaxBodySize { - tooLarge(res) - return - } - now := h.TimeFunc() - - precision := req.URL.Query().Get("precision") - - // Handle gzip request bodies - body := req.Body - if req.Header.Get("Content-Encoding") == "gzip" { - var err error - body, err = gzip.NewReader(req.Body) - defer body.Close() - if err != nil { - log.Println("E! " + err.Error()) - badRequest(res) - return - } - } - body = http.MaxBytesReader(res, body, h.MaxBodySize) - - var return400 bool - var hangingBytes bool - buf := h.pool.get() - defer h.pool.put(buf) - bufStart := 0 - for { - n, err := io.ReadFull(body, buf[bufStart:]) - if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF { - log.Println("E! " + err.Error()) - // problem reading the request body - badRequest(res) - return - } - h.BytesRecv.Incr(int64(n)) - - if err == io.EOF { - if return400 { - badRequest(res) - } else { - res.WriteHeader(http.StatusNoContent) - } - return - } - - if hangingBytes { - i := bytes.IndexByte(buf, '\n') - if i == -1 { - // still didn't find a newline, keep scanning - continue - } - // rotate the bit remaining after the first newline to the front of the buffer - i++ // start copying after the newline - bufStart = len(buf) - i - if bufStart > 0 { - copy(buf, buf[i:]) - } - hangingBytes = false - continue - } - - if err == io.ErrUnexpectedEOF { - // finished reading the request body - if err := h.parse(buf[:n+bufStart], now, precision); err != nil { - log.Println("E! " + err.Error()) - return400 = true - } - if return400 { - badRequest(res) - } else { - res.WriteHeader(http.StatusNoContent) - } - return - } - - // if we got down here it means that we filled our buffer, and there - // are still bytes remaining to be read. So we will parse up until the - // final newline, then push the rest of the bytes into the next buffer. - i := bytes.LastIndexByte(buf, '\n') - if i == -1 { - // drop any line longer than the max buffer size - log.Printf("E! http_listener received a single line longer than the maximum of %d bytes", - len(buf)) - hangingBytes = true - return400 = true - bufStart = 0 - continue - } - if err := h.parse(buf[:i+1], now, precision); err != nil { - log.Println("E! 
" + err.Error()) - return400 = true - } - // rotate the bit remaining after the last newline to the front of the buffer - i++ // start copying after the newline - bufStart = len(buf) - i - if bufStart > 0 { - copy(buf, buf[i:]) - } - } -} - -func (h *HTTPListener) parse(b []byte, t time.Time, precision string) error { - h.mu.Lock() - defer h.mu.Unlock() - - h.handler.SetTimePrecision(getPrecisionMultiplier(precision)) - h.handler.SetTimeFunc(func() time.Time { return t }) - metrics, err := h.parser.Parse(b) - if err != nil { - return err - } - - for _, m := range metrics { - h.acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) - } - - return err -} - -func tooLarge(res http.ResponseWriter) { - res.Header().Set("Content-Type", "application/json") - res.Header().Set("X-Influxdb-Version", "1.0") - res.WriteHeader(http.StatusRequestEntityTooLarge) - res.Write([]byte(`{"error":"http: request body too large"}`)) -} - -func badRequest(res http.ResponseWriter) { - res.Header().Set("Content-Type", "application/json") - res.Header().Set("X-Influxdb-Version", "1.0") - res.WriteHeader(http.StatusBadRequest) - res.Write([]byte(`{"error":"http: bad request"}`)) -} - -func (h *HTTPListener) AuthenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { - if h.BasicUsername != "" && h.BasicPassword != "" { - reqUsername, reqPassword, ok := req.BasicAuth() - if !ok || - subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 || - subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 { - - h.AuthFailures.Incr(1) - http.Error(res, "Unauthorized.", http.StatusUnauthorized) - return - } - handler(res, req) - } else { - handler(res, req) - } -} - -func getPrecisionMultiplier(precision string) time.Duration { - d := time.Nanosecond - switch precision { - case "u": - d = time.Microsecond - case "ms": - d = time.Millisecond - case "s": - d = time.Second - case "m": - d = time.Minute - case "h": - d = time.Hour - } - return d -} - -func init() { - inputs.Add("http_listener", func() telegraf.Input { - return &HTTPListener{ - ServiceAddress: ":8186", - TimeFunc: time.Now, - } - }) -} diff --git a/plugins/inputs/http_listener_v2/README.md b/plugins/inputs/http_listener_v2/README.md new file mode 100644 index 000000000..05e480586 --- /dev/null +++ b/plugins/inputs/http_listener_v2/README.md @@ -0,0 +1,85 @@ +# HTTP Listener v2 Input Plugin + +HTTP Listener v2 is a service input plugin that listens for metrics sent via +HTTP. Metrics may be sent in any supported [data format][data_format]. + +**Note:** The plugin previously known as `http_listener` has been renamed +`influxdb_listener`. If you would like Telegraf to act as a proxy/relay for +InfluxDB it is recommended to use [`influxdb_listener`][influxdb_listener]. + +### Configuration: + +This is a sample configuration for the plugin. + +```toml +[[inputs.http_listener_v2]] + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Path to listen to. + # path = "/telegraf" + + ## HTTP methods to accept. + # methods = ["POST", "PUT"] + + ## maximum duration before timing out read of the request + # read_timeout = "10s" + ## maximum duration before timing out write of the response + # write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. + ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) + # max_body_size = "500MB" + + ## Part of the request to consume. Available options are "body" and + ## "query". 
+ # data_source = "body" + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. + # basic_username = "foobar" + # basic_password = "barfoo" + + ## Optional setting to map http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + +### Metrics: + +Metrics are collected from the part of the request specified by the `data_source` param and are parsed depending on the value of `data_format`. + +### Troubleshooting: + +**Send Line Protocol** +``` +curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' +``` + +**Send JSON** +``` +curl -i -XPOST 'http://localhost:8080/telegraf' --data-binary '{"value1": 42, "value2": 42}' +``` + +**Send query params** +``` +curl -i -XGET 'http://localhost:8080/telegraf?host=server01&value=0.42' +``` + +[data_format]: /docs/DATA_FORMATS_INPUT.md +[influxdb_listener]: /plugins/inputs/influxdb_listener/README.md diff --git a/plugins/inputs/http_listener_v2/http_listener_v2.go b/plugins/inputs/http_listener_v2/http_listener_v2.go new file mode 100644 index 000000000..a4237ea2a --- /dev/null +++ b/plugins/inputs/http_listener_v2/http_listener_v2.go @@ -0,0 +1,334 @@ +package http_listener_v2 + +import ( + "compress/gzip" + "crypto/subtle" + "crypto/tls" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +// defaultMaxBodySize is the default maximum request body size, in bytes. +// if the request body is over this size, we will return an HTTP 413 error. 
+// 500 MB +const defaultMaxBodySize = 500 * 1024 * 1024 + +const ( + body = "body" + query = "query" +) + +// TimeFunc provides a timestamp for the metrics +type TimeFunc func() time.Time + +// HTTPListenerV2 is an input plugin that collects external metrics sent via HTTP +type HTTPListenerV2 struct { + ServiceAddress string `toml:"service_address"` + Path string `toml:"path"` + Methods []string `toml:"methods"` + DataSource string `toml:"data_source"` + ReadTimeout internal.Duration `toml:"read_timeout"` + WriteTimeout internal.Duration `toml:"write_timeout"` + MaxBodySize internal.Size `toml:"max_body_size"` + Port int `toml:"port"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + HTTPHeaderTags map[string]string `toml:"http_header_tags"` + tlsint.ServerConfig + + TimeFunc + Log telegraf.Logger + + wg sync.WaitGroup + + listener net.Listener + + parsers.Parser + acc telegraf.Accumulator +} + +const sampleConfig = ` + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Path to listen to. + # path = "/telegraf" + + ## HTTP methods to accept. + # methods = ["POST", "PUT"] + + ## maximum duration before timing out read of the request + # read_timeout = "10s" + ## maximum duration before timing out write of the response + # write_timeout = "10s" + + ## Maximum allowed http request body size in bytes. + ## 0 means to use the default of 524,288,000 bytes (500 mebibytes) + # max_body_size = "500MB" + + ## Part of the request to consume. Available options are "body" and + ## "query". + # data_source = "body" + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. + # basic_username = "foobar" + # basic_password = "barfoo" + + ## Optional setting to map http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +func (h *HTTPListenerV2) SampleConfig() string { + return sampleConfig +} + +func (h *HTTPListenerV2) Description() string { + return "Generic HTTP write listener" +} + +func (h *HTTPListenerV2) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (h *HTTPListenerV2) SetParser(parser parsers.Parser) { + h.Parser = parser +} + +// Start starts the http listener service.
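+// It applies defaults of 10s for the read/write timeouts and 500 MB for max_body_size, binds service_address (using TLS when certificates are configured), and serves requests in a background goroutine until Stop is called.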
+func (h *HTTPListenerV2) Start(acc telegraf.Accumulator) error { + if h.MaxBodySize.Size == 0 { + h.MaxBodySize.Size = defaultMaxBodySize + } + + if h.ReadTimeout.Duration < time.Second { + h.ReadTimeout.Duration = time.Second * 10 + } + if h.WriteTimeout.Duration < time.Second { + h.WriteTimeout.Duration = time.Second * 10 + } + + h.acc = acc + + tlsConf, err := h.ServerConfig.TLSConfig() + if err != nil { + return err + } + + server := &http.Server{ + Addr: h.ServiceAddress, + Handler: h, + ReadTimeout: h.ReadTimeout.Duration, + WriteTimeout: h.WriteTimeout.Duration, + TLSConfig: tlsConf, + } + + var listener net.Listener + if tlsConf != nil { + listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf) + } else { + listener, err = net.Listen("tcp", h.ServiceAddress) + } + if err != nil { + return err + } + h.listener = listener + h.Port = listener.Addr().(*net.TCPAddr).Port + + h.wg.Add(1) + go func() { + defer h.wg.Done() + server.Serve(h.listener) + }() + + h.Log.Infof("Listening on %s", listener.Addr().String()) + + return nil +} + +// Stop cleans up all resources +func (h *HTTPListenerV2) Stop() { + h.listener.Close() + h.wg.Wait() +} + +func (h *HTTPListenerV2) ServeHTTP(res http.ResponseWriter, req *http.Request) { + handler := h.serveWrite + + if req.URL.Path != h.Path { + handler = http.NotFound + } + + h.authenticateIfSet(handler, res, req) +} + +func (h *HTTPListenerV2) serveWrite(res http.ResponseWriter, req *http.Request) { + // Check that the content length is not too large for us to handle. + if req.ContentLength > h.MaxBodySize.Size { + tooLarge(res) + return + } + + // Check if the requested HTTP method was specified in config. + isAcceptedMethod := false + for _, method := range h.Methods { + if req.Method == method { + isAcceptedMethod = true + break + } + } + if !isAcceptedMethod { + methodNotAllowed(res) + return + } + + var bytes []byte + var ok bool + + switch strings.ToLower(h.DataSource) { + case query: + bytes, ok = h.collectQuery(res, req) + default: + bytes, ok = h.collectBody(res, req) + } + + if !ok { + return + } + + metrics, err := h.Parse(bytes) + if err != nil { + h.Log.Debugf("Parse error: %s", err.Error()) + badRequest(res) + return + } + + for _, m := range metrics { + for headerName, measurementName := range h.HTTPHeaderTags { + headerValues, foundHeader := req.Header[headerName] + if foundHeader && len(headerValues) > 0 { + m.AddTag(measurementName, headerValues[0]) + } + } + + h.acc.AddMetric(m) + } + + res.WriteHeader(http.StatusNoContent) +} + +func (h *HTTPListenerV2) collectBody(res http.ResponseWriter, req *http.Request) ([]byte, bool) { + body := req.Body + + // Handle gzip request bodies + if req.Header.Get("Content-Encoding") == "gzip" { + var err error + body, err = gzip.NewReader(req.Body) + if err != nil { + h.Log.Debug(err.Error()) + badRequest(res) + return nil, false + } + defer body.Close() + } + + body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) + bytes, err := ioutil.ReadAll(body) + if err != nil { + tooLarge(res) + return nil, false + } + + return bytes, true +} + +func (h *HTTPListenerV2) collectQuery(res http.ResponseWriter, req *http.Request) ([]byte, bool) { + rawQuery := req.URL.RawQuery + + query, err := url.QueryUnescape(rawQuery) + if err != nil { + h.Log.Debugf("Error parsing query: %s", err.Error()) + badRequest(res) + return nil, false + } + + return []byte(query), true +} + +func tooLarge(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + 
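// 413 tells the client that the payload exceeded max_body_size. +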
res.WriteHeader(http.StatusRequestEntityTooLarge) + res.Write([]byte(`{"error":"http: request body too large"}`)) +} + +func methodNotAllowed(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusMethodNotAllowed) + res.Write([]byte(`{"error":"http: method not allowed"}`)) +} + +func internalServerError(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusInternalServerError) +} + +func badRequest(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusBadRequest) + res.Write([]byte(`{"error":"http: bad request"}`)) +} + +func (h *HTTPListenerV2) authenticateIfSet(handler http.HandlerFunc, res http.ResponseWriter, req *http.Request) { + if h.BasicUsername != "" && h.BasicPassword != "" { + reqUsername, reqPassword, ok := req.BasicAuth() + if !ok || + subtle.ConstantTimeCompare([]byte(reqUsername), []byte(h.BasicUsername)) != 1 || + subtle.ConstantTimeCompare([]byte(reqPassword), []byte(h.BasicPassword)) != 1 { + + http.Error(res, "Unauthorized.", http.StatusUnauthorized) + return + } + handler(res, req) + } else { + handler(res, req) + } +} + +func init() { + inputs.Add("http_listener_v2", func() telegraf.Input { + return &HTTPListenerV2{ + ServiceAddress: ":8080", + TimeFunc: time.Now, + Path: "/telegraf", + Methods: []string{"POST", "PUT"}, + DataSource: body, + } + }) +} diff --git a/plugins/inputs/http_listener/http_listener_test.go b/plugins/inputs/http_listener_v2/http_listener_v2_test.go similarity index 94% rename from plugins/inputs/http_listener/http_listener_test.go rename to plugins/inputs/http_listener_v2/http_listener_v2_test.go index 7c6cdf728..c06b3908d 100644 --- a/plugins/inputs/http_listener/http_listener_test.go +++ b/plugins/inputs/http_listener_v2/http_listener_v2_test.go @@ -1,4 +1,4 @@ -package http_listener +package http_listener_v2 import ( "bytes" @@ -13,8 +13,9 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" ) @@ -41,24 +42,38 @@ var ( pki = testutil.NewPKI("../../../testutil/pki") ) -func newTestHTTPListener() *HTTPListener { - listener := &HTTPListener{ +func newTestHTTPListenerV2() *HTTPListenerV2 { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, TimeFunc: time.Now, + MaxBodySize: internal.Size{Size: 70000}, + DataSource: "body", } return listener } -func newTestHTTPAuthListener() *HTTPListener { - listener := newTestHTTPListener() +func newTestHTTPAuthListener() *HTTPListenerV2 { + listener := newTestHTTPListenerV2() listener.BasicUsername = basicUsername listener.BasicPassword = basicPassword return listener } -func newTestHTTPSListener() *HTTPListener { - listener := &HTTPListener{ +func newTestHTTPSListenerV2() *HTTPListenerV2 { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, ServerConfig: *pki.TLSServerConfig(), TimeFunc: time.Now, } @@ -78,7 +93,7 @@ func getHTTPSClient() *http.Client { } } -func createURL(listener *HTTPListener, scheme string, path string, rawquery string) string { +func createURL(listener *HTTPListenerV2, scheme string, 
path string, rawquery string) string { u := url.URL{ Scheme: scheme, Host: "localhost:" + strconv.Itoa(listener.Port), @@ -89,7 +104,7 @@ func createURL(listener *HTTPListener, scheme string, path string, rawquery stri } func TestWriteHTTPSNoClientAuth(t *testing.T) { - listener := newTestHTTPSListener() + listener := newTestHTTPSListenerV2() listener.TLSAllowedCACerts = nil acc := &testutil.Accumulator{} @@ -114,7 +129,7 @@ func TestWriteHTTPSNoClientAuth(t *testing.T) { } func TestWriteHTTPSWithClientAuth(t *testing.T) { - listener := newTestHTTPSListener() + listener := newTestHTTPSListenerV2() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -146,7 +161,7 @@ func TestWriteHTTPBasicAuth(t *testing.T) { } func TestWriteHTTP(t *testing.T) { - listener := newTestHTTPListener() + listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -184,7 +199,7 @@ func TestWriteHTTP(t *testing.T) { resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) resp.Body.Close() - require.EqualValues(t, 400, resp.StatusCode) + require.EqualValues(t, 413, resp.StatusCode) acc.Wait(3) acc.AssertContainsTaggedFields(t, "cpu_load_short", @@ -195,7 +210,7 @@ func TestWriteHTTP(t *testing.T) { // http listener should add a newline at the end of the buffer if it's not there func TestWriteHTTPNoNewline(t *testing.T) { - listener := newTestHTTPListener() + listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -214,10 +229,16 @@ func TestWriteHTTPNoNewline(t *testing.T) { ) } -func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { - listener := &HTTPListener{ +func TestWriteHTTPExactMaxBodySize(t *testing.T) { + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", - MaxLineSize: 128 * 1000, + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, + MaxBodySize: internal.Size{Size: int64(len(hugeMetric))}, TimeFunc: time.Now, } @@ -225,17 +246,22 @@ func TestWriteHTTPMaxLineSizeIncrease(t *testing.T) { require.NoError(t, listener.Start(acc)) defer listener.Stop() - // Post a gigantic metric to the listener and verify that it writes OK this time: - resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) require.NoError(t, err) resp.Body.Close() require.EqualValues(t, 204, resp.StatusCode) } func TestWriteHTTPVerySmallMaxBody(t *testing.T) { - listener := &HTTPListener{ + parser, _ := parsers.NewInfluxParser() + + listener := &HTTPListenerV2{ + Log: testutil.Logger{}, ServiceAddress: "localhost:0", - MaxBodySize: 4096, + Path: "/write", + Methods: []string{"POST"}, + Parser: parser, + MaxBodySize: internal.Size{Size: 4096}, TimeFunc: time.Now, } @@ -249,63 +275,9 @@ func TestWriteHTTPVerySmallMaxBody(t *testing.T) { require.EqualValues(t, 413, resp.StatusCode) } -func TestWriteHTTPVerySmallMaxLineSize(t *testing.T) { - listener := &HTTPListener{ - ServiceAddress: "localhost:0", - MaxLineSize: 70, - TimeFunc: time.Now, - } - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(testMsgs))) - require.NoError(t, err) - 
resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) - - hostTags := []string{"server02", "server03", - "server04", "server05", "server06"} - acc.Wait(len(hostTags)) - for _, hostTag := range hostTags { - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": hostTag}, - ) - } -} - -func TestWriteHTTPLargeLinesSkipped(t *testing.T) { - listener := &HTTPListener{ - ServiceAddress: "localhost:0", - MaxLineSize: 100, - TimeFunc: time.Now, - } - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 400, resp.StatusCode) - - hostTags := []string{"server02", "server03", - "server04", "server05", "server06"} - acc.Wait(len(hostTags)) - for _, hostTag := range hostTags { - acc.AssertContainsTaggedFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(12)}, - map[string]string{"host": hostTag}, - ) - } -} - // test that writing gzipped data works func TestWriteHTTPGzippedData(t *testing.T) { - listener := newTestHTTPListener() + listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -336,10 +308,10 @@ func TestWriteHTTPGzippedData(t *testing.T) { // writes 25,000 metrics to the listener with 10 different writers func TestWriteHTTPHighTraffic(t *testing.T) { - if runtime.GOOS != "darwin" { + if runtime.GOOS == "darwin" { t.Skip("Skipping due to hang on darwin") } - listener := newTestHTTPListener() + listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -368,7 +340,7 @@ func TestWriteHTTPHighTraffic(t *testing.T) { } func TestReceive404ForInvalidEndpoint(t *testing.T) { - listener := newTestHTTPListener() + listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -382,7 +354,7 @@ func TestReceive404ForInvalidEndpoint(t *testing.T) { } func TestWriteHTTPInvalid(t *testing.T) { - listener := newTestHTTPListener() + listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -396,7 +368,7 @@ func TestWriteHTTPInvalid(t *testing.T) { } func TestWriteHTTPEmpty(t *testing.T) { - listener := newTestHTTPListener() + listener := newTestHTTPListenerV2() acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) @@ -409,65 +381,117 @@ func TestWriteHTTPEmpty(t *testing.T) { require.EqualValues(t, 204, resp.StatusCode) } -func TestQueryAndPingHTTP(t *testing.T) { - listener := newTestHTTPListener() +func TestWriteHTTPTransformHeaderValuesToTagsSingleWrite(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "Present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"} acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() - // post query to listener - resp, err := http.Post( - createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsg))) require.NoError(t, err) - require.EqualValues(t, 200, resp.StatusCode) + 
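// Set request headers; those listed in http_header_tags should become tags on the parsed metrics. +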
req.Header.Set("Content-Type", "") + req.Header.Set("Present_http_header_1", "PRESENT_HTTP_VALUE_1") + req.Header.Set("Present_http_header_2", "PRESENT_HTTP_VALUE_2") - // post ping to listener - resp, err = http.Post(createURL(listener, "http", "/ping", ""), "", nil) - require.NoError(t, err) - resp.Body.Close() - require.EqualValues(t, 204, resp.StatusCode) -} - -func TestWriteWithPrecision(t *testing.T) { - listener := newTestHTTPListener() - - acc := &testutil.Accumulator{} - require.NoError(t, listener.Start(acc)) - defer listener.Stop() - - msg := "xyzzy value=42 1422568543\n" - resp, err := http.Post( - createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) + resp, err := http.DefaultClient.Do(req) require.NoError(t, err) resp.Body.Close() require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) - require.Equal(t, 1, len(acc.Metrics)) - require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "presentMeasurementKey1": "PRESENT_HTTP_VALUE_1", "presentMeasurementKey2": "PRESENT_HTTP_VALUE_2"}, + ) + + // post single message to listener + resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "presentMeasurementKey1": "PRESENT_HTTP_VALUE_1", "presentMeasurementKey2": "PRESENT_HTTP_VALUE_2"}, + ) } -func TestWriteWithPrecisionNoTimestamp(t *testing.T) { - listener := newTestHTTPListener() - listener.TimeFunc = func() time.Time { - return time.Unix(42, 123456789) +func TestWriteHTTPTransformHeaderValuesToTagsBulkWrite(t *testing.T) { + listener := newTestHTTPListenerV2() + listener.HTTPHeaderTags = map[string]string{"Present_http_header_1": "presentMeasurementKey1", "Present_http_header_2": "presentMeasurementKey2", "NOT_PRESENT_HEADER": "notPresentMeasurementKey"} + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsgs))) + require.NoError(t, err) + req.Header.Set("Content-Type", "") + req.Header.Set("Present_http_header_1", "PRESENT_HTTP_VALUE_1") + req.Header.Set("Present_http_header_2", "PRESENT_HTTP_VALUE_2") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(2) + hostTags := []string{"server02", "server03", "server04", "server05", "server06"} + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag, "presentMeasurementKey1": "PRESENT_HTTP_VALUE_1", "presentMeasurementKey2": "PRESENT_HTTP_VALUE_2"}, + ) } +} + +func TestWriteHTTPQueryParams(t *testing.T) { + parser, _ := parsers.NewFormUrlencodedParser("query_measurement", nil, []string{"tagKey"}) + listener := newTestHTTPListenerV2() + listener.DataSource = "query" + listener.Parser = parser acc := &testutil.Accumulator{} require.NoError(t, listener.Start(acc)) defer listener.Stop() - msg := "xyzzy value=42\n" - resp, err := http.Post( - createURL(listener, "http", 
"/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) + resp, err := http.Post(createURL(listener, "http", "/write", "tagKey=tagValue&fieldKey=42"), "", bytes.NewBuffer([]byte(emptyMsg))) require.NoError(t, err) resp.Body.Close() require.EqualValues(t, 204, resp.StatusCode) acc.Wait(1) - require.Equal(t, 1, len(acc.Metrics)) - require.Equal(t, time.Unix(42, 0), acc.Metrics[0].Time) + acc.AssertContainsTaggedFields(t, "query_measurement", + map[string]interface{}{"fieldKey": float64(42)}, + map[string]string{"tagKey": "tagValue"}, + ) +} + +func TestWriteHTTPFormData(t *testing.T) { + parser, _ := parsers.NewFormUrlencodedParser("query_measurement", nil, []string{"tagKey"}) + listener := newTestHTTPListenerV2() + listener.Parser = parser + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.PostForm(createURL(listener, "http", "/write", ""), url.Values{ + "tagKey": {"tagValue"}, + "fieldKey": {"42"}, + }) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "query_measurement", + map[string]interface{}{"fieldKey": float64(42)}, + map[string]string{"tagKey": "tagValue"}, + ) } const hugeMetric = `super_long_metric,foo=bar clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes
_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_chi
ldren=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitra
te=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048
576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33
792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fr
agmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,tota
l_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evi
cted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=
0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_
user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hit
s=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i
,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_
peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio
=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_rec
eived=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expi
red_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_a
ctive=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu
_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_mis
ses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err
=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i diff --git a/plugins/inputs/http_listener/testdata/testmsgs.gz b/plugins/inputs/http_listener_v2/testdata/testmsgs.gz similarity index 100% rename from plugins/inputs/http_listener/testdata/testmsgs.gz rename to plugins/inputs/http_listener_v2/testdata/testmsgs.gz diff --git a/plugins/inputs/http_response/README.md b/plugins/inputs/http_response/README.md index 4ccd236a5..f1d1ab2d5 100644 --- a/plugins/inputs/http_response/README.md +++ b/plugins/inputs/http_response/README.md @@ -7,9 +7,13 @@ This input plugin checks HTTP/HTTPS connections. ``` # HTTP/HTTPS request given an address a method and a timeout [[inputs.http_response]] + ## Deprecated in 1.12, use 'urls' ## Server address (default http://localhost) # address = "http://localhost" + ## List of urls to query. + # urls = ["http://localhost"] + ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) # http_proxy = "http://localhost:8888" @@ -27,7 +31,7 @@ This input plugin checks HTTP/HTTPS connections. # {'fake':'data'} # ''' - ## Optional substring or regex match in body of the response + ## Optional substring or regex match in body of the response (case sensitive) # response_string_match = "\"service_status\": \"up\"" # response_string_match = "ok" # response_string_match = "\".*_status\".?:.?\"up\"" @@ -42,6 +46,14 @@ This input plugin checks HTTP/HTTPS connections. ## HTTP Request Headers (all values must be strings) # [inputs.http_response.headers] # Host = "github.com" + + ## Optional setting to map reponse http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + + ## Interface to use when dialing an address + # interface = "eth0" ``` ### Metrics: @@ -54,6 +66,8 @@ This input plugin checks HTTP/HTTPS connections. - result ([see below](#result--result_code)) - fields: - response_time (float, seconds) + - content_length (int, response body length) + - response_string_match (int, 0 = mismatch / body read error, 1 = match) - http_response_code (int, response status code) - result_type (string, deprecated in 1.6: use `result` tag and `result_code` field) - result_code (int, [see below](#result--result_code)) @@ -67,8 +81,8 @@ This tag is used to expose network and plugin errors. 
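The `result` tag and the numeric `result_code` field are written together by the plugin's `setResult` helper from one lookup table; a minimal, self-contained sketch of that pairing, with the code values taken from the table below (only the shape of the helper is assumed here, not its exact source):

```go
package main

import "fmt"

// setResult mirrors the shape of the plugin's helper of the same name: it
// writes the textual outcome into the "result" tag and the matching numeric
// code into the "result_code" field from one shared lookup table.
func setResult(result string, fields map[string]interface{}, tags map[string]string) {
	resultCodes := map[string]int{
		"success":                  0,
		"response_string_mismatch": 1,
		"body_read_error":          2,
		"connection_failed":        3,
		"timeout":                  4,
		"dns_error":                5,
	}

	tags["result"] = result
	fields["result_type"] = result // deprecated in 1.6, kept for compatibility
	fields["result_code"] = resultCodes[result]
}

func main() {
	fields := map[string]interface{}{}
	tags := map[string]string{}
	setResult("timeout", fields, tags)
	fmt.Println(tags["result"], fields["result_code"]) // timeout 4
}
```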
HTTP errors are considered |Tag value |Corresponding field value|Description| --------------------------|-------------------------|-----------| |success | 0 |The HTTP request completed, even if the HTTP code represents an error| -|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex| -|body_read_error | 2 |The option `response_string_match` was used, but the plugin wans't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error| +|response_string_mismatch | 1 |The option `response_string_match` was used, and the body of the response didn't match the regex. HTTP errors with content in their body (like 4xx, 5xx) will trigger this error| +|body_read_error | 2 |The option `response_string_match` was used, but the plugin wasn't able to read the body of the response. Responses with empty bodies (like 3xx, HEAD, etc) will trigger this error| |connection_failed | 3 |Catch all for any network error not specifically handled by the plugin| |timeout | 4 |The plugin timed out while awaiting the HTTP connection to complete| |dns_error | 5 |There was a DNS error while attempting to connect to the host| @@ -77,5 +91,5 @@ This tag is used to expose network and plugin errors. HTTP errors are considered ### Example Output: ``` -http_response,method=GET,server=http://www.github.com,status_code=200,result=success http_response_code=200i,response_time=6.223266528,result_type="success",result_code=0i 1459419354977857955 +http_response,method=GET,result=success,server=http://github.com,status_code=200 content_length=87878i,http_response_code=200i,response_time=0.937655534,result_code=0i,result_type="success" 1565839598000000000 ``` diff --git a/plugins/inputs/http_response/http_response.go b/plugins/inputs/http_response/http_response.go index 1f1f68707..bc9452efc 100644 --- a/plugins/inputs/http_response/http_response.go +++ b/plugins/inputs/http_response/http_response.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "io/ioutil" - "log" "net" "net/http" "net/url" @@ -22,16 +21,26 @@ import ( // HTTPResponse struct type HTTPResponse struct { - Address string - HTTPProxy string `toml:"http_proxy"` - Body string - Method string - ResponseTimeout internal.Duration - Headers map[string]string - FollowRedirects bool + Address string // deprecated in 1.12 + URLs []string `toml:"urls"` + HTTPProxy string `toml:"http_proxy"` + Body string + Method string + ResponseTimeout internal.Duration + HTTPHeaderTags map[string]string `toml:"http_header_tags"` + Headers map[string]string + FollowRedirects bool + // Absolute path to file with Bearer token + BearerToken string `toml:"bearer_token"` ResponseStringMatch string + Interface string + // HTTP Basic Auth Credentials + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig + Log telegraf.Logger + compiledStringMatch *regexp.Regexp client *http.Client } @@ -42,9 +51,13 @@ func (h *HTTPResponse) Description() string { } var sampleConfig = ` + ## Deprecated in 1.12, use 'urls' ## Server address (default http://localhost) # address = "http://localhost" + ## List of urls to query. 
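The `HTTPHeaderTags` field added to the struct above drives the `http_header_tags` behaviour applied later in `httpGather`: for each configured header, a tag is added only if the header is present on the response, and only its first value is used. A minimal sketch of that mapping against a plain `net/http` response (the header and tag names are illustrative, and the helper name is not from the plugin):

```go
package main

import (
	"fmt"
	"net/http"
)

// responseHeaderTags copies selected response headers into tags, matching the
// behaviour added to httpGather: an absent header adds no tag, and only the
// first value of a repeated header is used.
func responseHeaderTags(resp *http.Response, mapping map[string]string) map[string]string {
	tags := map[string]string{}
	for headerName, tagName := range mapping {
		if values, ok := resp.Header[headerName]; ok && len(values) > 0 {
			tags[tagName] = values[0]
		}
	}
	return tags
}

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("Server", "MyTestServer")
	resp.Header.Set("Content-Type", "application/json; charset=utf-8")

	// Same shape as the sample config: http_header_tags = {"HTTP_HEADER" = "TAG_NAME"}
	mapping := map[string]string{"Server": "my_server", "Content-Type": "content_type"}
	fmt.Println(responseHeaderTags(resp, mapping))
	// map[content_type:application/json; charset=utf-8 my_server:MyTestServer]
}
```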
+ # urls = ["http://localhost"] + ## Set http_proxy (telegraf uses the system wide proxy settings if it's is not set) # http_proxy = "http://localhost:8888" @@ -57,6 +70,14 @@ var sampleConfig = ` ## Whether to follow redirects from the server (defaults to false) # follow_redirects = false + ## Optional file with Bearer token + ## file content is added as an Authorization header + # bearer_token = "/path/to/file" + + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" + ## Optional HTTP Request Body # body = ''' # {'fake':'data'} @@ -77,6 +98,14 @@ var sampleConfig = ` ## HTTP Request Headers (all values must be strings) # [inputs.http_response.headers] # Host = "github.com" + + ## Optional setting to map reponse http headers into tags + ## If the http header is not present on the request, no corresponding tag will be added + ## If multiple instances of the http header are present, only the first value will be used + # http_header_tags = {"HTTP_HEADER" = "TAG_NAME"} + + ## Interface to use when dialing an address + # interface = "eth0" ` // SampleConfig returns the plugin SampleConfig @@ -103,16 +132,27 @@ func getProxyFunc(http_proxy string) func(*http.Request) (*url.URL, error) { } } -// CreateHttpClient creates an http client which will timeout at the specified +// createHttpClient creates an http client which will timeout at the specified // timeout period and can follow redirects if specified func (h *HTTPResponse) createHttpClient() (*http.Client, error) { tlsCfg, err := h.ClientConfig.TLSConfig() if err != nil { return nil, err } + + dialer := &net.Dialer{} + + if h.Interface != "" { + dialer.LocalAddr, err = localAddress(h.Interface) + if err != nil { + return nil, err + } + } + client := &http.Client{ Transport: &http.Transport{ Proxy: getProxyFunc(h.HTTPProxy), + DialContext: dialer.DialContext, DisableKeepAlives: true, TLSClientConfig: tlsCfg, }, @@ -121,12 +161,33 @@ func (h *HTTPResponse) createHttpClient() (*http.Client, error) { if h.FollowRedirects == false { client.CheckRedirect = func(req *http.Request, via []*http.Request) error { - return ErrRedirectAttempted + return http.ErrUseLastResponse } } return client, nil } +func localAddress(interfaceName string) (net.Addr, error) { + i, err := net.InterfaceByName(interfaceName) + if err != nil { + return nil, err + } + + addrs, err := i.Addrs() + if err != nil { + return nil, err + } + + for _, addr := range addrs { + if naddr, ok := addr.(*net.IPNet); ok { + // leaving port set to zero to let kernel pick + return &net.TCPAddr{IP: naddr.IP}, nil + } + } + + return nil, fmt.Errorf("cannot create local address for interface %q", interfaceName) +} + func setResult(result_string string, fields map[string]interface{}, tags map[string]string) { result_codes := map[string]int{ "success": 0, @@ -171,20 +232,29 @@ func setError(err error, fields map[string]interface{}, tags map[string]string) } // HTTPGather gathers all fields and returns any errors it encounters -func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string, error) { +func (h *HTTPResponse) httpGather(u string) (map[string]interface{}, map[string]string, error) { // Prepare fields and tags fields := make(map[string]interface{}) - tags := map[string]string{"server": h.Address, "method": h.Method} + tags := map[string]string{"server": u, "method": h.Method} var body io.Reader if h.Body != "" { body = strings.NewReader(h.Body) } - request, err := http.NewRequest(h.Method, h.Address, body) + request, err := 
http.NewRequest(h.Method, u, body) if err != nil { return nil, nil, err } + if h.BearerToken != "" { + token, err := ioutil.ReadFile(h.BearerToken) + if err != nil { + return nil, nil, err + } + bearer := "Bearer " + strings.Trim(string(token), "\n") + request.Header.Add("Authorization", bearer) + } + for key, val := range h.Headers { request.Header.Add(key, val) if key == "Host" { @@ -192,6 +262,10 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string, } } + if h.Username != "" || h.Password != "" { + request.SetBasicAuth(h.Username, h.Password) + } + // Start Timer start := time.Now() resp, err := h.client.Do(request) @@ -201,28 +275,19 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string, // HTTP error codes do not generate errors in the net/http library if err != nil { // Log error - log.Printf("D! Network error while polling %s: %s", h.Address, err.Error()) + h.Log.Debugf("Network error while polling %s: %s", u, err.Error()) // Get error details netErr := setError(err, fields, tags) - // If recognize the returnded error, get out + // If recognize the returned error, get out if netErr != nil { return fields, tags, nil } // Any error not recognized by `set_error` is considered a "connection_failed" setResult("connection_failed", fields, tags) - - // If the error is a redirect we continue processing and log the HTTP code - urlError, isUrlError := err.(*url.Error) - if !h.FollowRedirects && isUrlError && urlError.Err == ErrRedirectAttempted { - err = nil - } else { - // If the error isn't a timeout or a redirect stop - // processing the request - return fields, tags, nil - } + return fields, tags, nil } if _, ok := fields["response_time"]; !ok { @@ -231,26 +296,35 @@ func (h *HTTPResponse) httpGather() (map[string]interface{}, map[string]string, // This function closes the response body, as // required by the net/http library - defer func() { - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() + defer resp.Body.Close() + + // Add the response headers + for headerName, tag := range h.HTTPHeaderTags { + headerValues, foundHeader := resp.Header[headerName] + if foundHeader && len(headerValues) > 0 { + tags[tag] = headerValues[0] + } + } // Set log the HTTP response code tags["status_code"] = strconv.Itoa(resp.StatusCode) fields["http_response_code"] = resp.StatusCode + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + h.Log.Debugf("Failed to read body of HTTP Response : %s", err.Error()) + setResult("body_read_error", fields, tags) + fields["content_length"] = len(bodyBytes) + if h.ResponseStringMatch != "" { + fields["response_string_match"] = 0 + } + return fields, tags, nil + } + + fields["content_length"] = len(bodyBytes) + // Check the response for a regex match. if h.ResponseStringMatch != "" { - - bodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - log.Printf("D! 
Failed to read body of HTTP Response : %s", err) - setResult("body_read_error", fields, tags) - fields["response_string_match"] = 0 - return fields, tags, nil - } - if h.compiledStringMatch.Match(bodyBytes) { setResult("success", fields, tags) fields["response_string_match"] = 1 @@ -284,20 +358,15 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { if h.Method == "" { h.Method = "GET" } - if h.Address == "" { - h.Address = "http://localhost" - } - addr, err := url.Parse(h.Address) - if err != nil { - return err - } - if addr.Scheme != "http" && addr.Scheme != "https" { - return errors.New("Only http and https are supported") - } - // Prepare data - var fields map[string]interface{} - var tags map[string]string + if len(h.URLs) == 0 { + if h.Address == "" { + h.URLs = []string{"http://localhost"} + } else { + h.Log.Warn("'address' deprecated in telegraf 1.12, please use 'urls'") + h.URLs = []string{h.Address} + } + } if h.client == nil { client, err := h.createHttpClient() @@ -307,14 +376,33 @@ func (h *HTTPResponse) Gather(acc telegraf.Accumulator) error { h.client = client } - // Gather data - fields, tags, err = h.httpGather() - if err != nil { - return err + for _, u := range h.URLs { + addr, err := url.Parse(u) + if err != nil { + acc.AddError(err) + continue + } + + if addr.Scheme != "http" && addr.Scheme != "https" { + acc.AddError(errors.New("Only http and https are supported")) + continue + } + + // Prepare data + var fields map[string]interface{} + var tags map[string]string + + // Gather data + fields, tags, err = h.httpGather(u) + if err != nil { + acc.AddError(err) + continue + } + + // Add metrics + acc.AddFields("http_response", fields, tags) } - // Add metrics - acc.AddFields("http_response", fields, tags) return nil } diff --git a/plugins/inputs/http_response/http_response_test.go b/plugins/inputs/http_response/http_response_test.go index 7d3780cec..9986ddefc 100644 --- a/plugins/inputs/http_response/http_response_test.go +++ b/plugins/inputs/http_response/http_response_test.go @@ -1,16 +1,18 @@ package http_response import ( + "errors" "fmt" "io/ioutil" + "net" "net/http" "net/http/httptest" "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -34,6 +36,7 @@ func checkAbsentTags(t *testing.T, tags []string, acc *testutil.Accumulator) { // Receives a dictionary and with expected fields and their values. 
If a value is nil, it will only check // that the field exists, but not its contents func checkFields(t *testing.T, fields map[string]interface{}, acc *testutil.Accumulator) { + t.Helper() for key, field := range fields { switch v := field.(type) { case int: @@ -83,6 +86,11 @@ func setUpTestMux() http.Handler { http.Redirect(w, req, "/good", http.StatusMovedPermanently) }) mux.HandleFunc("/good", func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Server", "MyTestServer") + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "hit the good page!") + }) + mux.HandleFunc("/noheader", func(w http.ResponseWriter, req *http.Request) { fmt.Fprintf(w, "hit the good page!") }) mux.HandleFunc("/jsonresponse", func(w http.ResponseWriter, req *http.Request) { @@ -119,6 +127,7 @@ func setUpTestMux() http.Handler { } func checkOutput(t *testing.T, acc *testutil.Accumulator, presentFields map[string]interface{}, presentTags map[string]interface{}, absentFields []string, absentTags []string) { + t.Helper() if presentFields != nil { checkFields(t, presentFields, acc) } @@ -146,6 +155,7 @@ func TestHeaders(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL, Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 2}, @@ -163,6 +173,7 @@ func TestHeaders(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -180,6 +191,7 @@ func TestFields(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/good", Body: "{ 'test': 'data'}", Method: "GET", @@ -199,6 +211,163 @@ func TestFields(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} + +func TestHTTPHeaderTags(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/good", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + "my_server": "MyTestServer", + "content_type": "application/json; charset=utf-8", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) + + h = &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/noheader", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, + Headers: map[string]string{ + 
"Content-Type": "application/json", + }, + FollowRedirects: true, + } + + acc = testutil.Accumulator{} + err = h.Gather(&acc) + require.NoError(t, err) + + expectedTags = map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) + + // Connection failed + h = &HTTPResponse{ + Log: testutil.Logger{}, + Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here + Body: "", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + HTTPHeaderTags: map[string]string{"Server": "my_server", "Content-Type": "content_type"}, + FollowRedirects: false, + } + + acc = testutil.Accumulator{} + err = h.Gather(&acc) + require.NoError(t, err) + + expectedFields = map[string]interface{}{ + "result_type": "connection_failed", + "result_code": 3, + } + expectedTags = map[string]interface{}{ + "server": nil, + "method": "GET", + "result": "connection_failed", + } + absentFields = []string{"http_response_code", "response_time", "content_length", "response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} + +func findInterface() (net.Interface, error) { + potential, _ := net.Interfaces() + + for _, i := range potential { + // we are only interest in loopback interfaces which are up + if (i.Flags&net.FlagUp == 0) || (i.Flags&net.FlagLoopback == 0) { + continue + } + + if addrs, _ := i.Addrs(); len(addrs) > 0 { + // return interface if it has at least one unicast address + return i, nil + } + } + + return net.Interface{}, errors.New("cannot find suitable loopback interface") +} + +func TestInterface(t *testing.T) { + var ( + mux = setUpTestMux() + ts = httptest.NewServer(mux) + ) + + defer ts.Close() + + intf, err := findInterface() + require.NoError(t, err) + + h := &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/good", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + Interface: intf.Name, + } + + var acc testutil.Accumulator + err = h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -216,6 +385,7 @@ func TestRedirects(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/redirect", Body: "{ 'test': 'data'}", Method: "GET", @@ -234,6 +404,7 @@ func TestRedirects(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -245,6 +416,7 @@ func TestRedirects(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) h = &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/badredirect", Body: "{ 'test': 'data'}", Method: "GET", @@ -281,6 +453,7 @@ func TestMethod(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/mustbepostmethod", Body: "{ 'test': 'data'}", Method: "POST", @@ -299,6 +472,7 @@ func TestMethod(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ 
-310,6 +484,7 @@ func TestMethod(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) h = &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/mustbepostmethod", Body: "{ 'test': 'data'}", Method: "GET", @@ -328,6 +503,7 @@ func TestMethod(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags = map[string]interface{}{ "server": nil, @@ -340,6 +516,7 @@ func TestMethod(t *testing.T) { //check that lowercase methods work correctly h = &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/mustbepostmethod", Body: "{ 'test': 'data'}", Method: "head", @@ -358,6 +535,7 @@ func TestMethod(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags = map[string]interface{}{ "server": nil, @@ -375,6 +553,7 @@ func TestBody(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/musthaveabody", Body: "{ 'test': 'data'}", Method: "GET", @@ -393,6 +572,7 @@ func TestBody(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -404,6 +584,7 @@ func TestBody(t *testing.T) { checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) h = &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/musthaveabody", Method: "GET", ResponseTimeout: internal.Duration{Duration: time.Second * 20}, @@ -437,6 +618,7 @@ func TestStringMatch(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/good", Body: "{ 'test': 'data'}", Method: "GET", @@ -457,6 +639,7 @@ func TestStringMatch(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -473,6 +656,7 @@ func TestStringMatchJson(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/jsonresponse", Body: "{ 'test': 'data'}", Method: "GET", @@ -493,6 +677,7 @@ func TestStringMatchJson(t *testing.T) { "result_type": "success", "result_code": 0, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -509,6 +694,7 @@ func TestStringMatchFail(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/good", Body: "{ 'test': 'data'}", Method: "GET", @@ -530,6 +716,7 @@ func TestStringMatchFail(t *testing.T) { "result_type": "response_string_mismatch", "result_code": 1, "response_time": nil, + "content_length": nil, } expectedTags := map[string]interface{}{ "server": nil, @@ -550,6 +737,7 @@ func TestTimeout(t *testing.T) { defer ts.Close() h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/twosecondnap", Body: "{ 'test': 'data'}", Method: "GET", @@ -572,18 +760,18 @@ func TestTimeout(t *testing.T) { "method": "GET", "result": "timeout", } - absentFields := []string{"http_response_code", "response_time", "response_string_match"} + absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match"} absentTags := []string{"status_code"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags) } -func TestPluginErrors(t *testing.T) { +func TestBadRegex(t *testing.T) { mux := setUpTestMux() ts := httptest.NewServer(mux) defer ts.Close() - // Bad regex test. 
Should return an error and return nothing h := &HTTPResponse{ + Log: testutil.Logger{}, Address: ts.URL + "/good", Body: "{ 'test': 'data'}", Method: "GET", @@ -599,43 +787,15 @@ func TestPluginErrors(t *testing.T) { err := h.Gather(&acc) require.Error(t, err) - absentFields := []string{"http_response_code", "response_time", "response_string_match", "result_type", "result_code"} + absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match", "result_type", "result_code"} absentTags := []string{"status_code", "result", "server", "method"} checkOutput(t, &acc, nil, nil, absentFields, absentTags) - - // Attempt to read empty body test - h = &HTTPResponse{ - Address: ts.URL + "/redirect", - Body: "", - Method: "GET", - ResponseStringMatch: ".*", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, - FollowRedirects: false, - } - - acc = testutil.Accumulator{} - err = h.Gather(&acc) - require.NoError(t, err) - - expectedFields := map[string]interface{}{ - "http_response_code": http.StatusMovedPermanently, - "response_string_match": 0, - "result_type": "body_read_error", - "result_code": 2, - "response_time": nil, - } - expectedTags := map[string]interface{}{ - "server": nil, - "method": "GET", - "status_code": "301", - "result": "body_read_error", - } - checkOutput(t, &acc, expectedFields, expectedTags, nil, nil) } func TestNetworkErrors(t *testing.T) { // DNS error h := &HTTPResponse{ + Log: testutil.Logger{}, Address: "https://nonexistent.nonexistent", // Any non-resolvable URL works here Body: "", Method: "GET", @@ -656,16 +816,17 @@ func TestNetworkErrors(t *testing.T) { "method": "GET", "result": "dns_error", } - absentFields := []string{"http_response_code", "response_time", "response_string_match"} + absentFields := []string{"http_response_code", "response_time", "content_length", "response_string_match"} absentTags := []string{"status_code"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags) - // Connecton failed + // Connection failed h = &HTTPResponse{ - Address: "https://127.127.127.127", // Any non-routable IP works here + Log: testutil.Logger{}, + Address: "https:/nonexistent.nonexistent", // Any non-routable IP works here Body: "", Method: "GET", - ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + ResponseTimeout: internal.Duration{Duration: time.Second * 5}, FollowRedirects: false, } @@ -682,7 +843,164 @@ func TestNetworkErrors(t *testing.T) { "method": "GET", "result": "connection_failed", } - absentFields = []string{"http_response_code", "response_time", "response_string_match"} + absentFields = []string{"http_response_code", "response_time", "content_length", "response_string_match"} absentTags = []string{"status_code"} checkOutput(t, &acc, expectedFields, expectedTags, absentFields, absentTags) } + +func TestContentLength(t *testing.T) { + mux := setUpTestMux() + ts := httptest.NewServer(mux) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + URLs: []string{ts.URL + "/good"}, + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": len([]byte("hit the 
good page!")), + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) + + h = &HTTPResponse{ + Log: testutil.Logger{}, + URLs: []string{ts.URL + "/musthaveabody"}, + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Headers: map[string]string{ + "Content-Type": "application/json", + }, + FollowRedirects: true, + } + acc = testutil.Accumulator{} + err = h.Gather(&acc) + require.NoError(t, err) + + expectedFields = map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": len([]byte("sent a body!")), + } + expectedTags = map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields = []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} + +func TestRedirect(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Location", "http://example.org") + w.WriteHeader(http.StatusMovedPermanently) + w.Write([]byte("test")) + }) + + plugin := &HTTPResponse{ + URLs: []string{ts.URL}, + ResponseStringMatch: "test", + } + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "http_response", + map[string]string{ + "server": ts.URL, + "method": "GET", + "result": "success", + "status_code": "301", + }, + map[string]interface{}{ + "result_code": 0, + "result_type": "success", + "http_response_code": 301, + "response_string_match": 1, + "content_length": 4, + }, + time.Unix(0, 0), + ), + } + + actual := acc.GetTelegrafMetrics() + for _, m := range actual { + m.RemoveField("response_time") + } + + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestBasicAuth(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + aHeader := r.Header.Get("Authorization") + assert.Equal(t, "Basic bWU6bXlwYXNzd29yZA==", aHeader) + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + h := &HTTPResponse{ + Log: testutil.Logger{}, + Address: ts.URL + "/good", + Body: "{ 'test': 'data'}", + Method: "GET", + ResponseTimeout: internal.Duration{Duration: time.Second * 20}, + Username: "me", + Password: "mypassword", + Headers: map[string]string{ + "Content-Type": "application/json", + }, + } + + var acc testutil.Accumulator + err := h.Gather(&acc) + require.NoError(t, err) + + expectedFields := map[string]interface{}{ + "http_response_code": http.StatusOK, + "result_type": "success", + "result_code": 0, + "response_time": nil, + "content_length": nil, + } + expectedTags := map[string]interface{}{ + "server": nil, + "method": "GET", + "status_code": "200", + "result": "success", + } + absentFields := []string{"response_string_match"} + checkOutput(t, &acc, expectedFields, expectedTags, absentFields, nil) +} diff --git a/plugins/inputs/httpjson/httpjson.go b/plugins/inputs/httpjson/httpjson.go index c7324dee4..7feff1a84 100644 --- a/plugins/inputs/httpjson/httpjson.go +++ b/plugins/inputs/httpjson/httpjson.go @@ -42,7 +42,7 @@ type HTTPClient 
interface { // req: HTTP request object // // Returns: - // http.Response: HTTP respons object + // http.Response: HTTP response object // error : Any error that may have occurred MakeRequest(req *http.Request) (*http.Response, error) @@ -181,7 +181,12 @@ func (h *HttpJson) gatherServer( "server": serverURL, } - parser, err := parsers.NewJSONParser(msrmnt_name, h.TagKeys, tags) + parser, err := parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: msrmnt_name, + TagKeys: h.TagKeys, + DefaultTags: tags, + }) if err != nil { return err } diff --git a/plugins/inputs/httpjson/httpjson_test.go b/plugins/inputs/httpjson/httpjson_test.go index 7134ffb46..909759199 100644 --- a/plugins/inputs/httpjson/httpjson_test.go +++ b/plugins/inputs/httpjson/httpjson_test.go @@ -163,7 +163,7 @@ func (c *mockHTTPClient) HTTPClient() *http.Client { // *HttpJson: Pointer to an HttpJson object that uses the generated mock HTTP client func genMockHttpJson(response string, statusCode int) []*HttpJson { return []*HttpJson{ - &HttpJson{ + { client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ "http://server1.example.com/metrics/", @@ -180,7 +180,7 @@ func genMockHttpJson(response string, statusCode int) []*HttpJson { "apiVersion": "v1", }, }, - &HttpJson{ + { client: &mockHTTPClient{responseBody: response, statusCode: statusCode}, Servers: []string{ "http://server3.example.com/metrics/", diff --git a/plugins/inputs/icinga2/README.md b/plugins/inputs/icinga2/README.md new file mode 100644 index 000000000..14708cd41 --- /dev/null +++ b/plugins/inputs/icinga2/README.md @@ -0,0 +1,65 @@ +# Icinga2 Input Plugin + +This plugin gather services & hosts status using Icinga2 Remote API. + +The icinga2 plugin uses the icinga2 remote API to gather status on running +services and hosts. You can read Icinga2's documentation for their remote API +[here](https://docs.icinga.com/icinga2/latest/doc/module/icinga2/chapter/icinga2-api) + +### Configuration: + +```toml +# Description +[[inputs.icinga2]] + ## Required Icinga2 server address + # server = "https://localhost:5665" + + ## Required Icinga2 object type ("services" or "hosts") + # object_type = "services" + + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + + ## Maximum time to receive response. 
+ # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = true +``` + +### Measurements & Fields: + +- All measurements have the following fields: + - name (string) + - state_code (int) + +### Tags: + +- All measurements have the following tags: + - check_command + - display_name + - state + - source + - port + - scheme + +### Sample Queries: + +``` +SELECT * FROM "icinga2_services" WHERE state_code = 0 AND time > now() - 24h // Service with OK status +SELECT * FROM "icinga2_services" WHERE state_code = 1 AND time > now() - 24h // Service with WARNING status +SELECT * FROM "icinga2_services" WHERE state_code = 2 AND time > now() - 24h // Service with CRITICAL status +SELECT * FROM "icinga2_services" WHERE state_code = 3 AND time > now() - 24h // Service with UNKNOWN status +``` + +### Example Output: + +``` +$ ./telegraf -config telegraf.conf -input-filter icinga2 -test +icinga2_hosts,display_name=router-fr.eqx.fr,check_command=hostalive-custom,host=test-vm,source=localhost,port=5665,scheme=https,state=ok name="router-fr.eqx.fr",state=0 1492021603000000000 +``` diff --git a/plugins/inputs/icinga2/icinga2.go b/plugins/inputs/icinga2/icinga2.go new file mode 100644 index 000000000..67b9bcab9 --- /dev/null +++ b/plugins/inputs/icinga2/icinga2.go @@ -0,0 +1,176 @@ +package icinga2 + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Icinga2 struct { + Server string + ObjectType string + Username string + Password string + ResponseTimeout internal.Duration + tls.ClientConfig + + Log telegraf.Logger + + client *http.Client +} + +type Result struct { + Results []Object `json:"results"` +} + +type Object struct { + Attrs Attribute `json:"attrs"` + Name string `json:"name"` + Joins struct{} `json:"joins"` + Meta struct{} `json:"meta"` + Type ObjectType `json:"type"` +} + +type Attribute struct { + CheckCommand string `json:"check_command"` + DisplayName string `json:"display_name"` + Name string `json:"name"` + State float64 `json:"state"` +} + +var levels = []string{"ok", "warning", "critical", "unknown"} + +type ObjectType string + +var sampleConfig = ` + ## Required Icinga2 server address + # server = "https://localhost:5665" + + ## Required Icinga2 object type ("services" or "hosts") + # object_type = "services" + + ## Credentials for basic HTTP authentication + # username = "admin" + # password = "admin" + + ## Maximum time to receive response. 
+ # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = true + ` + +func (i *Icinga2) Description() string { + return "Gather Icinga2 status" +} + +func (i *Icinga2) SampleConfig() string { + return sampleConfig +} + +func (i *Icinga2) GatherStatus(acc telegraf.Accumulator, checks []Object) { + for _, check := range checks { + url, err := url.Parse(i.Server) + if err != nil { + i.Log.Error(err.Error()) + continue + } + + state := int64(check.Attrs.State) + + fields := map[string]interface{}{ + "name": check.Attrs.Name, + "state_code": state, + } + + tags := map[string]string{ + "display_name": check.Attrs.DisplayName, + "check_command": check.Attrs.CheckCommand, + "state": levels[state], + "source": url.Hostname(), + "scheme": url.Scheme, + "port": url.Port(), + } + + acc.AddFields(fmt.Sprintf("icinga2_%s", i.ObjectType), fields, tags) + } +} + +func (i *Icinga2) createHttpClient() (*http.Client, error) { + tlsCfg, err := i.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: i.ResponseTimeout.Duration, + } + + return client, nil +} + +func (i *Icinga2) Gather(acc telegraf.Accumulator) error { + if i.ResponseTimeout.Duration < time.Second { + i.ResponseTimeout.Duration = time.Second * 5 + } + + if i.client == nil { + client, err := i.createHttpClient() + if err != nil { + return err + } + i.client = client + } + + url := fmt.Sprintf("%s/v1/objects/%s?attrs=name&attrs=display_name&attrs=state&attrs=check_command", i.Server, i.ObjectType) + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + if i.Username != "" { + req.SetBasicAuth(i.Username, i.Password) + } + + resp, err := i.client.Do(req) + if err != nil { + return err + } + + defer resp.Body.Close() + + result := Result{} + json.NewDecoder(resp.Body).Decode(&result) + if err != nil { + return err + } + + i.GatherStatus(acc, result.Results) + + return nil +} + +func init() { + inputs.Add("icinga2", func() telegraf.Input { + return &Icinga2{ + Server: "https://localhost:5665", + ObjectType: "services", + ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + } + }) +} diff --git a/plugins/inputs/icinga2/icinga2_test.go b/plugins/inputs/icinga2/icinga2_test.go new file mode 100644 index 000000000..a908af7d5 --- /dev/null +++ b/plugins/inputs/icinga2/icinga2_test.go @@ -0,0 +1,116 @@ +package icinga2 + +import ( + "encoding/json" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +func TestGatherServicesStatus(t *testing.T) { + s := `{ + "results": [ + { + "attrs": { + "check_command": "check-bgp-juniper-netconf", + "display_name": "eq-par.dc2.fr", + "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe", + "state": 0 + }, + "joins": {}, + "meta": {}, + "name": "eq-par.dc2.fr!ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe", + "type": "Service" + } + ] +} +` + + checks := Result{} + json.Unmarshal([]byte(s), &checks) + + icinga2 := new(Icinga2) + icinga2.Log = testutil.Logger{} + icinga2.ObjectType = "services" + icinga2.Server = "https://localhost:5665" + + var acc testutil.Accumulator + icinga2.GatherStatus(&acc, checks.Results) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "icinga2_services", + map[string]string{ + "display_name": "eq-par.dc2.fr", + 
"check_command": "check-bgp-juniper-netconf", + "state": "ok", + "source": "localhost", + "port": "5665", + "scheme": "https", + }, + map[string]interface{}{ + "name": "ef017af8-c684-4f3f-bb20-0dfe9fcd3dbe", + "state_code": 0, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestGatherHostsStatus(t *testing.T) { + s := `{ + "results": [ + { + "attrs": { + "address": "192.168.1.1", + "check_command": "ping", + "display_name": "apache", + "name": "webserver", + "state": 2.0 + }, + "joins": {}, + "meta": {}, + "name": "webserver", + "type": "Host" + } + ] +} +` + + checks := Result{} + json.Unmarshal([]byte(s), &checks) + + var acc testutil.Accumulator + + icinga2 := new(Icinga2) + icinga2.Log = testutil.Logger{} + icinga2.ObjectType = "hosts" + icinga2.Server = "https://localhost:5665" + + icinga2.GatherStatus(&acc, checks.Results) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "icinga2_hosts", + map[string]string{ + "display_name": "apache", + "check_command": "ping", + "state": "critical", + "source": "localhost", + "port": "5665", + "scheme": "https", + }, + map[string]interface{}{ + "name": "webserver", + "state_code": 2, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} diff --git a/plugins/inputs/infiniband/README.md b/plugins/inputs/infiniband/README.md new file mode 100644 index 000000000..bc5b03543 --- /dev/null +++ b/plugins/inputs/infiniband/README.md @@ -0,0 +1,58 @@ +# InfiniBand Input Plugin + +This plugin gathers statistics for all InfiniBand devices and ports on the +system. These are the counters that can be found in +`/sys/class/infiniband//port//counters/` + +**Supported Platforms**: Linux + +### Configuration + +```toml +[[inputs.infiniband]] + # no configuration +``` + +### Metrics + +Actual metrics depend on the InfiniBand devices, the plugin uses a simple +mapping from counter -> counter value. + +[Information about the counters][counters] collected is provided by Mellanox. 
+ +[counters]: https://community.mellanox.com/s/article/understanding-mlx5-linux-counters-and-status-parameters + +- infiniband + - tags: + - device + - port + - fields: + - excessive_buffer_overrun_errors (integer) + - link_downed (integer) + - link_error_recovery (integer) + - local_link_integrity_errors (integer) + - multicast_rcv_packets (integer) + - multicast_xmit_packets (integer) + - port_rcv_constraint_errors (integer) + - port_rcv_data (integer) + - port_rcv_errors (integer) + - port_rcv_packets (integer) + - port_rcv_remote_physical_errors (integer) + - port_rcv_switch_relay_errors (integer) + - port_xmit_constraint_errors (integer) + - port_xmit_data (integer) + - port_xmit_discards (integer) + - port_xmit_packets (integer) + - port_xmit_wait (integer) + - symbol_error (integer) + - unicast_rcv_packets (integer) + - unicast_xmit_packets (integer) + - VL15_dropped (integer) + + + +### Example Output + +``` +infiniband,device=mlx5_0,port=1 VL15_dropped=0i,excessive_buffer_overrun_errors=0i,link_downed=0i,link_error_recovery=0i,local_link_integrity_errors=0i,multicast_rcv_packets=0i,multicast_xmit_packets=0i,port_rcv_constraint_errors=0i,port_rcv_data=237159415345822i,port_rcv_errors=0i,port_rcv_packets=801977655075i,port_rcv_remote_physical_errors=0i,port_rcv_switch_relay_errors=0i,port_xmit_constraint_errors=0i,port_xmit_data=238334949937759i,port_xmit_discards=0i,port_xmit_packets=803162651391i,port_xmit_wait=4294967295i,symbol_error=0i,unicast_rcv_packets=801977655075i,unicast_xmit_packets=803162651391i 1573125558000000000 +``` diff --git a/plugins/inputs/infiniband/infiniband.go b/plugins/inputs/infiniband/infiniband.go new file mode 100644 index 000000000..65e1d6c71 --- /dev/null +++ b/plugins/inputs/infiniband/infiniband.go @@ -0,0 +1,22 @@ +package infiniband + +import ( + "github.com/influxdata/telegraf" +) + +// Stores the configuration values for the infiniband plugin - as there are no +// config values, this is intentionally empty +type Infiniband struct { + Log telegraf.Logger `toml:"-"` +} + +// Sample configuration for plugin +var InfinibandConfig = `` + +func (_ *Infiniband) SampleConfig() string { + return InfinibandConfig +} + +func (_ *Infiniband) Description() string { + return "Gets counters from all InfiniBand cards and ports installed" +} diff --git a/plugins/inputs/infiniband/infiniband_linux.go b/plugins/inputs/infiniband/infiniband_linux.go new file mode 100644 index 000000000..48cd8a428 --- /dev/null +++ b/plugins/inputs/infiniband/infiniband_linux.go @@ -0,0 +1,59 @@ +// +build linux + +package infiniband + +import ( + "fmt" + "github.com/Mellanox/rdmamap" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "strconv" +) + +// Gather statistics from our infiniband cards +func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { + + rdmaDevices := rdmamap.GetRdmaDeviceList() + + if len(rdmaDevices) == 0 { + return fmt.Errorf("no InfiniBand devices found in /sys/class/infiniband/") + } + + for _, dev := range rdmaDevices { + devicePorts := rdmamap.GetPorts(dev) + for _, port := range devicePorts { + portInt, err := strconv.Atoi(port) + if err != nil { + return err + } + + stats, err := rdmamap.GetRdmaSysfsStats(dev, portInt) + if err != nil { + return err + } + + addStats(dev, port, stats, acc) + } + } + + return nil +} + +// Add the statistics to the accumulator +func addStats(dev string, port string, stats []rdmamap.RdmaStatEntry, acc telegraf.Accumulator) { + + // Allow users to filter by card and port + tags := 
map[string]string{"device": dev, "port": port} + fields := make(map[string]interface{}) + + for _, entry := range stats { + fields[entry.Name] = entry.Value + } + + acc.AddFields("infiniband", fields, tags) +} + +// Initialise plugin +func init() { + inputs.Add("infiniband", func() telegraf.Input { return &Infiniband{} }) +} diff --git a/plugins/inputs/infiniband/infiniband_notlinux.go b/plugins/inputs/infiniband/infiniband_notlinux.go new file mode 100644 index 000000000..5b19672d9 --- /dev/null +++ b/plugins/inputs/infiniband/infiniband_notlinux.go @@ -0,0 +1,23 @@ +// +build !linux + +package infiniband + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +func (i *Infiniband) Init() error { + i.Log.Warn("Current platform is not supported") + return nil +} + +func (_ *Infiniband) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("infiniband", func() telegraf.Input { + return &Infiniband{} + }) +} diff --git a/plugins/inputs/infiniband/infiniband_test.go b/plugins/inputs/infiniband/infiniband_test.go new file mode 100644 index 000000000..6c4bb2458 --- /dev/null +++ b/plugins/inputs/infiniband/infiniband_test.go @@ -0,0 +1,134 @@ +// +build linux + +package infiniband + +import ( + "github.com/Mellanox/rdmamap" + "github.com/influxdata/telegraf/testutil" + "testing" +) + +func TestInfiniband(t *testing.T) { + fields := map[string]interface{}{ + "excessive_buffer_overrun_errors": uint64(0), + "link_downed": uint64(0), + "link_error_recovery": uint64(0), + "local_link_integrity_errors": uint64(0), + "multicast_rcv_packets": uint64(0), + "multicast_xmit_packets": uint64(0), + "port_rcv_constraint_errors": uint64(0), + "port_rcv_data": uint64(237159415345822), + "port_rcv_errors": uint64(0), + "port_rcv_packets": uint64(801977655075), + "port_rcv_remote_physical_errors": uint64(0), + "port_rcv_switch_relay_errors": uint64(0), + "port_xmit_constraint_errors": uint64(0), + "port_xmit_data": uint64(238334949937759), + "port_xmit_discards": uint64(0), + "port_xmit_packets": uint64(803162651391), + "port_xmit_wait": uint64(4294967295), + "symbol_error": uint64(0), + "unicast_rcv_packets": uint64(801977655075), + "unicast_xmit_packets": uint64(803162651391), + "VL15_dropped": uint64(0), + } + + tags := map[string]string{ + "device": "m1x5_0", + "port": "1", + } + + sample_rdmastats_entries := []rdmamap.RdmaStatEntry{ + { + Name: "excessive_buffer_overrun_errors", + Value: uint64(0), + }, + { + Name: "link_downed", + Value: uint64(0), + }, + { + Name: "link_error_recovery", + Value: uint64(0), + }, + { + Name: "local_link_integrity_errors", + Value: uint64(0), + }, + { + Name: "multicast_rcv_packets", + Value: uint64(0), + }, + { + Name: "multicast_xmit_packets", + Value: uint64(0), + }, + { + Name: "port_rcv_constraint_errors", + Value: uint64(0), + }, + { + Name: "port_rcv_data", + Value: uint64(237159415345822), + }, + { + Name: "port_rcv_errors", + Value: uint64(0), + }, + { + Name: "port_rcv_packets", + Value: uint64(801977655075), + }, + { + Name: "port_rcv_remote_physical_errors", + Value: uint64(0), + }, + { + Name: "port_rcv_switch_relay_errors", + Value: uint64(0), + }, + { + Name: "port_xmit_constraint_errors", + Value: uint64(0), + }, + { + Name: "port_xmit_data", + Value: uint64(238334949937759), + }, + { + Name: "port_xmit_discards", + Value: uint64(0), + }, + { + Name: "port_xmit_packets", + Value: uint64(803162651391), + }, + { + Name: "port_xmit_wait", + Value: uint64(4294967295), + }, + { + Name: 
"symbol_error", + Value: uint64(0), + }, + { + Name: "unicast_rcv_packets", + Value: uint64(801977655075), + }, + { + Name: "unicast_xmit_packets", + Value: uint64(803162651391), + }, + { + Name: "VL15_dropped", + Value: uint64(0), + }, + } + + var acc testutil.Accumulator + + addStats("m1x5_0", "1", sample_rdmastats_entries, &acc) + + acc.AssertContainsTaggedFields(t, "infiniband", fields, tags) + +} diff --git a/plugins/inputs/influxdb/README.md b/plugins/inputs/influxdb/README.md index 2bab123f8..8787c6a0e 100644 --- a/plugins/inputs/influxdb/README.md +++ b/plugins/inputs/influxdb/README.md @@ -20,6 +20,10 @@ InfluxDB-formatted endpoints. See below for more information. "http://localhost:8086/debug/vars" ] + ## Username and password to send using HTTP Basic Authentication. + # username = "" + # password = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -55,7 +59,7 @@ and may vary between versions. - heap_sys - mcache_sys - next_gc - - gcc_pu_fraction + - gc_cpu_fraction - other_sys - alloc - stack_inuse @@ -91,7 +95,7 @@ telegraf --config ~/ws/telegraf.conf --input-filter influxdb --test > influxdb_measurement,database=_internal,host=tyrion,measurement=tsm1_filestore,url=http://localhost:8086/debug/vars numSeries=2 1463590500247354636 > influxdb_measurement,database=_internal,host=tyrion,measurement=tsm1_wal,url=http://localhost:8086/debug/vars numSeries=4 1463590500247354636 > influxdb_measurement,database=_internal,host=tyrion,measurement=write,url=http://localhost:8086/debug/vars numSeries=1 1463590500247354636 -> influxdb_memstats,host=tyrion,url=http://localhost:8086/debug/vars alloc=7642384i,buck_hash_sys=1463471i,frees=1169558i,gc_sys=653312i,gcc_pu_fraction=0.00003825652361068311,heap_alloc=7642384i,heap_idle=9912320i,heap_inuse=9125888i,heap_objects=48276i,heap_released=0i,heap_sys=19038208i,last_gc=1463590480877651621i,lookups=90i,mallocs=1217834i,mcache_inuse=4800i,mcache_sys=16384i,mspan_inuse=70920i,mspan_sys=81920i,next_gc=11679787i,num_gc=141i,other_sys=1244233i,pause_total_ns=24034027i,stack_inuse=884736i,stack_sys=884736i,sys=23382264i,total_alloc=679012200i 1463590500277918755 +> influxdb_memstats,host=tyrion,url=http://localhost:8086/debug/vars alloc=7642384i,buck_hash_sys=1463471i,frees=1169558i,gc_sys=653312i,gc_cpu_fraction=0.00003825652361068311,heap_alloc=7642384i,heap_idle=9912320i,heap_inuse=9125888i,heap_objects=48276i,heap_released=0i,heap_sys=19038208i,last_gc=1463590480877651621i,lookups=90i,mallocs=1217834i,mcache_inuse=4800i,mcache_sys=16384i,mspan_inuse=70920i,mspan_sys=81920i,next_gc=11679787i,num_gc=141i,other_sys=1244233i,pause_total_ns=24034027i,stack_inuse=884736i,stack_sys=884736i,sys=23382264i,total_alloc=679012200i 1463590500277918755 > influxdb_shard,database=_internal,engine=tsm1,host=tyrion,id=4,path=/Users/sparrc/.influxdb/data/_internal/monitor/4,retentionPolicy=monitor,url=http://localhost:8086/debug/vars fieldsCreate=65,seriesCreate=26,writePointsOk=7274,writeReq=280 1463590500247354636 > influxdb_subscriber,host=tyrion,url=http://localhost:8086/debug/vars pointsWritten=7274 1463590500247354636 > influxdb_tsm1_cache,database=_internal,host=tyrion,path=/Users/sparrc/.influxdb/data/_internal/monitor/1,retentionPolicy=monitor,url=http://localhost:8086/debug/vars WALCompactionTimeMs=0,cacheAgeMs=2809192,cachedBytes=0,diskBytes=0,memBytes=0,snapshotCount=0 1463590500247354636 diff --git a/plugins/inputs/influxdb/influxdb.go b/plugins/inputs/influxdb/influxdb.go index 0bb3ead5e..23fa9fdc4 
100644 --- a/plugins/inputs/influxdb/influxdb.go +++ b/plugins/inputs/influxdb/influxdb.go @@ -1,9 +1,10 @@ package influxdb import ( + "bytes" "encoding/json" "errors" - "fmt" + "io" "net/http" "sync" "time" @@ -14,9 +15,28 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +const ( + maxErrorResponseBodyLength = 1024 +) + +type APIError struct { + StatusCode int + Reason string + Description string `json:"error"` +} + +func (e *APIError) Error() string { + if e.Description != "" { + return e.Reason + ": " + e.Description + } + return e.Reason +} + type InfluxDB struct { - URLs []string `toml:"urls"` - Timeout internal.Duration + URLs []string `toml:"urls"` + Username string `toml:"username"` + Password string `toml:"password"` + Timeout internal.Duration `toml:"timeout"` tls.ClientConfig client *http.Client @@ -38,6 +58,10 @@ func (*InfluxDB) SampleConfig() string { "http://localhost:8086/debug/vars" ] + ## Username and password to send using HTTP Basic Authentication. + # username = "" + # password = "" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -75,7 +99,7 @@ func (i *InfluxDB) Gather(acc telegraf.Accumulator) error { go func(url string) { defer wg.Done() if err := i.gatherURL(acc, url); err != nil { - acc.AddError(fmt.Errorf("[url=%s]: %s", url, err)) + acc.AddError(err) } }(u) } @@ -135,12 +159,27 @@ func (i *InfluxDB) gatherURL( shardCounter := 0 now := time.Now() - resp, err := i.client.Get(url) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + if i.Username != "" || i.Password != "" { + req.SetBasicAuth(i.Username, i.Password) + } + + req.Header.Set("User-Agent", "Telegraf/"+internal.Version()) + + resp, err := i.client.Do(req) if err != nil { return err } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return readResponseError(resp) + } + // It would be nice to be able to decode into a map[string]point, but // we'll get a decoder error like: // `json: cannot unmarshal array into Go value of type influxdb.point` @@ -203,7 +242,7 @@ func (i *InfluxDB) gatherURL( "pause_total_ns": m.PauseTotalNs, "pause_ns": m.PauseNs[(m.NumGC+255)%256], "num_gc": m.NumGC, - "gcc_pu_fraction": m.GCCPUFraction, + "gc_cpu_fraction": m.GCCPUFraction, }, map[string]string{ "url": url, @@ -255,6 +294,27 @@ func (i *InfluxDB) gatherURL( return nil } +func readResponseError(resp *http.Response) error { + apiError := &APIError{ + StatusCode: resp.StatusCode, + Reason: resp.Status, + } + + var buf bytes.Buffer + r := io.LimitReader(resp.Body, maxErrorResponseBodyLength) + _, err := buf.ReadFrom(r) + if err != nil { + return apiError + } + + err = json.Unmarshal(buf.Bytes(), apiError) + if err != nil { + return apiError + } + + return apiError +} + func init() { inputs.Add("influxdb", func() telegraf.Input { return &InfluxDB{ diff --git a/plugins/inputs/influxdb/influxdb_test.go b/plugins/inputs/influxdb/influxdb_test.go index f24ecc24c..27ea81b6d 100644 --- a/plugins/inputs/influxdb/influxdb_test.go +++ b/plugins/inputs/influxdb/influxdb_test.go @@ -1,6 +1,7 @@ package influxdb_test import ( + "fmt" "net/http" "net/http/httptest" "testing" @@ -91,7 +92,7 @@ func TestInfluxDB(t *testing.T) { "heap_sys": int64(33849344), "mcache_sys": int64(16384), "next_gc": int64(20843042), - "gcc_pu_fraction": float64(4.287178819113636e-05), + "gc_cpu_fraction": float64(4.287178819113636e-05), "other_sys": int64(1229737), "alloc": int64(17034016), "stack_inuse": int64(753664), @@ -178,6 +179,31 @@ func 
TestErrorHandling404(t *testing.T) { require.Error(t, acc.GatherError(plugin.Gather)) } +func TestErrorResponse(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + w.Write([]byte(`{"error": "unable to parse authentication credentials"}`)) + })) + defer ts.Close() + + plugin := &influxdb.InfluxDB{ + URLs: []string{ts.URL}, + } + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + expected := []error{ + &influxdb.APIError{ + StatusCode: http.StatusUnauthorized, + Reason: fmt.Sprintf("%d %s", http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized)), + Description: "unable to parse authentication credentials", + }, + } + require.Equal(t, expected, acc.Errors) +} + const basicJSON = ` { "_1": { diff --git a/plugins/inputs/influxdb_listener/README.md b/plugins/inputs/influxdb_listener/README.md new file mode 100644 index 000000000..aae77fb96 --- /dev/null +++ b/plugins/inputs/influxdb_listener/README.md @@ -0,0 +1,79 @@ +# InfluxDB Listener Input Plugin + +InfluxDB Listener is a service input plugin that listens for requests sent +according to the [InfluxDB HTTP API][influxdb_http_api]. The intent of the +plugin is to allow Telegraf to serve as a proxy/router for the `/write` +endpoint of the InfluxDB HTTP API. + +**Note:** This plugin was previously known as `http_listener`. If you wish to +send general metrics via HTTP it is recommended to use the +[`http_listener_v2`][http_listener_v2] instead. + +The `/write` endpoint supports the `precision` query parameter and can be set +to one of `ns`, `u`, `ms`, `s`, `m`, `h`. All other parameters are ignored and +defer to the output plugins configuration. + +When chaining Telegraf instances using this plugin, CREATE DATABASE requests +receive a 200 OK response with message body `{"results":[]}` but they are not +relayed. The output configuration of the Telegraf instance which ultimately +submits data to InfluxDB determines the destination database. + +### Configuration: + +```toml +[[inputs.influxdb_listener]] + ## Address and port to host HTTP listener on + service_address = ":8186" + + ## maximum duration before timing out read of the request + read_timeout = "10s" + ## maximum duration before timing out write of the response + write_timeout = "10s" + + ## Maximum allowed HTTP request body size in bytes. + ## 0 means to use the default of 32MiB. + max_body_size = 0 + + ## Maximum line size allowed to be sent in bytes. + ## deprecated in 1.14; parser now handles lines of unlimited length and option is ignored + # max_line_size = 0 + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + tls_cert = "/etc/telegraf/cert.pem" + tls_key = "/etc/telegraf/key.pem" + + ## Optional tag name used to store the database name. + ## If the write has a database in the query string then it will be kept in this tag name. + ## This tag can be used in downstream outputs. + ## The default value of nothing means it will be off and the database will not be recorded. + ## If you have a tag that is the same as the one specified below, and supply a database, + ## the tag will be overwritten with the database supplied. + # database_tag = "" + + ## If set the retention policy specified in the write query will be added as + ## the value of this tag name. 
+ # retention_policy_tag = "" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. + # basic_username = "foobar" + # basic_password = "barfoo" +``` + +### Metrics: + +Metrics are created from InfluxDB Line Protocol in the request body. + +### Troubleshooting: + +**Example Query:** +``` +curl -i -XPOST 'http://localhost:8186/write' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000' +``` + +[influxdb_http_api]: https://docs.influxdata.com/influxdb/latest/guides/writing_data/ +[http_listener_v2]: /plugins/inputs/http_listener_v2/README.md diff --git a/plugins/inputs/influxdb_listener/influxdb_listener.go b/plugins/inputs/influxdb_listener/influxdb_listener.go new file mode 100644 index 000000000..4ba5a8c7c --- /dev/null +++ b/plugins/inputs/influxdb_listener/influxdb_listener.go @@ -0,0 +1,418 @@ +package influxdb_listener + +import ( + "compress/gzip" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "net" + "net/http" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers/influx" + "github.com/influxdata/telegraf/selfstat" +) + +const ( + // defaultMaxBodySize is the default maximum request body size, in bytes. + // if the request body is over this size, we will return an HTTP 413 error. + defaultMaxBodySize = 32 * 1024 * 1024 +) + +type InfluxDBListener struct { + ServiceAddress string `toml:"service_address"` + port int + tlsint.ServerConfig + + ReadTimeout internal.Duration `toml:"read_timeout"` + WriteTimeout internal.Duration `toml:"write_timeout"` + MaxBodySize internal.Size `toml:"max_body_size"` + MaxLineSize internal.Size `toml:"max_line_size"` // deprecated in 1.14; ignored + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + DatabaseTag string `toml:"database_tag"` + RetentionPolicyTag string `toml:"retention_policy_tag"` + + timeFunc influx.TimeFunc + + listener net.Listener + server http.Server + + acc telegraf.Accumulator + + bytesRecv selfstat.Stat + requestsServed selfstat.Stat + writesServed selfstat.Stat + queriesServed selfstat.Stat + pingsServed selfstat.Stat + requestsRecv selfstat.Stat + notFoundsServed selfstat.Stat + buffersCreated selfstat.Stat + authFailures selfstat.Stat + + Log telegraf.Logger `toml:"-"` + + mux http.ServeMux +} + +const sampleConfig = ` + ## Address and port to host InfluxDB listener on + service_address = ":8186" + + ## maximum duration before timing out read of the request + read_timeout = "10s" + ## maximum duration before timing out write of the response + write_timeout = "10s" + + ## Maximum allowed HTTP request body size in bytes. + ## 0 means to use the default of 32MiB. + max_body_size = "32MiB" + + ## Optional tag name used to store the database. + ## If the write has a database in the query string then it will be kept in this tag name. + ## This tag can be used in downstream outputs. + ## The default value of nothing means it will be off and the database will not be recorded. + # database_tag = "" + + ## If set the retention policy specified in the write query will be added as + ## the value of this tag name. 
+ # retention_policy_tag = "" + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Add service certificate and key + tls_cert = "/etc/telegraf/cert.pem" + tls_key = "/etc/telegraf/key.pem" + + ## Optional username and password to accept for HTTP basic authentication. + ## You probably want to make sure you have TLS configured above for this. + # basic_username = "foobar" + # basic_password = "barfoo" +` + +func (h *InfluxDBListener) SampleConfig() string { + return sampleConfig +} + +func (h *InfluxDBListener) Description() string { + return "Accept metrics over InfluxDB 1.x HTTP API" +} + +func (h *InfluxDBListener) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (h *InfluxDBListener) routes() { + authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, "influxdb", + func(_ http.ResponseWriter) { + h.authFailures.Incr(1) + }, + ) + + h.mux.Handle("/write", authHandler(h.handleWrite())) + h.mux.Handle("/query", authHandler(h.handleQuery())) + h.mux.Handle("/ping", h.handlePing()) + h.mux.Handle("/", authHandler(h.handleDefault())) +} + +func (h *InfluxDBListener) Init() error { + tags := map[string]string{ + "address": h.ServiceAddress, + } + h.bytesRecv = selfstat.Register("influxdb_listener", "bytes_received", tags) + h.requestsServed = selfstat.Register("influxdb_listener", "requests_served", tags) + h.writesServed = selfstat.Register("influxdb_listener", "writes_served", tags) + h.queriesServed = selfstat.Register("influxdb_listener", "queries_served", tags) + h.pingsServed = selfstat.Register("influxdb_listener", "pings_served", tags) + h.requestsRecv = selfstat.Register("influxdb_listener", "requests_received", tags) + h.notFoundsServed = selfstat.Register("influxdb_listener", "not_founds_served", tags) + h.buffersCreated = selfstat.Register("influxdb_listener", "buffers_created", tags) + h.authFailures = selfstat.Register("influxdb_listener", "auth_failures", tags) + h.routes() + + if h.MaxBodySize.Size == 0 { + h.MaxBodySize.Size = defaultMaxBodySize + } + + if h.MaxLineSize.Size != 0 { + h.Log.Warnf("Use of deprecated configuration: 'max_line_size'; parser now handles lines of unlimited length and option is ignored") + } + + if h.ReadTimeout.Duration < time.Second { + h.ReadTimeout.Duration = time.Second * 10 + } + if h.WriteTimeout.Duration < time.Second { + h.WriteTimeout.Duration = time.Second * 10 + } + + return nil +} + +// Start starts the InfluxDB listener service. 
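+// It binds service_address (wrapping the listener with TLS when a server
+// certificate and key are configured), records the bound port, and then serves
+// requests on a background goroutine until Stop shuts the server down.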
+func (h *InfluxDBListener) Start(acc telegraf.Accumulator) error { + h.acc = acc + + tlsConf, err := h.ServerConfig.TLSConfig() + if err != nil { + return err + } + + h.server = http.Server{ + Addr: h.ServiceAddress, + Handler: h, + ReadTimeout: h.ReadTimeout.Duration, + WriteTimeout: h.WriteTimeout.Duration, + TLSConfig: tlsConf, + } + + var listener net.Listener + if tlsConf != nil { + listener, err = tls.Listen("tcp", h.ServiceAddress, tlsConf) + if err != nil { + return err + } + } else { + listener, err = net.Listen("tcp", h.ServiceAddress) + if err != nil { + return err + } + } + h.listener = listener + h.port = listener.Addr().(*net.TCPAddr).Port + + go func() { + err = h.server.Serve(h.listener) + if err != http.ErrServerClosed { + h.Log.Infof("Error serving HTTP on %s", h.ServiceAddress) + } + }() + + h.Log.Infof("Started HTTP listener service on %s", h.ServiceAddress) + + return nil +} + +// Stop cleans up all resources +func (h *InfluxDBListener) Stop() { + err := h.server.Shutdown(context.Background()) + if err != nil { + h.Log.Infof("Error shutting down HTTP server: %v", err.Error()) + } +} + +func (h *InfluxDBListener) ServeHTTP(res http.ResponseWriter, req *http.Request) { + h.requestsRecv.Incr(1) + h.mux.ServeHTTP(res, req) + h.requestsServed.Incr(1) +} + +func (h *InfluxDBListener) handleQuery() http.HandlerFunc { + return func(res http.ResponseWriter, req *http.Request) { + defer h.queriesServed.Incr(1) + // Deliver a dummy response to the query endpoint, as some InfluxDB + // clients test endpoint availability with a query + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + res.WriteHeader(http.StatusOK) + res.Write([]byte("{\"results\":[]}")) + } +} + +func (h *InfluxDBListener) handlePing() http.HandlerFunc { + return func(res http.ResponseWriter, req *http.Request) { + defer h.pingsServed.Incr(1) + verbose := req.URL.Query().Get("verbose") + + // respond to ping requests + res.Header().Set("X-Influxdb-Version", "1.0") + if verbose != "" && verbose != "0" && verbose != "false" { + res.Header().Set("Content-Type", "application/json") + res.WriteHeader(http.StatusOK) + b, _ := json.Marshal(map[string]string{"version": "1.0"}) // based on header set above + res.Write(b) + } else { + res.WriteHeader(http.StatusNoContent) + } + } +} + +func (h *InfluxDBListener) handleDefault() http.HandlerFunc { + return func(res http.ResponseWriter, req *http.Request) { + defer h.notFoundsServed.Incr(1) + http.NotFound(res, req) + } +} + +func (h *InfluxDBListener) handleWrite() http.HandlerFunc { + return func(res http.ResponseWriter, req *http.Request) { + defer h.writesServed.Incr(1) + // Check that the content length is not too large for us to handle. 
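+		// Requests without a known Content-Length (e.g. chunked transfer encoding)
+		// pass this check; the http.MaxBytesReader wrapper applied to the body below
+		// still enforces the same limit for them.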
+ if req.ContentLength > h.MaxBodySize.Size { + tooLarge(res) + return + } + + db := req.URL.Query().Get("db") + rp := req.URL.Query().Get("rp") + + body := req.Body + body = http.MaxBytesReader(res, body, h.MaxBodySize.Size) + // Handle gzip request bodies + if req.Header.Get("Content-Encoding") == "gzip" { + var err error + body, err = gzip.NewReader(body) + if err != nil { + h.Log.Debugf("Error decompressing request body: %v", err.Error()) + badRequest(res, err.Error()) + return + } + defer body.Close() + } + + parser := influx.NewStreamParser(body) + parser.SetTimeFunc(h.timeFunc) + + precisionStr := req.URL.Query().Get("precision") + if precisionStr != "" { + precision := getPrecisionMultiplier(precisionStr) + parser.SetTimePrecision(precision) + } + + var m telegraf.Metric + var err error + var parseErrorCount int + var lastPos int = 0 + var firstParseErrorStr string + for { + select { + case <-req.Context().Done(): + // Shutting down before parsing is finished. + res.WriteHeader(http.StatusServiceUnavailable) + return + default: + } + + m, err = parser.Next() + pos := parser.Position() + h.bytesRecv.Incr(int64(pos - lastPos)) + lastPos = pos + + // Continue parsing metrics even if some are malformed + if parseErr, ok := err.(*influx.ParseError); ok { + parseErrorCount += 1 + errStr := parseErr.Error() + if firstParseErrorStr == "" { + firstParseErrorStr = errStr + } + continue + } else if err != nil { + // Either we're exiting cleanly (err == + // influx.EOF) or there's an unexpected error + break + } + + if h.DatabaseTag != "" && db != "" { + m.AddTag(h.DatabaseTag, db) + } + + if h.RetentionPolicyTag != "" && rp != "" { + m.AddTag(h.RetentionPolicyTag, rp) + } + + h.acc.AddMetric(m) + + } + if err != influx.EOF { + h.Log.Debugf("Error parsing the request body: %v", err.Error()) + badRequest(res, err.Error()) + return + } + if parseErrorCount > 0 { + var partialErrorString string + switch parseErrorCount { + case 1: + partialErrorString = fmt.Sprintf("%s", firstParseErrorStr) + case 2: + partialErrorString = fmt.Sprintf("%s (and 1 other parse error)", firstParseErrorStr) + default: + partialErrorString = fmt.Sprintf("%s (and %d other parse errors)", firstParseErrorStr, parseErrorCount-1) + } + partialWrite(res, partialErrorString) + return + } + + // http request success + res.WriteHeader(http.StatusNoContent) + } +} + +func tooLarge(res http.ResponseWriter) { + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + res.Header().Set("X-Influxdb-Error", "http: request body too large") + res.WriteHeader(http.StatusRequestEntityTooLarge) + res.Write([]byte(`{"error":"http: request body too large"}`)) +} + +func badRequest(res http.ResponseWriter, errString string) { + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + if errString == "" { + errString = "http: bad request" + } + res.Header().Set("X-Influxdb-Error", errString) + res.WriteHeader(http.StatusBadRequest) + res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) +} + +func partialWrite(res http.ResponseWriter, errString string) { + res.Header().Set("Content-Type", "application/json") + res.Header().Set("X-Influxdb-Version", "1.0") + res.Header().Set("X-Influxdb-Error", errString) + res.WriteHeader(http.StatusBadRequest) + res.Write([]byte(fmt.Sprintf(`{"error":%q}`, errString))) +} + +func getPrecisionMultiplier(precision string) time.Duration { + // Influxdb defaults silently to nanoseconds if precision isn't + // one of 
the following: + var d time.Duration + switch precision { + case "u": + d = time.Microsecond + case "ms": + d = time.Millisecond + case "s": + d = time.Second + case "m": + d = time.Minute + case "h": + d = time.Hour + default: + d = time.Nanosecond + } + return d +} + +func init() { + // http_listener deprecated in 1.9 + inputs.Add("http_listener", func() telegraf.Input { + return &InfluxDBListener{ + ServiceAddress: ":8186", + timeFunc: time.Now, + } + }) + inputs.Add("influxdb_listener", func() telegraf.Input { + return &InfluxDBListener{ + ServiceAddress: ":8186", + timeFunc: time.Now, + } + }) +} diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go new file mode 100644 index 000000000..d3dc55219 --- /dev/null +++ b/plugins/inputs/influxdb_listener/influxdb_listener_benchmark_test.go @@ -0,0 +1,108 @@ +package influxdb_listener + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/selfstat" + "github.com/influxdata/telegraf/testutil" +) + +// newListener is the minimal InfluxDBListener construction to serve writes. +func newListener() *InfluxDBListener { + listener := &InfluxDBListener{ + timeFunc: time.Now, + acc: &testutil.NopAccumulator{}, + bytesRecv: selfstat.Register("influxdb_listener", "bytes_received", map[string]string{}), + writesServed: selfstat.Register("influxdb_listener", "writes_served", map[string]string{}), + MaxBodySize: internal.Size{ + Size: defaultMaxBodySize, + }, + } + return listener +} + +func BenchmarkInfluxDBListener_serveWrite(b *testing.B) { + res := httptest.NewRecorder() + addr := "http://localhost/write?db=mydb" + + benchmarks := []struct { + name string + lines string + }{ + { + name: "single line, tag, and field", + lines: lines(1, 1, 1), + }, + { + name: "single line, 10 tags and fields", + lines: lines(1, 10, 10), + }, + { + name: "single line, 100 tags and fields", + lines: lines(1, 100, 100), + }, + { + name: "1k lines, single tag and field", + lines: lines(1000, 1, 1), + }, + { + name: "1k lines, 10 tags and fields", + lines: lines(1000, 10, 10), + }, + { + name: "10k lines, 10 tags and fields", + lines: lines(10000, 10, 10), + }, + { + name: "100k lines, 10 tags and fields", + lines: lines(100000, 10, 10), + }, + } + + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + listener := newListener() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + req, err := http.NewRequest("POST", addr, strings.NewReader(bm.lines)) + if err != nil { + b.Error(err) + } + listener.handleWrite()(res, req) + if res.Code != http.StatusNoContent { + b.Errorf("unexpected status %d", res.Code) + } + } + }) + } +} + +func lines(lines, numTags, numFields int) string { + lp := make([]string, lines) + for i := 0; i < lines; i++ { + tags := make([]string, numTags) + for j := 0; j < numTags; j++ { + tags[j] = fmt.Sprintf("t%d=v%d", j, j) + } + + fields := make([]string, numFields) + for k := 0; k < numFields; k++ { + fields[k] = fmt.Sprintf("f%d=%d", k, k) + } + + lp[i] = fmt.Sprintf("m%d,%s %s", + i, + strings.Join(tags, ","), + strings.Join(fields, ","), + ) + } + + return strings.Join(lp, "\n") +} diff --git a/plugins/inputs/influxdb_listener/influxdb_listener_test.go b/plugins/inputs/influxdb_listener/influxdb_listener_test.go new file mode 100644 index 000000000..d0b2913cd --- /dev/null +++ 
b/plugins/inputs/influxdb_listener/influxdb_listener_test.go @@ -0,0 +1,649 @@ +package influxdb_listener + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net/http" + "net/url" + "runtime" + "strconv" + "sync" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const ( + testMsg = "cpu_load_short,host=server01 value=12.0 1422568543702900257\n" + + testMsgNoNewline = "cpu_load_short,host=server01 value=12.0 1422568543702900257" + + testMsgs = `cpu_load_short,host=server02 value=12.0 1422568543702900257 +cpu_load_short,host=server03 value=12.0 1422568543702900257 +cpu_load_short,host=server04 value=12.0 1422568543702900257 +cpu_load_short,host=server05 value=12.0 1422568543702900257 +cpu_load_short,host=server06 value=12.0 1422568543702900257 +` + testPartial = `cpu,host=a value1=1 +cpu,host=b value1=1,value2=+Inf,value3=3 +cpu,host=c value1=1` + + badMsg = "blahblahblah: 42\n" + + emptyMsg = "" + + basicUsername = "test-username-please-ignore" + basicPassword = "super-secure-password!" +) + +var ( + pki = testutil.NewPKI("../../../testutil/pki") +) + +func newTestListener() *InfluxDBListener { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + timeFunc: time.Now, + } + return listener +} + +func newTestAuthListener() *InfluxDBListener { + listener := newTestListener() + listener.BasicUsername = basicUsername + listener.BasicPassword = basicPassword + return listener +} + +func newTestSecureListener() *InfluxDBListener { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + ServerConfig: *pki.TLSServerConfig(), + timeFunc: time.Now, + } + + return listener +} + +func getSecureClient() *http.Client { + tlsConfig, err := pki.TLSClientConfig().TLSConfig() + if err != nil { + panic(err) + } + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + } +} + +func createURL(listener *InfluxDBListener, scheme string, path string, rawquery string) string { + u := url.URL{ + Scheme: scheme, + Host: "localhost:" + strconv.Itoa(listener.port), + Path: path, + RawQuery: rawquery, + } + return u.String() +} + +func TestWriteSecureNoClientAuth(t *testing.T) { + listener := newTestSecureListener() + listener.TLSAllowedCACerts = nil + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + cas := x509.NewCertPool() + cas.AppendCertsFromPEM([]byte(pki.ReadServerCert())) + noClientAuthClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: cas, + }, + }, + } + + // post single message to listener + resp, err := noClientAuthClient.Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestWriteSecureWithClientAuth(t *testing.T) { + listener := newTestSecureListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := getSecureClient().Post(createURL(listener, "https", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func 
TestWriteBasicAuth(t *testing.T) { + listener := newTestAuthListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + client := &http.Client{} + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", "db=mydb"), bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + req.SetBasicAuth(basicUsername, basicPassword) + resp, err := client.Do(req) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, http.StatusNoContent, resp.StatusCode) +} + +func TestWriteKeepDatabase(t *testing.T) { + testMsgWithDB := "cpu_load_short,host=server01,database=wrongdb value=12.0 1422568543702900257\n" + + listener := newTestListener() + listener.DatabaseTag = "database" + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "database": "mydb"}, + ) + + // post single message to listener with a database tag in it already. It should be clobbered. + resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgWithDB))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01", "database": "mydb"}, + ) + + // post multiple message to listener + resp, err = http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(2) + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag, "database": "mydb"}, + ) + } +} + +func TestWriteRetentionPolicyTag(t *testing.T) { + listener := newTestListener() + listener.RetentionPolicyTag = "rp" + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/write", "rp=myrp"), "", bytes.NewBuffer([]byte("cpu time_idle=42"))) + require.NoError(t, err) + resp.Body.Close() + require.Equal(t, 204, resp.StatusCode) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "rp": "myrp", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + } + + acc.Wait(1) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +// http listener should add a newline at the end of the buffer if it's not there +func TestWriteNoNewline(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := 
http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgNoNewline))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": "server01"}, + ) +} + +func TestPartialWrite(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testPartial))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 400, resp.StatusCode) + + acc.Wait(1) + acc.AssertContainsTaggedFields(t, "cpu", + map[string]interface{}{"value1": float64(1)}, + map[string]string{"host": "a"}, + ) + acc.AssertContainsTaggedFields(t, "cpu", + map[string]interface{}{"value1": float64(1)}, + map[string]string{"host": "c"}, + ) +} + +func TestWriteMaxLineSizeIncrease(t *testing.T) { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + timeFunc: time.Now, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // Post a gigantic metric to the listener and verify that it writes OK this time: + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestWriteVerySmallMaxBody(t *testing.T) { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + MaxBodySize: internal.Size{Size: 4096}, + timeFunc: time.Now, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 413, resp.StatusCode) +} + +func TestWriteLargeLine(t *testing.T) { + listener := &InfluxDBListener{ + Log: testutil.Logger{}, + ServiceAddress: "localhost:0", + timeFunc: func() time.Time { + return time.Unix(123456789, 0) + }, + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(hugeMetric+testMsgs))) + require.NoError(t, err) + resp.Body.Close() + //todo: with the new parser, long lines aren't a problem. Do we need to skip them? 
+ //require.EqualValues(t, 400, resp.StatusCode) + + expected := testutil.MustMetric( + "super_long_metric", + map[string]string{"foo": "bar"}, + map[string]interface{}{ + "clients": 42, + "connected_slaves": 43, + "evicted_keys": 44, + "expired_keys": 45, + "instantaneous_ops_per_sec": 46, + "keyspace_hitrate": 47.0, + "keyspace_hits": 48, + "keyspace_misses": 49, + "latest_fork_usec": 50, + "master_repl_offset": 51, + "mem_fragmentation_ratio": 52.58, + "pubsub_channels": 53, + "pubsub_patterns": 54, + "rdb_changes_since_last_save": 55, + "repl_backlog_active": 56, + "repl_backlog_histlen": 57, + "repl_backlog_size": 58, + "sync_full": 59, + "sync_partial_err": 60, + "sync_partial_ok": 61, + "total_commands_processed": 62, + "total_connections_received": 63, + "uptime": 64, + "used_cpu_sys": 65.07, + "used_cpu_sys_children": 66.0, + "used_cpu_user": 67.1, + "used_cpu_user_children": 68.0, + "used_memory": 692048, + "used_memory_lua": 70792, + "used_memory_peak": 711128, + "used_memory_rss": 7298144, + }, + time.Unix(123456789, 0), + ) + + m, ok := acc.Get("super_long_metric") + require.True(t, ok) + testutil.RequireMetricEqual(t, expected, testutil.FromTestMetric(m)) + + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + acc.Wait(len(hostTags)) + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } +} + +// test that writing gzipped data works +func TestWriteGzippedData(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + data, err := ioutil.ReadFile("./testdata/testmsgs.gz") + require.NoError(t, err) + + req, err := http.NewRequest("POST", createURL(listener, "http", "/write", ""), bytes.NewBuffer(data)) + require.NoError(t, err) + req.Header.Set("Content-Encoding", "gzip") + + client := &http.Client{} + resp, err := client.Do(req) + require.NoError(t, err) + require.EqualValues(t, 204, resp.StatusCode) + + hostTags := []string{"server02", "server03", + "server04", "server05", "server06"} + acc.Wait(len(hostTags)) + for _, hostTag := range hostTags { + acc.AssertContainsTaggedFields(t, "cpu_load_short", + map[string]interface{}{"value": float64(12)}, + map[string]string{"host": hostTag}, + ) + } +} + +// writes 25,000 metrics to the listener with 10 different writers +func TestWriteHighTraffic(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("Skipping due to hang on darwin") + } + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post many messages to listener + var wg sync.WaitGroup + for i := 0; i < 10; i++ { + wg.Add(1) + go func(innerwg *sync.WaitGroup) { + defer innerwg.Done() + for i := 0; i < 500; i++ { + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(testMsgs))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + } + }(&wg) + } + + wg.Wait() + listener.Gather(acc) + + acc.Wait(25000) + require.Equal(t, int64(25000), int64(acc.NMetrics())) +} + +func TestReceive404ForInvalidEndpoint(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer 
listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/foobar", ""), "", bytes.NewBuffer([]byte(testMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 404, resp.StatusCode) +} + +func TestWriteInvalid(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(badMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 400, resp.StatusCode) +} + +func TestWriteEmpty(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", "db=mydb"), "", bytes.NewBuffer([]byte(emptyMsg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestQuery(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post query to listener + resp, err := http.Post( + createURL(listener, "http", "/query", "db=&q=CREATE+DATABASE+IF+NOT+EXISTS+%22mydb%22"), "", nil) + require.NoError(t, err) + require.EqualValues(t, 200, resp.StatusCode) +} + +func TestPing(t *testing.T) { + listener := newTestListener() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post ping to listener + resp, err := http.Post(createURL(listener, "http", "/ping", ""), "", nil) + require.NoError(t, err) + require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) + require.Len(t, resp.Header["Content-Type"], 0) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) +} + +func TestPingVerbose(t *testing.T) { + listener := newTestListener() + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post ping to listener + resp, err := http.Post(createURL(listener, "http", "/ping", "verbose=1"), "", nil) + require.NoError(t, err) + require.Equal(t, "1.0", resp.Header["X-Influxdb-Version"][0]) + require.Equal(t, "application/json", resp.Header["Content-Type"][0]) + resp.Body.Close() + require.EqualValues(t, 200, resp.StatusCode) +} + +func TestWriteWithPrecision(t *testing.T) { + listener := newTestListener() + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + msg := "xyzzy value=42 1422568543\n" + resp, err := http.Post( + createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + require.Equal(t, 1, len(acc.Metrics)) + // When timestamp is provided, the precision parameter is + // overloaded to specify the timestamp's unit + require.Equal(t, time.Unix(0, 1422568543000000000), acc.Metrics[0].Time) +} + +func TestWriteWithPrecisionNoTimestamp(t *testing.T) { + listener := newTestListener() + listener.timeFunc = func() time.Time { + return time.Unix(42, 
123456789) + } + + acc := &testutil.Accumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + msg := "xyzzy value=42\n" + resp, err := http.Post( + createURL(listener, "http", "/write", "precision=s"), "", bytes.NewBuffer([]byte(msg))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 204, resp.StatusCode) + + acc.Wait(1) + require.Equal(t, 1, len(acc.Metrics)) + // When timestamp is omitted, the precision parameter actually + // specifies the precision. The timestamp is set to the greatest + // integer unit less than the provided timestamp (floor). + require.Equal(t, time.Unix(42, 0), acc.Metrics[0].Time) +} + +func TestWriteParseErrors(t *testing.T) { + var tests = []struct { + name string + input string + expected string + }{ + { + name: "one parse error", + input: "foo value=1.0\nfoo value=2asdf2.0\nfoo value=3.0\nfoo value=4.0", + expected: `metric parse error: expected field at 2:12: "foo value=2"`, + }, + { + name: "two parse errors", + input: "foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4.0", + expected: `metric parse error: expected field at 1:12: "foo value=1" (and 1 other parse error)`, + }, + { + name: "three or more parse errors", + input: "foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4asdf2.0", + expected: `metric parse error: expected field at 1:12: "foo value=1" (and 2 other parse errors)`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + listener := newTestListener() + + acc := &testutil.NopAccumulator{} + require.NoError(t, listener.Init()) + require.NoError(t, listener.Start(acc)) + defer listener.Stop() + + // post single message to listener + resp, err := http.Post(createURL(listener, "http", "/write", ""), "", bytes.NewBuffer([]byte(tt.input))) + require.NoError(t, err) + resp.Body.Close() + require.EqualValues(t, 400, resp.StatusCode) + require.Equal(t, tt.expected, resp.Header["X-Influxdb-Error"][0]) + }) + } +} + +const hugeMetric = `super_long_metric,foo=bar 
clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patter
ns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07
,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=
0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_
backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,u
sed_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl
_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_
processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connec
ted_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes
_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_chi
ldren=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitra
te=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048
576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33
792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fr
agmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,tota
l_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evi
cted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=
0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_
user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hit
s=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=1i,connected_slaves=0i,evicted_keys=0i,expired_keys=0i,instantaneous_ops_per_sec=0i,keyspace_hitrate=0,keyspace_hits=0i,keyspace_misses=2i,latest_fork_usec=0i,master_repl_offset=0i,mem_fragmentation_ratio=3.58,pubsub_channels=0i,pubsub_patterns=0i,rdb_changes_since_last_save=0i,repl_backlog_active=0i,repl_backlog_histlen=0i,repl_backlog_size=1048576i,sync_full=0i,sync_partial_err=0i,sync_partial_ok=0i,total_commands_processed=4i,total_connections_received=2i,uptime=869i,used_cpu_sys=0.07,used_cpu_sys_children=0,used_cpu_user=0.1,used_cpu_user_children=0,used_memory=502048i,used_memory_lua=33792i,used_memory_peak=501128i,used_memory_rss=1798144i,clients=42i,connected_slaves=43i,evicted_keys=44i,expired_keys=45i,instantaneous_ops_per_sec=46i,keyspace_hitrate=47,keyspace_hits=48i,keyspace_misses=49i,latest_fork_usec=50i,master_repl_offset=51i,mem_fragmentation_ratio=52.58,pubsub_channels=53i,pubsub_patterns=54i,rdb_changes_since_last_save=55i,repl_backlog_active=56i,repl_backlog_histlen=57i,repl_backlog_size=58i,sync_full=59i,sync_partial_err=60i,sync_partial_ok=61i,total_commands_processed=62i,total_connections_received=63i,uptime=64i,used_cpu_sys=65.07,used_cpu_sys_children=66,used_cpu_user=67.1,used_cpu_user_children=68,used_memory=692048i,used_memory_lua=70792i,used_memory_peak=711128i,used_memory_rss=7298144i +` diff --git a/plugins/inputs/influxdb_listener/testdata/testmsgs.gz b/plugins/inputs/influxdb_listener/testdata/testmsgs.gz new file mode 100644 index 000000000..f524dc071 Binary files /dev/null and b/plugins/inputs/influxdb_listener/testdata/testmsgs.gz differ diff --git a/plugins/inputs/internal/README.md b/plugins/inputs/internal/README.md index 
fbec4d86f..1f7fa645c 100644
--- a/plugins/inputs/internal/README.md
+++ b/plugins/inputs/internal/README.md
@@ -18,66 +18,71 @@ plugin.
 memstats are taken from the Go runtime: https://golang.org/pkg/runtime/#MemStats
 
-- internal\_memstats
-    - alloc\_bytes
+- internal_memstats
+    - alloc_bytes
     - frees
-    - heap\_alloc\_bytes
-    - heap\_idle\_bytes
-    - heap\_in\_use\_bytes
-    - heap\_objects\_bytes
-    - heap\_released\_bytes
-    - heap\_sys\_bytes
+    - heap_alloc_bytes
+    - heap_idle_bytes
+    - heap_in_use_bytes
+    - heap_objects_bytes
+    - heap_released_bytes
+    - heap_sys_bytes
     - mallocs
-    - num\_gc
-    - pointer\_lookups
-    - sys\_bytes
-    - total\_alloc\_bytes
+    - num_gc
+    - pointer_lookups
+    - sys_bytes
+    - total_alloc_bytes
 
 agent stats collect aggregate stats on all telegraf plugins.
 
-- internal\_agent
-    - gather\_errors
-    - metrics\_dropped
-    - metrics\_gathered
-    - metrics\_written
+- internal_agent
+    - gather_errors
+    - metrics_dropped
+    - metrics_gathered
+    - metrics_written
 
-internal\_gather stats collect aggregate stats on all input plugins
-that are of the same input type. They are tagged with `input=<plugin_name>`.
+internal_gather stats collect aggregate stats on all input plugins
+that are of the same input type. They are tagged with `input=<plugin_name>`
+`version=<telegraf_version>` and `go_version=<go_version>`.
 
-- internal\_gather
-    - gather\_time\_ns
-    - metrics\_gathered
+- internal_gather
+    - gather_time_ns
+    - metrics_gathered
 
-internal\_write stats collect aggregate stats on all output plugins
-that are of the same input type. They are tagged with `output=<plugin_name>`.
+internal_write stats collect aggregate stats on all output plugins
+that are of the same output type. They are tagged with `output=<plugin_name>`
+and `version=<telegraf_version>`.
 
-- internal\_write
-    - buffer\_limit
-    - buffer\_size
-    - metrics\_written
-    - metrics\_filtered
-    - write\_time\_ns
+- internal_write
+    - buffer_limit
+    - buffer_size
+    - metrics_added
+    - metrics_written
+    - metrics_dropped
+    - metrics_filtered
+    - write_time_ns
 
-internal\_\<plugin\_name\> are metrics which are defined on a per-plugin basis, and
+internal_<plugin_name> are metrics which are defined on a per-plugin basis, and
 usually contain tags which differentiate each instance of a particular type of
-plugin.
+plugin and `version=<telegraf_version>`.
 
-- internal\_\<plugin\_name\>
+- internal_<plugin_name>
     - individual plugin-specific fields, such as requests counts.
 
 ### Tags:
 
 All measurements for specific plugins are tagged with information relevant
-to each particular plugin.
+to each particular plugin and with `version=<telegraf_version>`.
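For reference, a minimal sketch (assuming the Telegraf source tree) of how the version tagging described above can be applied when draining the selfstat registry. It mirrors the `internal.go` change further down in this diff; the `selfstat` and `Accumulator` calls are the ones that change uses, while the function name `gatherSelfstats` is illustrative only:

```go
package internal

import (
    "runtime"
    "strings"

    "github.com/influxdata/telegraf"
    inter "github.com/influxdata/telegraf/internal"
    "github.com/influxdata/telegraf/selfstat"
)

// gatherSelfstats drains the selfstat registry, tagging every metric with the
// Telegraf version and internal_agent additionally with the Go runtime version.
func gatherSelfstats(acc telegraf.Accumulator) {
    telegrafVersion := inter.Version()                       // e.g. "1.99.0"
    goVersion := strings.TrimPrefix(runtime.Version(), "go") // "go1.12.7" -> "1.12.7"

    for _, m := range selfstat.Metrics() {
        if m.Name() == "internal_agent" {
            m.AddTag("go_version", goVersion)
        }
        m.AddTag("version", telegrafVersion)
        acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time())
    }
}
```

With this in place every `internal_*` series carries a `version` tag, and `internal_agent` additionally carries `go_version`, as shown in the example output below.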
+ ### Example Output: ``` internal_memstats,host=tyrion alloc_bytes=4457408i,sys_bytes=10590456i,pointer_lookups=7i,mallocs=17642i,frees=7473i,heap_sys_bytes=6848512i,heap_idle_bytes=1368064i,heap_in_use_bytes=5480448i,heap_released_bytes=0i,total_alloc_bytes=6875560i,heap_alloc_bytes=4457408i,heap_objects_bytes=10169i,num_gc=2i 1480682800000000000 -internal_agent,host=tyrion metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000 -internal_write,output=file,host=tyrion buffer_limit=10000i,write_time_ns=636609i,metrics_written=18i,buffer_size=0i 1480682800000000000 -internal_gather,input=internal,host=tyrion metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000 -internal_gather,input=http_listener,host=tyrion metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000 -internal_http_listener,address=:8186,host=tyrion queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000 +internal_agent,host=tyrion,go_version=1.12.7,version=1.99.0 metrics_written=18i,metrics_dropped=0i,metrics_gathered=19i,gather_errors=0i 1480682800000000000 +internal_write,output=file,host=tyrion,version=1.99.0 buffer_limit=10000i,write_time_ns=636609i,metrics_added=18i,metrics_written=18i,buffer_size=0i 1480682800000000000 +internal_gather,input=internal,host=tyrion,version=1.99.0 metrics_gathered=19i,gather_time_ns=442114i 1480682800000000000 +internal_gather,input=http_listener,host=tyrion,version=1.99.0 metrics_gathered=0i,gather_time_ns=167285i 1480682800000000000 +internal_http_listener,address=:8186,host=tyrion,version=1.99.0 queries_received=0i,writes_received=0i,requests_received=0i,buffers_created=0i,requests_served=0i,pings_received=0i,bytes_received=0i,not_founds_served=0i,pings_served=0i,queries_served=0i,writes_served=0i 1480682800000000000 ``` diff --git a/plugins/inputs/internal/internal.go b/plugins/inputs/internal/internal.go index 8b5286f56..2eb8b91c9 100644 --- a/plugins/inputs/internal/internal.go +++ b/plugins/inputs/internal/internal.go @@ -2,8 +2,10 @@ package internal import ( "runtime" + "strings" "github.com/influxdata/telegraf" + inter "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/selfstat" ) @@ -54,7 +56,14 @@ func (s *Self) Gather(acc telegraf.Accumulator) error { acc.AddFields("internal_memstats", fields, map[string]string{}) } + telegrafVersion := inter.Version() + goVersion := strings.TrimPrefix(runtime.Version(), "go") + for _, m := range selfstat.Metrics() { + if m.Name() == "internal_agent" { + m.AddTag("go_version", goVersion) + } + m.AddTag("version", telegrafVersion) acc.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) } diff --git a/plugins/inputs/internal/internal_test.go b/plugins/inputs/internal/internal_test.go index b17c53038..4cdba9099 100644 --- a/plugins/inputs/internal/internal_test.go +++ b/plugins/inputs/internal/internal_test.go @@ -26,7 +26,8 @@ func TestSelfPlugin(t *testing.T) { "test": int64(3), }, map[string]string{ - "test": "foo", + "test": "foo", + "version": "", }, ) acc.ClearMetrics() @@ -39,7 +40,8 @@ func TestSelfPlugin(t *testing.T) { "test": int64(101), }, map[string]string{ - "test": "foo", + "test": "foo", + "version": "", }, ) acc.ClearMetrics() @@ -56,7 +58,8 @@ func TestSelfPlugin(t *testing.T) { "test_ns": int64(150), }, map[string]string{ - "test": 
"foo", + "test": "foo", + "version": "", }, ) } diff --git a/plugins/inputs/interrupts/README.md b/plugins/inputs/interrupts/README.md index eb1e3979d..5da647f47 100644 --- a/plugins/inputs/interrupts/README.md +++ b/plugins/inputs/interrupts/README.md @@ -3,33 +3,81 @@ The interrupts plugin gathers metrics about IRQs from `/proc/interrupts` and `/proc/softirqs`. ### Configuration -``` +```toml [[inputs.interrupts]] + ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is + ## stored as a field. + ## + ## The default is false for backwards compatibility, and will be changed to + ## true in a future version. It is recommended to set to true on new + ## deployments. + # cpu_as_tag = false + ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. # [inputs.interrupts.tagdrop] - # irq = [ "NET_RX", "TASKLET" ] + # irq = [ "NET_RX", "TASKLET" ] ``` -### Measurements -There are two measurements reported by this plugin. -- `interrupts` gathers metrics from the `/proc/interrupts` file -- `soft_interrupts` gathers metrics from the `/proc/softirqs` file +### Metrics -### Fields -- CPUx: the amount of interrupts for the IRQ handled by that CPU -- total: total amount of interrupts for all CPUs +There are two styles depending on the value of `cpu_as_tag`. -### Tags -- irq: the IRQ -- type: the type of interrupt -- device: the name of the device that is located at that IRQ +With `cpu_as_tag = false`: + +- interrupts + - tags: + - irq (IRQ name) + - type + - device (name of the device that is located at the IRQ) + - cpu + - fields: + - cpu (int, number of interrupts per cpu) + - total (int, total number of interrupts) + +- soft_interrupts + - tags: + - irq (IRQ name) + - type + - device (name of the device that is located at the IRQ) + - cpu + - fields: + - cpu (int, number of interrupts per cpu) + - total (int, total number of interrupts) + +With `cpu_as_tag = true`: + +- interrupts + - tags: + - irq (IRQ name) + - type + - device (name of the device that is located at the IRQ) + - cpu + - fields: + - count (int, number of interrupts) + +- soft_interrupts + - tags: + - irq (IRQ name) + - type + - device (name of the device that is located at the IRQ) + - cpu + - fields: + - count (int, number of interrupts) ### Example Output + +With `cpu_as_tag = false`: ``` -./telegraf --config ~/interrupts_config.conf --test -* Plugin: inputs.interrupts, Collection 1 -> interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,host=hostname CPU0=23i,total=23i 1489346531000000000 -> interrupts,irq=1,host=hostname,type=IO-APIC,device=1-edge\ i8042 CPU0=9i,total=9i 1489346531000000000 -> interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,host=hostname CPU0=1i,total=1i 1489346531000000000 -> soft_interrupts,irq=NET_RX,host=hostname CPU0=280879i,total=280879i 1489346531000000000 +interrupts,irq=0,type=IO-APIC,device=2-edge\ timer,cpu=cpu0 count=23i 1489346531000000000 +interrupts,irq=1,type=IO-APIC,device=1-edge\ i8042,cpu=cpu0 count=9i 1489346531000000000 +interrupts,irq=30,type=PCI-MSI,device=65537-edge\ virtio1-input.0,cpu=cpu1 count=1i 1489346531000000000 +soft_interrupts,irq=NET_RX,cpu=cpu0 count=280879i 1489346531000000000 +``` + +With `cpu_as_tag = true`: +``` +interrupts,cpu=cpu6,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000 +interrupts,cpu=cpu7,irq=PIW,type=Posted-interrupt\ wakeup\ event count=0i 1543539773000000000 +soft_interrupts,cpu=cpu0,irq=HI count=246441i 1543539773000000000 +soft_interrupts,cpu=cpu1,irq=HI count=159154i 
1543539773000000000 ``` diff --git a/plugins/inputs/interrupts/interrupts.go b/plugins/inputs/interrupts/interrupts.go index 75cbf3be1..39b3020dd 100644 --- a/plugins/inputs/interrupts/interrupts.go +++ b/plugins/inputs/interrupts/interrupts.go @@ -3,15 +3,18 @@ package interrupts import ( "bufio" "fmt" - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/inputs" "io" "os" "strconv" "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" ) -type Interrupts struct{} +type Interrupts struct { + CpuAsTag bool `toml:"cpu_as_tag"` +} type IRQ struct { ID string @@ -26,9 +29,17 @@ func NewIRQ(id string) *IRQ { } const sampleConfig = ` + ## When set to true, cpu metrics are tagged with the cpu. Otherwise cpu is + ## stored as a field. + ## + ## The default is false for backwards compatibility, and will be changed to + ## true in a future version. It is recommended to set to true on new + ## deployments. + # cpu_as_tag = false + ## To filter which IRQs to collect, make use of tagpass / tagdrop, i.e. # [inputs.interrupts.tagdrop] - # irq = [ "NET_RX", "TASKLET" ] + # irq = [ "NET_RX", "TASKLET" ] ` func (s *Interrupts) Description() string { @@ -50,6 +61,8 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) { } cpucount = len(cpus) } + +scan: for scanner.Scan() { fields := strings.Fields(scanner.Text()) if !strings.HasSuffix(fields[0], ":") { @@ -57,12 +70,12 @@ func parseInterrupts(r io.Reader) ([]IRQ, error) { } irqid := strings.TrimRight(fields[0], ":") irq := NewIRQ(irqid) - irqvals := fields[1:len(fields)] + irqvals := fields[1:] for i := 0; i < cpucount; i++ { if i < len(irqvals) { irqval, err := strconv.ParseInt(irqvals[i], 10, 64) if err != nil { - return irqs, fmt.Errorf("Unable to parse %q from %q: %s", irqvals[i], scanner.Text(), err) + continue scan } irq.Cpus = append(irq.Cpus, irqval) } @@ -108,12 +121,26 @@ func (s *Interrupts) Gather(acc telegraf.Accumulator) error { acc.AddError(fmt.Errorf("Parsing %s: %s", file, err)) continue } - for _, irq := range irqs { - tags, fields := gatherTagsFields(irq) + reportMetrics(measurement, irqs, acc, s.CpuAsTag) + } + return nil +} + +func reportMetrics(measurement string, irqs []IRQ, acc telegraf.Accumulator, cpusAsTags bool) { + for _, irq := range irqs { + tags, fields := gatherTagsFields(irq) + if cpusAsTags { + for cpu, count := range irq.Cpus { + cpuTags := map[string]string{"cpu": fmt.Sprintf("cpu%d", cpu)} + for k, v := range tags { + cpuTags[k] = v + } + acc.AddFields(measurement, map[string]interface{}{"count": count}, cpuTags) + } + } else { acc.AddFields(measurement, fields, tags) } } - return nil } func init() { diff --git a/plugins/inputs/interrupts/interrupts_test.go b/plugins/inputs/interrupts/interrupts_test.go index 6c76c8504..63ff765b6 100644 --- a/plugins/inputs/interrupts/interrupts_test.go +++ b/plugins/inputs/interrupts/interrupts_test.go @@ -2,59 +2,155 @@ package interrupts import ( "bytes" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" + "fmt" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) -func TestParseInterrupts(t *testing.T) { - interruptStr := ` CPU0 CPU1 - 0: 134 0 IO-APIC-edge timer - 1: 7 3 IO-APIC-edge i8042 -NMI: 0 0 Non-maskable interrupts -LOC: 2338608687 2334309625 Local timer interrupts -MIS: 0 -NET_RX: 867028 225 -TASKLET: 205 0` - f := bytes.NewBufferString(interruptStr) - parsed := []IRQ{ - IRQ{ - ID: "0", Type: "IO-APIC-edge", Device: "timer", - Cpus: 
[]int64{int64(134), int64(0)}, Total: int64(134), - }, - IRQ{ - ID: "1", Type: "IO-APIC-edge", Device: "i8042", - Cpus: []int64{int64(7), int64(3)}, Total: int64(10), - }, - IRQ{ - ID: "NMI", Type: "Non-maskable interrupts", - Cpus: []int64{int64(0), int64(0)}, Total: int64(0), - }, - IRQ{ - ID: "LOC", Type: "Local timer interrupts", - Cpus: []int64{int64(2338608687), int64(2334309625)}, - Total: int64(4672918312), - }, - IRQ{ - ID: "MIS", Cpus: []int64{int64(0)}, Total: int64(0), - }, - IRQ{ - ID: "NET_RX", Cpus: []int64{int64(867028), int64(225)}, - Total: int64(867253), - }, - IRQ{ - ID: "TASKLET", Cpus: []int64{int64(205), int64(0)}, - Total: int64(205), - }, - } - got, err := parseInterrupts(f) - require.Equal(t, nil, err) - require.NotEqual(t, 0, len(got)) - require.Equal(t, len(got), len(parsed)) - for i := 0; i < len(parsed); i++ { - assert.Equal(t, parsed[i], got[i]) - for k := 0; k < len(parsed[i].Cpus); k++ { - assert.Equal(t, parsed[i].Cpus[k], got[i].Cpus[k]) - } +// ===================================================================================== +// Setup and helper functions +// ===================================================================================== + +func expectCpuAsTags(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { + for idx, value := range irq.Cpus { + m.AssertContainsTaggedFields(t, measurement, map[string]interface{}{"count": value}, map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device, "cpu": fmt.Sprintf("cpu%d", idx)}) + } +} + +func expectCpuAsFields(m *testutil.Accumulator, t *testing.T, measurement string, irq IRQ) { + fields := map[string]interface{}{} + total := int64(0) + for idx, count := range irq.Cpus { + fields[fmt.Sprintf("CPU%d", idx)] = count + total += count + } + fields["total"] = total + + m.AssertContainsTaggedFields(t, measurement, fields, map[string]string{"irq": irq.ID, "type": irq.Type, "device": irq.Device}) +} + +func setup(t *testing.T, irqString string, cpuAsTags bool) (*testutil.Accumulator, []IRQ) { + f := bytes.NewBufferString(irqString) + irqs, err := parseInterrupts(f) + require.Equal(t, nil, err) + require.NotEqual(t, 0, len(irqs)) + + acc := new(testutil.Accumulator) + reportMetrics("soft_interrupts", irqs, acc, cpuAsTags) + + return acc, irqs +} + +// ===================================================================================== +// Soft interrupts +// ===================================================================================== + +const softIrqsString = ` CPU0 CPU1 + 0: 134 0 IO-APIC-edge timer + 1: 7 3 IO-APIC-edge i8042 + NMI: 0 0 Non-maskable interrupts + LOC: 2338608687 2334309625 Local timer interrupts + MIS: 0 + NET_RX: 867028 225 + TASKLET: 205 0` + +var softIrqsExpectedArgs = []IRQ{ + {ID: "0", Type: "IO-APIC-edge", Device: "timer", Cpus: []int64{134, 0}}, + {ID: "1", Type: "IO-APIC-edge", Device: "i8042", Cpus: []int64{7, 3}}, + {ID: "NMI", Type: "Non-maskable interrupts", Cpus: []int64{0, 0}}, + {ID: "MIS", Cpus: []int64{0}}, + {ID: "NET_RX", Cpus: []int64{867028, 225}}, + {ID: "TASKLET", Cpus: []int64{205, 0}}, +} + +func TestCpuAsTagsSoftIrqs(t *testing.T) { + acc, irqs := setup(t, softIrqsString, true) + reportMetrics("soft_interrupts", irqs, acc, true) + + for _, irq := range softIrqsExpectedArgs { + expectCpuAsTags(acc, t, "soft_interrupts", irq) + } +} + +func TestCpuAsFieldsSoftIrqs(t *testing.T) { + acc, irqs := setup(t, softIrqsString, false) + reportMetrics("soft_interrupts", irqs, acc, false) + + for _, irq := range softIrqsExpectedArgs 
{ + expectCpuAsFields(acc, t, "soft_interrupts", irq) + } +} + +// ===================================================================================== +// HW interrupts, tests #4470 +// ===================================================================================== + +const hwIrqsString = ` CPU0 CPU1 CPU2 CPU3 + 16: 0 0 0 0 bcm2836-timer 0 Edge arch_timer + 17: 127224250 118424219 127224437 117885416 bcm2836-timer 1 Edge arch_timer + 21: 0 0 0 0 bcm2836-pmu 9 Edge arm-pmu + 23: 1549514 0 0 0 ARMCTRL-level 1 Edge 3f00b880.mailbox + 24: 2 0 0 0 ARMCTRL-level 2 Edge VCHIQ doorbell + 46: 0 0 0 0 ARMCTRL-level 48 Edge bcm2708_fb dma + 48: 0 0 0 0 ARMCTRL-level 50 Edge DMA IRQ + 50: 0 0 0 0 ARMCTRL-level 52 Edge DMA IRQ + 51: 208 0 0 0 ARMCTRL-level 53 Edge DMA IRQ + 54: 883002 0 0 0 ARMCTRL-level 56 Edge DMA IRQ + 59: 0 0 0 0 ARMCTRL-level 61 Edge bcm2835-auxirq + 62: 521451447 0 0 0 ARMCTRL-level 64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1 + 86: 857597 0 0 0 ARMCTRL-level 88 Edge mmc0 + 87: 4938 0 0 0 ARMCTRL-level 89 Edge uart-pl011 + 92: 5669 0 0 0 ARMCTRL-level 94 Edge mmc1 + FIQ: usb_fiq + IPI0: 0 0 0 0 CPU wakeup interrupts + IPI1: 0 0 0 0 Timer broadcast interrupts + IPI2: 23564958 23464876 23531165 23040826 Rescheduling interrupts + IPI3: 148438 639704 644266 588150 Function call interrupts + IPI4: 0 0 0 0 CPU stop interrupts + IPI5: 4348149 1843985 3819457 1822877 IRQ work interrupts + IPI6: 0 0 0 0 completion interrupts` + +var hwIrqsExpectedArgs = []IRQ{ + {ID: "16", Type: "bcm2836-timer", Device: "0 Edge arch_timer", Cpus: []int64{0, 0, 0, 0}}, + {ID: "17", Type: "bcm2836-timer", Device: "1 Edge arch_timer", Cpus: []int64{127224250, 118424219, 127224437, 117885416}}, + {ID: "21", Type: "bcm2836-pmu", Device: "9 Edge arm-pmu", Cpus: []int64{0, 0, 0, 0}}, + {ID: "23", Type: "ARMCTRL-level", Device: "1 Edge 3f00b880.mailbox", Cpus: []int64{1549514, 0, 0, 0}}, + {ID: "24", Type: "ARMCTRL-level", Device: "2 Edge VCHIQ doorbell", Cpus: []int64{2, 0, 0, 0}}, + {ID: "46", Type: "ARMCTRL-level", Device: "48 Edge bcm2708_fb dma", Cpus: []int64{0, 0, 0, 0}}, + {ID: "48", Type: "ARMCTRL-level", Device: "50 Edge DMA IRQ", Cpus: []int64{0, 0, 0, 0}}, + {ID: "50", Type: "ARMCTRL-level", Device: "52 Edge DMA IRQ", Cpus: []int64{0, 0, 0, 0}}, + {ID: "51", Type: "ARMCTRL-level", Device: "53 Edge DMA IRQ", Cpus: []int64{208, 0, 0, 0}}, + {ID: "54", Type: "ARMCTRL-level", Device: "56 Edge DMA IRQ", Cpus: []int64{883002, 0, 0, 0}}, + {ID: "59", Type: "ARMCTRL-level", Device: "61 Edge bcm2835-auxirq", Cpus: []int64{0, 0, 0, 0}}, + {ID: "62", Type: "ARMCTRL-level", Device: "64 Edge dwc_otg, dwc_otg_pcd, dwc_otg_hcd:usb1", Cpus: []int64{521451447, 0, 0, 0}}, + {ID: "86", Type: "ARMCTRL-level", Device: "88 Edge mmc0", Cpus: []int64{857597, 0, 0, 0}}, + {ID: "87", Type: "ARMCTRL-level", Device: "89 Edge uart-pl011", Cpus: []int64{4938, 0, 0, 0}}, + {ID: "92", Type: "ARMCTRL-level", Device: "94 Edge mmc1", Cpus: []int64{5669, 0, 0, 0}}, + {ID: "IPI0", Type: "CPU wakeup interrupts", Cpus: []int64{0, 0, 0, 0}}, + {ID: "IPI1", Type: "Timer broadcast interrupts", Cpus: []int64{0, 0, 0, 0}}, + {ID: "IPI2", Type: "Rescheduling interrupts", Cpus: []int64{23564958, 23464876, 23531165, 23040826}}, + {ID: "IPI3", Type: "Function call interrupts", Cpus: []int64{148438, 639704, 644266, 588150}}, + {ID: "IPI4", Type: "CPU stop interrupts", Cpus: []int64{0, 0, 0, 0}}, + {ID: "IPI5", Type: "IRQ work interrupts", Cpus: []int64{4348149, 1843985, 3819457, 1822877}}, + {ID: "IPI6", Type: "completion interrupts", 
Cpus: []int64{0, 0, 0, 0}}, +} + +func TestCpuAsTagsHwIrqs(t *testing.T) { + acc, irqs := setup(t, hwIrqsString, true) + reportMetrics("interrupts", irqs, acc, true) + + for _, irq := range hwIrqsExpectedArgs { + expectCpuAsTags(acc, t, "interrupts", irq) + } +} + +func TestCpuAsFieldsHwIrqs(t *testing.T) { + acc, irqs := setup(t, hwIrqsString, false) + reportMetrics("interrupts", irqs, acc, false) + + for _, irq := range hwIrqsExpectedArgs { + expectCpuAsFields(acc, t, "interrupts", irq) } } diff --git a/plugins/inputs/ipmi_sensor/README.md b/plugins/inputs/ipmi_sensor/README.md index 74cfe3bc5..2fd7cc707 100644 --- a/plugins/inputs/ipmi_sensor/README.md +++ b/plugins/inputs/ipmi_sensor/README.md @@ -8,6 +8,10 @@ If no servers are specified, the plugin will query the local machine sensor stat ``` ipmitool sdr ``` +or with the version 2 schema: +``` +ipmitool sdr elist +``` When one or more servers are specified, the plugin will use the following command to collect remote host sensor stats: @@ -23,6 +27,11 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ## optionally specify the path to the ipmitool executable # path = "/usr/bin/ipmitool" ## + ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. + ## Sudo must be configured to allow the telegraf user to run ipmitool + ## without a password. + # use_sudo = false + ## ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR # privilege = "ADMINISTRATOR" ## @@ -35,25 +44,42 @@ ipmitool -I lan -H SERVER -U USERID -P PASSW0RD sdr ## # servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] - ## Recomended: use metric 'interval' that is a multiple of 'timeout' to avoid + ## Recommended: use metric 'interval' that is a multiple of 'timeout' to avoid ## gaps or overlap in pulled data interval = "30s" ## Timeout for the ipmitool command to complete. Default is 20 seconds. timeout = "20s" + + ## Schema Version: (Optional, defaults to version 1) + metric_version = 2 ``` ### Measurements +Version 1 schema: - ipmi_sensor: - tags: - name - unit + - host - server (only when retrieving stats from remote servers) - fields: - - status (int) + - status (int, 1=ok status_code/0=anything else) - value (float) +Version 2 schema: +- ipmi_sensor: + - tags: + - name + - entity_id (can help uniquify duplicate names) + - status_code (two letter code from IPMI documentation) + - status_desc (extended status description field) + - unit (only on analog values) + - host + - server (only when retrieving stats from remote) + - fields: + - value (float) #### Permissions @@ -65,27 +91,54 @@ ipmi device node. When using udev you can create the device node giving ``` KERNEL=="ipmi*", MODE="660", GROUP="telegraf" ``` +Alternatively, it is possible to use sudo. 
You will need the following in your telegraf config: +```toml +[[inputs.ipmi_sensor]] + use_sudo = true +``` + +You will also need to update your sudoers file: + +```bash +$ visudo +# Add the following line: +Cmnd_Alias IPMITOOL = /usr/bin/ipmitool * +telegraf ALL=(root) NOPASSWD: IPMITOOL +Defaults!IPMITOOL !logfile, !syslog, !pam_session +``` ### Example Output +#### Version 1 Schema When retrieving stats from a remote server: ``` -ipmi_sensor,server=10.20.2.203,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 -ipmi_sensor,server=10.20.2.203,unit=feet,name=altitude status=1i,value=80 1458488465012688613 -ipmi_sensor,server=10.20.2.203,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 -ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 -ipmi_sensor,server=10.20.2.203,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 -ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 -ipmi_sensor,server=10.20.2.203,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 +ipmi_sensor,server=10.20.2.203,name=uid_light value=0,status=1i 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=sys._health_led status=1i,value=0 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=power_supply_2,unit=watts status=1i,value=120 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=power_supplies value=0,status=1i 1517125513000000000 +ipmi_sensor,server=10.20.2.203,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 ``` + When retrieving stats from the local machine (no server specified): ``` -ipmi_sensor,unit=degrees_c,name=ambient_temp status=1i,value=20 1458488465012559455 -ipmi_sensor,unit=feet,name=altitude status=1i,value=80 1458488465012688613 -ipmi_sensor,unit=watts,name=avg_power status=1i,value=220 1458488465012776511 -ipmi_sensor,unit=volts,name=planar_3.3v status=1i,value=3.28 1458488465012861875 -ipmi_sensor,unit=volts,name=planar_vbat status=1i,value=3.04 1458488465013072508 -ipmi_sensor,unit=rpm,name=fan_1a_tach status=1i,value=2610 1458488465013137932 -ipmi_sensor,unit=rpm,name=fan_1b_tach status=1i,value=1775 1458488465013279896 +ipmi_sensor,name=uid_light value=0,status=1i 1517125513000000000 +ipmi_sensor,name=sys._health_led status=1i,value=0 1517125513000000000 +ipmi_sensor,name=power_supply_1,unit=watts status=1i,value=110 1517125513000000000 +ipmi_sensor,name=power_supply_2,unit=watts status=1i,value=120 1517125513000000000 +ipmi_sensor,name=power_supplies value=0,status=1i 1517125513000000000 +ipmi_sensor,name=fan_1,unit=percent status=1i,value=43.12 1517125513000000000 +``` + +#### Version 2 Schema + +When retrieving stats from the local machine (no server specified): +``` +ipmi_sensor,name=uid_light,entity_id=23.1,status_code=ok,status_desc=ok value=0 1517125474000000000 +ipmi_sensor,name=sys._health_led,entity_id=23.2,status_code=ok,status_desc=ok value=0 1517125474000000000 +ipmi_sensor,entity_id=10.1,name=power_supply_1,status_code=ok,status_desc=presence_detected,unit=watts value=110 1517125474000000000 +ipmi_sensor,name=power_supply_2,entity_id=10.2,status_code=ok,unit=watts,status_desc=presence_detected value=125 1517125474000000000 +ipmi_sensor,name=power_supplies,entity_id=10.3,status_code=ok,status_desc=fully_redundant value=0 1517125474000000000 
+ipmi_sensor,entity_id=7.1,name=fan_1,status_code=ok,status_desc=transition_to_running,unit=percent value=43.12 1517125474000000000 ``` diff --git a/plugins/inputs/ipmi_sensor/connection.go b/plugins/inputs/ipmi_sensor/connection.go index 87922b984..8ce5e3448 100644 --- a/plugins/inputs/ipmi_sensor/connection.go +++ b/plugins/inputs/ipmi_sensor/connection.go @@ -28,7 +28,7 @@ func NewConnection(server string, privilege string) *Connection { if inx1 > 0 { security := server[0:inx1] - connstr = server[inx1+1 : len(server)] + connstr = server[inx1+1:] up := strings.SplitN(security, ":", 2) conn.Username = up[0] conn.Password = up[1] diff --git a/plugins/inputs/ipmi_sensor/ipmi.go b/plugins/inputs/ipmi_sensor/ipmi.go index ee99b0a3d..fb53e1bc7 100644 --- a/plugins/inputs/ipmi_sensor/ipmi.go +++ b/plugins/inputs/ipmi_sensor/ipmi.go @@ -1,8 +1,12 @@ package ipmi_sensor import ( + "bufio" + "bytes" "fmt" + "log" "os/exec" + "regexp" "strconv" "strings" "sync" @@ -14,20 +18,32 @@ import ( ) var ( - execCommand = exec.Command // execCommand is used to mock commands in tests. + execCommand = exec.Command // execCommand is used to mock commands in tests. + re_v1_parse_line = regexp.MustCompile(`^(?P[^|]*)\|(?P[^|]*)\|(?P.*)`) + re_v2_parse_line = regexp.MustCompile(`^(?P[^|]*)\|[^|]+\|(?P[^|]*)\|(?P[^|]*)\|(?:(?P[^|]+))?`) + re_v2_parse_description = regexp.MustCompile(`^(?P-?[0-9.]+)\s(?P.*)|(?P.+)|^$`) + re_v2_parse_unit = regexp.MustCompile(`^(?P[^,]+)(?:,\s*(?P.*))?`) ) +// Ipmi stores the configuration values for the ipmi_sensor input plugin type Ipmi struct { - Path string - Privilege string - Servers []string - Timeout internal.Duration + Path string + Privilege string + Servers []string + Timeout internal.Duration + MetricVersion int + UseSudo bool } var sampleConfig = ` ## optionally specify the path to the ipmitool executable # path = "/usr/bin/ipmitool" ## + ## Setting 'use_sudo' to true will make use of sudo to run ipmitool. + ## Sudo must be configured to allow the telegraf user to run ipmitool + ## without a password. + # use_sudo = false + ## ## optionally force session privilege level. Can be CALLBACK, USER, OPERATOR, ADMINISTRATOR # privilege = "ADMINISTRATOR" ## @@ -46,16 +62,22 @@ var sampleConfig = ` ## Timeout for the ipmitool command to complete timeout = "20s" + + ## Schema Version: (Optional, defaults to version 1) + metric_version = 2 ` +// SampleConfig returns the documentation about the sample configuration func (m *Ipmi) SampleConfig() string { return sampleConfig } +// Description returns a basic description for the plugin functions func (m *Ipmi) Description() string { return "Read metrics from the bare metal servers via IPMI" } +// Gather is the main execution function for the plugin func (m *Ipmi) Gather(acc telegraf.Accumulator) error { if len(m.Path) == 0 { return fmt.Errorf("ipmitool not found: verify that ipmitool is installed and that ipmitool is in your PATH") @@ -93,23 +115,39 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { opts = conn.options() } opts = append(opts, "sdr") - cmd := execCommand(m.Path, opts...) + if m.MetricVersion == 2 { + opts = append(opts, "elist") + } + name := m.Path + if m.UseSudo { + // -n - avoid prompting the user for input of any kind + opts = append([]string{"-n", name}, opts...) + name = "sudo" + } + cmd := execCommand(name, opts...) 
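+	// run ipmitool (via sudo when use_sudo is set), bounding its runtime with the configured timeout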
out, err := internal.CombinedOutputTimeout(cmd, m.Timeout.Duration) + timestamp := time.Now() if err != nil { return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } + if m.MetricVersion == 2 { + return parseV2(acc, hostname, out, timestamp) + } + return parseV1(acc, hostname, out, timestamp) +} +func parseV1(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { // each line will look something like // Planar VBAT | 3.05 Volts | ok - lines := strings.Split(string(out), "\n") - for i := 0; i < len(lines); i++ { - vals := strings.Split(lines[i], "|") - if len(vals) != 3 { + scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) + for scanner.Scan() { + ipmiFields := extractFieldsFromRegex(re_v1_parse_line, scanner.Text()) + if len(ipmiFields) != 3 { continue } tags := map[string]string{ - "name": transform(vals[0]), + "name": transform(ipmiFields["name"]), } // tag the server is we have one @@ -118,18 +156,30 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { } fields := make(map[string]interface{}) - if strings.EqualFold("ok", trim(vals[2])) { + if strings.EqualFold("ok", trim(ipmiFields["status_code"])) { fields["status"] = 1 } else { fields["status"] = 0 } - val1 := trim(vals[1]) + description := ipmiFields["description"] - if strings.Index(val1, " ") > 0 { + // handle hex description field + if strings.HasPrefix(description, "0x") { + descriptionInt, err := strconv.ParseInt(description, 0, 64) + if err != nil { + continue + } + + fields["value"] = float64(descriptionInt) + } else if strings.Index(description, " ") > 0 { // split middle column into value and unit - valunit := strings.SplitN(val1, " ", 2) - fields["value"] = Atofloat(valunit[0]) + valunit := strings.SplitN(description, " ", 2) + var err error + fields["value"], err = aToFloat(valunit[0]) + if err != nil { + continue + } if len(valunit) > 1 { tags["unit"] = transform(valunit[1]) } @@ -137,19 +187,90 @@ func (m *Ipmi) parse(acc telegraf.Accumulator, server string) error { fields["value"] = 0.0 } - acc.AddFields("ipmi_sensor", fields, tags, time.Now()) + acc.AddFields("ipmi_sensor", fields, tags, measured_at) } - return nil + return scanner.Err() } -func Atofloat(val string) float64 { +func parseV2(acc telegraf.Accumulator, hostname string, cmdOut []byte, measured_at time.Time) error { + // each line will look something like + // CMOS Battery | 65h | ok | 7.1 | + // Temp | 0Eh | ok | 3.1 | 55 degrees C + // Drive 0 | A0h | ok | 7.1 | Drive Present + scanner := bufio.NewScanner(bytes.NewReader(cmdOut)) + for scanner.Scan() { + ipmiFields := extractFieldsFromRegex(re_v2_parse_line, scanner.Text()) + if len(ipmiFields) < 3 || len(ipmiFields) > 4 { + continue + } + + tags := map[string]string{ + "name": transform(ipmiFields["name"]), + } + + // tag the server is we have one + if hostname != "" { + tags["server"] = hostname + } + tags["entity_id"] = transform(ipmiFields["entity_id"]) + tags["status_code"] = trim(ipmiFields["status_code"]) + fields := make(map[string]interface{}) + descriptionResults := extractFieldsFromRegex(re_v2_parse_description, trim(ipmiFields["description"])) + // This is an analog value with a unit + if descriptionResults["analogValue"] != "" && len(descriptionResults["analogUnit"]) >= 1 { + var err error + fields["value"], err = aToFloat(descriptionResults["analogValue"]) + if err != nil { + continue + } + // Some implementations add an extra status to their analog units + unitResults := 
extractFieldsFromRegex(re_v2_parse_unit, descriptionResults["analogUnit"]) + tags["unit"] = transform(unitResults["realAnalogUnit"]) + if unitResults["statusDesc"] != "" { + tags["status_desc"] = transform(unitResults["statusDesc"]) + } + } else { + // This is a status value + fields["value"] = 0.0 + // Extended status descriptions aren't required, in which case for consistency re-use the status code + if descriptionResults["status"] != "" { + tags["status_desc"] = transform(descriptionResults["status"]) + } else { + tags["status_desc"] = transform(ipmiFields["status_code"]) + } + } + + acc.AddFields("ipmi_sensor", fields, tags, measured_at) + } + + return scanner.Err() +} + +// extractFieldsFromRegex consumes a regex with named capture groups and returns a kvp map of strings with the results +func extractFieldsFromRegex(re *regexp.Regexp, input string) map[string]string { + submatches := re.FindStringSubmatch(input) + results := make(map[string]string) + subexpNames := re.SubexpNames() + if len(subexpNames) > len(submatches) { + log.Printf("D! No matches found in '%s'", input) + return results + } + for i, name := range subexpNames { + if name != input && name != "" && input != "" { + results[name] = trim(submatches[i]) + } + } + return results +} + +// aToFloat converts string representations of numbers to float64 values +func aToFloat(val string) (float64, error) { f, err := strconv.ParseFloat(val, 64) if err != nil { - return 0.0 - } else { - return f + return 0.0, err } + return f, nil } func trim(s string) string { diff --git a/plugins/inputs/ipmi_sensor/ipmi_test.go b/plugins/inputs/ipmi_sensor/ipmi_test.go index 3d45f2fa8..bd5e02c19 100644 --- a/plugins/inputs/ipmi_sensor/ipmi_test.go +++ b/plugins/inputs/ipmi_sensor/ipmi_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" @@ -28,7 +29,7 @@ func TestGather(t *testing.T) { require.NoError(t, err) - assert.Equal(t, acc.NFields(), 266, "non-numeric measurements should be ignored") + assert.Equal(t, acc.NFields(), 262, "non-numeric measurements should be ignored") conn := NewConnection(i.Servers[0], i.Privilege) assert.Equal(t, "USERID", conn.Username) @@ -127,6 +128,7 @@ func TestGather(t *testing.T) { } err = acc.GatherError(i.Gather) + require.NoError(t, err) var testsWithoutServer = []struct { fields map[string]interface{} @@ -378,3 +380,371 @@ OS RealTime Mod | 0x00 | ok } os.Exit(0) } + +func TestGatherV2(t *testing.T) { + i := &Ipmi{ + Servers: []string{"USERID:PASSW0RD@lan(192.168.1.1)"}, + Path: "ipmitool", + Privilege: "USER", + Timeout: internal.Duration{Duration: time.Second * 5}, + MetricVersion: 2, + } + // overwriting exec commands with mock commands + execCommand = fakeExecCommandV2 + var acc testutil.Accumulator + + err := acc.GatherError(i.Gather) + + require.NoError(t, err) + + conn := NewConnection(i.Servers[0], i.Privilege) + assert.Equal(t, "USERID", conn.Username) + assert.Equal(t, "lan", conn.Interface) + + var testsWithServer = []struct { + fields map[string]interface{} + tags map[string]string + }{ + //SEL | 72h | ns | 7.1 | No Reading + { + map[string]interface{}{ + "value": float64(0), + }, + map[string]string{ + "name": "sel", + "entity_id": "7.1", + "status_code": "ns", + "status_desc": "no_reading", + "server": "192.168.1.1", + }, + }, + } + + for _, test := range testsWithServer { + acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, 
test.tags)
+	}
+
+	i = &Ipmi{
+		Path: "ipmitool",
+		Timeout: internal.Duration{Duration: time.Second * 5},
+		MetricVersion: 2,
+	}
+
+	err = acc.GatherError(i.Gather)
+	require.NoError(t, err)
+
+	var testsWithoutServer = []struct {
+		fields map[string]interface{}
+		tags map[string]string
+	}{
+		//SEL | 72h | ns | 7.1 | No Reading
+		{
+			map[string]interface{}{
+				"value": float64(0),
+			},
+			map[string]string{
+				"name": "sel",
+				"entity_id": "7.1",
+				"status_code": "ns",
+				"status_desc": "no_reading",
+			},
+		},
+		//Intrusion | 73h | ok | 7.1 |
+		{
+			map[string]interface{}{
+				"value": float64(0),
+			},
+			map[string]string{
+				"name": "intrusion",
+				"entity_id": "7.1",
+				"status_code": "ok",
+				"status_desc": "ok",
+			},
+		},
+		//Fan1 | 30h | ok | 7.1 | 5040 RPM
+		{
+			map[string]interface{}{
+				"value": float64(5040),
+			},
+			map[string]string{
+				"name": "fan1",
+				"entity_id": "7.1",
+				"status_code": "ok",
+				"unit": "rpm",
+			},
+		},
+		//Inlet Temp | 04h | ok | 7.1 | 25 degrees C
+		{
+			map[string]interface{}{
+				"value": float64(25),
+			},
+			map[string]string{
+				"name": "inlet_temp",
+				"entity_id": "7.1",
+				"status_code": "ok",
+				"unit": "degrees_c",
+			},
+		},
+		//USB Cable Pres | 50h | ok | 7.1 | Connected
+		{
+			map[string]interface{}{
+				"value": float64(0),
+			},
+			map[string]string{
+				"name": "usb_cable_pres",
+				"entity_id": "7.1",
+				"status_code": "ok",
+				"status_desc": "connected",
+			},
+		},
+		//Current 1 | 6Ah | ok | 10.1 | 7.20 Amps
+		{
+			map[string]interface{}{
+				"value": float64(7.2),
+			},
+			map[string]string{
+				"name": "current_1",
+				"entity_id": "10.1",
+				"status_code": "ok",
+				"unit": "amps",
+			},
+		},
+		//Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected
+		{
+			map[string]interface{}{
+				"value": float64(110),
+			},
+			map[string]string{
+				"name": "power_supply_1",
+				"entity_id": "10.1",
+				"status_code": "ok",
+				"unit": "watts",
+				"status_desc": "presence_detected",
+			},
+		},
+	}
+
+	for _, test := range testsWithoutServer {
+		acc.AssertContainsTaggedFields(t, "ipmi_sensor", test.fields, test.tags)
+	}
+}
+
+// fakeExecCommandV2 is a helper function that mocks
+// the exec.Command call (and calls the test binary)
+func fakeExecCommandV2(command string, args ...string) *exec.Cmd {
+	cs := []string{"-test.run=TestHelperProcessV2", "--", command}
+	cs = append(cs, args...)
+	cmd := exec.Command(os.Args[0], cs...)
+	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+	return cmd
+}
+
+// TestHelperProcessV2 isn't a real test. It's used to mock exec.Command.
+// For example, if you run:
+// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcessV2 -- ipmitool sdr elist
+// it returns the mockData below.
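+// The mock prints output in `ipmitool sdr elist` format so parseV2 can be
+// exercised without real hardware; any other command prints an error and exits non-zero.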
+func TestHelperProcessV2(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { + return + } + + // Curated list of use cases instead of full dumps + mockData := `SEL | 72h | ns | 7.1 | No Reading +Intrusion | 73h | ok | 7.1 | +Fan1 | 30h | ok | 7.1 | 5040 RPM +Inlet Temp | 04h | ok | 7.1 | 25 degrees C +USB Cable Pres | 50h | ok | 7.1 | Connected +Current 1 | 6Ah | ok | 10.1 | 7.20 Amps +Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected +` + + args := os.Args + + // Previous arguments are tests stuff, that looks like : + // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- + cmd, args := args[3], args[4:] + + if cmd == "ipmitool" { + fmt.Fprint(os.Stdout, mockData) + } else { + fmt.Fprint(os.Stdout, "command not found") + os.Exit(1) + + } + os.Exit(0) +} + +func TestExtractFields(t *testing.T) { + v1Data := `Ambient Temp | 20 degrees C | ok +Altitude | 80 feet | ok +Avg Power | 210 Watts | ok +Planar 3.3V | 3.29 Volts | ok +Planar 5V | 4.90 Volts | ok +Planar 12V | 12.04 Volts | ok +B | 0x00 | ok +Unable to send command: Invalid argument +ECC Corr Err | Not Readable | ns +Unable to send command: Invalid argument +ECC Uncorr Err | Not Readable | ns +Unable to send command: Invalid argument +` + + v2Data := `SEL | 72h | ns | 7.1 | No Reading +Intrusion | 73h | ok | 7.1 | +Fan1 | 30h | ok | 7.1 | 5040 RPM +Inlet Temp | 04h | ok | 7.1 | 25 degrees C +USB Cable Pres | 50h | ok | 7.1 | Connected +Unable to send command: Invalid argument +Current 1 | 6Ah | ok | 10.1 | 7.20 Amps +Unable to send command: Invalid argument +Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected +` + + tests := []string{ + v1Data, + v2Data, + } + + for i := range tests { + t.Logf("Checking v%d data...", i+1) + extractFieldsFromRegex(re_v1_parse_line, tests[i]) + extractFieldsFromRegex(re_v2_parse_line, tests[i]) + } +} + +func Test_parseV1(t *testing.T) { + type args struct { + hostname string + cmdOut []byte + measuredAt time.Time + } + tests := []struct { + name string + args args + wantFields map[string]interface{} + wantErr bool + }{ + { + name: "Test correct V1 parsing with hex code", + args: args{ + hostname: "host", + measuredAt: time.Now(), + cmdOut: []byte("PS1 Status | 0x02 | ok"), + }, + wantFields: map[string]interface{}{"value": float64(2), "status": 1}, + wantErr: false, + }, + { + name: "Test correct V1 parsing with value with unit", + args: args{ + hostname: "host", + measuredAt: time.Now(), + cmdOut: []byte("Avg Power | 210 Watts | ok"), + }, + wantFields: map[string]interface{}{"value": float64(210), "status": 1}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + + if err := parseV1(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + t.Errorf("parseV1() error = %v, wantErr %v", err, tt.wantErr) + } + + acc.AssertContainsFields(t, "ipmi_sensor", tt.wantFields) + }) + } +} + +func Test_parseV2(t *testing.T) { + type args struct { + hostname string + cmdOut []byte + measuredAt time.Time + } + tests := []struct { + name string + args args + expected []telegraf.Metric + wantErr bool + }{ + { + name: "Test correct V2 parsing with analog value with unit", + args: args{ + hostname: "host", + cmdOut: []byte("Power Supply 1 | 03h | ok | 10.1 | 110 Watts, Presence detected"), + measuredAt: time.Now(), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("ipmi_sensor", + map[string]string{ + "name": "power_supply_1", + 
"status_code": "ok", + "server": "host", + "entity_id": "10.1", + "unit": "watts", + "status_desc": "presence_detected", + }, + map[string]interface{}{"value": 110.0}, + time.Unix(0, 0), + ), + }, + wantErr: false, + }, + { + name: "Test correct V2 parsing without analog value", + args: args{ + hostname: "host", + cmdOut: []byte("Intrusion | 73h | ok | 7.1 |"), + measuredAt: time.Now(), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("ipmi_sensor", + map[string]string{ + "name": "intrusion", + "status_code": "ok", + "server": "host", + "entity_id": "7.1", + "status_desc": "ok", + }, + map[string]interface{}{"value": 0.0}, + time.Unix(0, 0), + ), + }, + wantErr: false, + }, + { + name: "parse negative value", + args: args{ + hostname: "host", + cmdOut: []byte("DIMM Thrm Mrgn 1 | B0h | ok | 8.1 | -55 degrees C"), + measuredAt: time.Now(), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("ipmi_sensor", + map[string]string{ + "name": "dimm_thrm_mrgn_1", + "status_code": "ok", + "server": "host", + "entity_id": "8.1", + "unit": "degrees_c", + }, + map[string]interface{}{"value": -55.0}, + time.Unix(0, 0), + ), + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + if err := parseV2(&acc, tt.args.hostname, tt.args.cmdOut, tt.args.measuredAt); (err != nil) != tt.wantErr { + t.Errorf("parseV2() error = %v, wantErr %v", err, tt.wantErr) + } + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/inputs/ipset/README.md b/plugins/inputs/ipset/README.md index 2209de911..ae66ccfc0 100644 --- a/plugins/inputs/ipset/README.md +++ b/plugins/inputs/ipset/README.md @@ -25,10 +25,19 @@ AmbientCapabilities=CAP_NET_RAW CAP_NET_ADMIN ### Using sudo -You may edit your sudo configuration with the following: +You will need the following in your telegraf config: +```toml +[[inputs.ipset]] + use_sudo = true +``` -```sudo -telegraf ALL=(root) NOPASSWD: /sbin/ipset save +You will also need to update your sudoers file: +```bash +$ visudo +# Add the following line: +Cmnd_Alias IPSETSAVE = /sbin/ipset save +telegraf ALL=(root) NOPASSWD: IPSETSAVE +Defaults!IPSETSAVE !logfile, !syslog, !pam_session ``` ### Configuration diff --git a/plugins/inputs/ipset/ipset_test.go b/plugins/inputs/ipset/ipset_test.go index 9438c806d..31a9f3cfc 100644 --- a/plugins/inputs/ipset/ipset_test.go +++ b/plugins/inputs/ipset/ipset_test.go @@ -50,8 +50,8 @@ func TestIpset(t *testing.T) { add myset 3.4.5.6 packets 3 bytes 222 `, tags: []map[string]string{ - map[string]string{"set": "myset", "rule": "1.2.3.4"}, - map[string]string{"set": "myset", "rule": "3.4.5.6"}, + {"set": "myset", "rule": "1.2.3.4"}, + {"set": "myset", "rule": "3.4.5.6"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"packets_total": uint64(1328), "bytes_total": uint64(79680)}}, @@ -66,8 +66,8 @@ func TestIpset(t *testing.T) { add myset 3.4.5.6 packets 3 bytes 222 "3rd IP" `, tags: []map[string]string{ - map[string]string{"set": "myset", "rule": "1.2.3.4"}, - map[string]string{"set": "myset", "rule": "3.4.5.6"}, + {"set": "myset", "rule": "1.2.3.4"}, + {"set": "myset", "rule": "3.4.5.6"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"packets_total": uint64(1328), "bytes_total": uint64(79680)}}, diff --git a/plugins/inputs/iptables/README.md b/plugins/inputs/iptables/README.md index 527723f09..6b56febba 100644 --- a/plugins/inputs/iptables/README.md +++ 
b/plugins/inputs/iptables/README.md @@ -28,10 +28,20 @@ Since telegraf will fork a process to run iptables, `AmbientCapabilities` is req ### Using sudo -You may edit your sudo configuration with the following: +You will need the following in your telegraf config: +```toml +[[inputs.iptables]] + use_sudo = true +``` -```sudo -telegraf ALL=(root) NOPASSWD: /usr/bin/iptables -nvL * +You will also need to update your sudoers file: + +```bash +$ visudo +# Add the following line: +Cmnd_Alias IPTABLESSHOW = /usr/bin/iptables -nvL * +telegraf ALL=(root) NOPASSWD: IPTABLESSHOW +Defaults!IPTABLESSHOW !logfile, !syslog, !pam_session ``` ### Using IPtables lock feature @@ -45,6 +55,8 @@ Defining multiple instances of this plugin in telegraf.conf can lead to concurre use_sudo = false # run iptables with the lock option use_lock = false + # Define an alternate executable, such as "ip6tables". Default is "iptables". + # binary = "ip6tables" # defines the table to monitor: table = "filter" # defines the chains to monitor: diff --git a/plugins/inputs/iptables/iptables.go b/plugins/inputs/iptables/iptables.go index 01041fcc1..e56f8b31d 100644 --- a/plugins/inputs/iptables/iptables.go +++ b/plugins/inputs/iptables/iptables.go @@ -17,6 +17,7 @@ import ( type Iptables struct { UseSudo bool UseLock bool + Binary string Table string Chains []string lister chainLister @@ -36,8 +37,10 @@ func (ipt *Iptables) SampleConfig() string { ## iptables can be restricted to only list command "iptables -nvL". use_sudo = false ## Setting 'use_lock' to true runs iptables with the "-w" option. - ## Adjust your sudo settings appropriately if using this option ("iptables -wnvl") + ## Adjust your sudo settings appropriately if using this option ("iptables -w 5 -nvl") use_lock = false + ## Define an alternate executable, such as "ip6tables". Default is "iptables". + # binary = "ip6tables" ## defines the table to monitor: table = "filter" ## defines the chains to monitor. @@ -70,7 +73,13 @@ func (ipt *Iptables) Gather(acc telegraf.Accumulator) error { } func (ipt *Iptables) chainList(table, chain string) (string, error) { - iptablePath, err := exec.LookPath("iptables") + var binary string + if ipt.Binary != "" { + binary = ipt.Binary + } else { + binary = "iptables" + } + iptablePath, err := exec.LookPath(binary) if err != nil { return "", err } @@ -80,11 +89,10 @@ func (ipt *Iptables) chainList(table, chain string) (string, error) { name = "sudo" args = append(args, iptablePath) } - iptablesBaseArgs := "-nvL" if ipt.UseLock { - iptablesBaseArgs = "-wnvL" + args = append(args, "-w", "5") } - args = append(args, iptablesBaseArgs, chain, "-t", table, "-x") + args = append(args, "-nvL", chain, "-t", table, "-x") c := exec.Command(name, args...) 
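+	// e.g. runs `iptables -nvL INPUT -t filter -x` (via sudo and/or with `-w 5` when use_sudo/use_lock are set)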
out, err := c.Output() return string(out), err @@ -94,8 +102,8 @@ const measurement = "iptables" var errParse = errors.New("Cannot parse iptables list information") var chainNameRe = regexp.MustCompile(`^Chain\s+(\S+)`) -var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+`) -var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+.*?/\*\s*(.+?)\s*\*/\s*`) +var fieldsHeaderRe = regexp.MustCompile(`^\s*pkts\s+bytes\s+target`) +var valuesRe = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+(\w+).*?/\*\s*(.+?)\s*\*/\s*`) func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error { lines := strings.Split(data, "\n") @@ -111,15 +119,16 @@ func (ipt *Iptables) parseAndGather(data string, acc telegraf.Accumulator) error } for _, line := range lines[2:] { matches := valuesRe.FindStringSubmatch(line) - if len(matches) != 4 { + if len(matches) != 5 { continue } pkts := matches[1] bytes := matches[2] - comment := matches[3] + target := matches[3] + comment := matches[4] - tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "ruleid": comment} + tags := map[string]string{"table": ipt.Table, "chain": mchain[1], "target": target, "ruleid": comment} fields := make(map[string]interface{}) var err error diff --git a/plugins/inputs/iptables/iptables_test.go b/plugins/inputs/iptables/iptables_test.go index a98c24190..681d8bbfc 100644 --- a/plugins/inputs/iptables/iptables_test.go +++ b/plugins/inputs/iptables/iptables_test.go @@ -42,7 +42,7 @@ func TestIptables_Gather(t *testing.T) { pkts bytes target prot opt in out source destination 57 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 /* foobar */ `}, - tags: []map[string]string{map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}}, + tags: []map[string]string{{"table": "filter", "chain": "INPUT", "target": "RETURN", "ruleid": "foobar"}}, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, }, @@ -98,9 +98,9 @@ func TestIptables_Gather(t *testing.T) { `, }, tags: []map[string]string{ - map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foo"}, - map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "bar"}, - map[string]string{"table": "filter", "chain": "FORWARD", "ruleid": "foobar"}, + {"table": "filter", "chain": "INPUT", "target": "RETURN", "ruleid": "foo"}, + {"table": "filter", "chain": "FORWARD", "target": "RETURN", "ruleid": "bar"}, + {"table": "filter", "chain": "FORWARD", "target": "RETURN", "ruleid": "foobar"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(200), "bytes": uint64(4520)}}, @@ -118,7 +118,7 @@ func TestIptables_Gather(t *testing.T) { 100 4520 RETURN tcp -- * * 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 `}, tags: []map[string]string{ - map[string]string{"table": "filter", "chain": "INPUT", "ruleid": "foobar"}, + {"table": "filter", "chain": "INPUT", "target": "RETURN", "ruleid": "foobar"}, }, fields: [][]map[string]interface{}{ {map[string]interface{}{"pkts": uint64(57), "bytes": uint64(4520)}}, @@ -134,8 +134,8 @@ func TestIptables_Gather(t *testing.T) { 0 0 CLASSIFY all -- * * 1.3.5.7 0.0.0.0/0 /* test2 */ CLASSIFY set 1:4 `}, tags: []map[string]string{ - map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test"}, - map[string]string{"table": "mangle", "chain": "SHAPER", "ruleid": "test2"}, + {"table": "mangle", "chain": "SHAPER", "target": "ACCEPT", "ruleid": "test"}, + {"table": "mangle", "chain": "SHAPER", "target": "CLASSIFY", "ruleid": "test2"}, }, fields: 
[][]map[string]interface{}{
 				{map[string]interface{}{"pkts": uint64(0), "bytes": uint64(0)}},
@@ -163,7 +163,7 @@ func TestIptables_Gather(t *testing.T) {
 			123 456 all -- eth0 * 0.0.0.0/0 0.0.0.0/0 /* all_recv */
 			`},
 			tags: []map[string]string{
-				map[string]string{"table": "all_recv", "chain": "accountfwd", "ruleid": "all_recv"},
+				{"table": "all_recv", "chain": "accountfwd", "target": "all", "ruleid": "all_recv"},
 			},
 			fields: [][]map[string]interface{}{
 				{map[string]interface{}{"pkts": uint64(123), "bytes": uint64(456)}},
diff --git a/plugins/inputs/ipvs/README.md b/plugins/inputs/ipvs/README.md
new file mode 100644
index 000000000..75e5b5103
--- /dev/null
+++ b/plugins/inputs/ipvs/README.md
@@ -0,0 +1,83 @@
+# IPVS Input Plugin
+
+The IPVS input plugin uses the Linux kernel netlink socket interface to gather
+metrics about IPVS virtual and real servers.
+
+**Supported Platforms:** Linux
+
+### Configuration
+
+```toml
+[[inputs.ipvs]]
+  # no configuration
+```
+
+#### Permissions
+
+Assuming you installed the telegraf package via one of the published packages,
+the process will be running as the `telegraf` user. However, in order for this
+plugin to communicate over netlink sockets it needs the telegraf process to be
+running as `root` (or some user with `CAP_NET_ADMIN` and `CAP_NET_RAW`). Make
+sure these permissions are in place before running telegraf with this plugin included.
+
+### Metrics
+
+Each server will contain tags identifying how it was configured, using one of
+`address` + `port` + `protocol` *OR* `fwmark`. This is how one would normally
+configure a virtual server using `ipvsadm`.
+
+- ipvs_virtual_server
+  - tags:
+    - sched (the scheduler in use)
+    - netmask (the mask used for determining affinity)
+    - address_family (inet/inet6)
+    - address
+    - port
+    - protocol
+    - fwmark
+  - fields:
+    - connections
+    - pkts_in
+    - pkts_out
+    - bytes_in
+    - bytes_out
+    - pps_in
+    - pps_out
+    - cps
+
+- ipvs_real_server
+  - tags:
+    - address
+    - port
+    - address_family (inet/inet6)
+    - virtual_address
+    - virtual_port
+    - virtual_protocol
+    - virtual_fwmark
+  - fields:
+    - active_connections
+    - inactive_connections
+    - connections
+    - pkts_in
+    - pkts_out
+    - bytes_in
+    - bytes_out
+    - pps_in
+    - pps_out
+    - cps
+
+### Example Output
+
+Virtual server is configured using `proto+addr+port` and backed by 2 real servers:
+```
+ipvs_virtual_server,address=172.18.64.234,address_family=inet,netmask=32,port=9000,protocol=tcp,sched=rr bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i,cps=0i,connections=0i,pkts_in=0i,pkts_out=0i 1541019340000000000
+ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pkts_in=0i,bytes_out=0i,pps_out=0i,connections=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,cps=0i 1541019340000000000
+ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_address=172.18.64.234,virtual_port=9000,virtual_protocol=tcp active_connections=0i,inactive_connections=0i,pps_in=0i,pps_out=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,cps=0i 1541019340000000000
+```
+
+Virtual server is configured using `fwmark` and backed by 2 real servers:
+```
+ipvs_virtual_server,address_family=inet,fwmark=47,netmask=32,sched=rr cps=0i,connections=0i,pkts_in=0i,pkts_out=0i,bytes_in=0i,bytes_out=0i,pps_in=0i,pps_out=0i 1541019340000000000
+ipvs_real_server,address=172.18.64.220,address_family=inet,port=9000,virtual_fwmark=47
inactive_connections=0i,pkts_out=0i,bytes_out=0i,pps_in=0i,cps=0i,active_connections=0i,pkts_in=0i,bytes_in=0i,pps_out=0i,connections=0i 1541019340000000000 +ipvs_real_server,address=172.18.64.219,address_family=inet,port=9000,virtual_fwmark=47 cps=0i,active_connections=0i,inactive_connections=0i,connections=0i,pkts_in=0i,bytes_out=0i,pkts_out=0i,bytes_in=0i,pps_in=0i,pps_out=0i 1541019340000000000 +``` diff --git a/plugins/inputs/ipvs/ipvs.go b/plugins/inputs/ipvs/ipvs.go new file mode 100644 index 000000000..5e3ae0d56 --- /dev/null +++ b/plugins/inputs/ipvs/ipvs.go @@ -0,0 +1,155 @@ +// +build linux + +package ipvs + +import ( + "fmt" + "math/bits" + "strconv" + "syscall" + + "github.com/docker/libnetwork/ipvs" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/common/logrus" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// IPVS holds the state for this input plugin +type IPVS struct { + handle *ipvs.Handle + Log telegraf.Logger +} + +// Description returns a description string +func (i *IPVS) Description() string { + return "Collect virtual and real server stats from Linux IPVS" +} + +// SampleConfig returns a sample configuration for this input plugin +func (i *IPVS) SampleConfig() string { + return `` +} + +// Gather gathers the stats +func (i *IPVS) Gather(acc telegraf.Accumulator) error { + if i.handle == nil { + h, err := ipvs.New("") // TODO: make the namespace configurable + if err != nil { + return fmt.Errorf("unable to open IPVS handle: %v", err) + } + i.handle = h + } + + services, err := i.handle.GetServices() + if err != nil { + i.handle.Close() + i.handle = nil // trigger a reopen on next call to gather + return fmt.Errorf("failed to list IPVS services: %v", err) + } + for _, s := range services { + fields := map[string]interface{}{ + "connections": s.Stats.Connections, + "pkts_in": s.Stats.PacketsIn, + "pkts_out": s.Stats.PacketsOut, + "bytes_in": s.Stats.BytesIn, + "bytes_out": s.Stats.BytesOut, + "pps_in": s.Stats.PPSIn, + "pps_out": s.Stats.PPSOut, + "cps": s.Stats.CPS, + } + acc.AddGauge("ipvs_virtual_server", fields, serviceTags(s)) + + destinations, err := i.handle.GetDestinations(s) + if err != nil { + i.Log.Errorf("Failed to list destinations for a virtual server: %v", err) + continue // move on to the next virtual server + } + + for _, d := range destinations { + fields := map[string]interface{}{ + "active_connections": d.ActiveConnections, + "inactive_connections": d.InactiveConnections, + "connections": d.Stats.Connections, + "pkts_in": d.Stats.PacketsIn, + "pkts_out": d.Stats.PacketsOut, + "bytes_in": d.Stats.BytesIn, + "bytes_out": d.Stats.BytesOut, + "pps_in": d.Stats.PPSIn, + "pps_out": d.Stats.PPSOut, + "cps": d.Stats.CPS, + } + destTags := destinationTags(d) + if s.FWMark > 0 { + destTags["virtual_fwmark"] = strconv.Itoa(int(s.FWMark)) + } else { + destTags["virtual_protocol"] = protocolToString(s.Protocol) + destTags["virtual_address"] = s.Address.String() + destTags["virtual_port"] = strconv.Itoa(int(s.Port)) + } + acc.AddGauge("ipvs_real_server", fields, destTags) + } + } + + return nil +} + +// helper: given a Service, return tags that identify it +func serviceTags(s *ipvs.Service) map[string]string { + ret := map[string]string{ + "sched": s.SchedName, + "netmask": strconv.Itoa(bits.OnesCount32(s.Netmask)), + "address_family": addressFamilyToString(s.AddressFamily), + } + // Per the ipvsadm man page, a virtual service is defined "based on + // protocol/addr/port or firewall mark" + if s.FWMark > 0 { + ret["fwmark"] = 
strconv.Itoa(int(s.FWMark)) + } else { + ret["protocol"] = protocolToString(s.Protocol) + ret["address"] = s.Address.String() + ret["port"] = strconv.Itoa(int(s.Port)) + } + return ret +} + +// helper: given a Destination, return tags that identify it +func destinationTags(d *ipvs.Destination) map[string]string { + return map[string]string{ + "address": d.Address.String(), + "port": strconv.Itoa(int(d.Port)), + "address_family": addressFamilyToString(d.AddressFamily), + } +} + +// helper: convert protocol uint16 to human readable string (if possible) +func protocolToString(p uint16) string { + switch p { + case syscall.IPPROTO_TCP: + return "tcp" + case syscall.IPPROTO_UDP: + return "udp" + case syscall.IPPROTO_SCTP: + return "sctp" + default: + return fmt.Sprintf("%d", p) + } +} + +// helper: convert addressFamily to a human readable string +func addressFamilyToString(af uint16) string { + switch af { + case syscall.AF_INET: + return "inet" + case syscall.AF_INET6: + return "inet6" + default: + return fmt.Sprintf("%d", af) + } +} + +func init() { + inputs.Add("ipvs", func() telegraf.Input { + logrus.InstallHook() + return &IPVS{} + }) +} diff --git a/plugins/inputs/ipvs/ipvs_notlinux.go b/plugins/inputs/ipvs/ipvs_notlinux.go new file mode 100644 index 000000000..bbbb1240b --- /dev/null +++ b/plugins/inputs/ipvs/ipvs_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package ipvs diff --git a/plugins/inputs/jenkins/README.md b/plugins/inputs/jenkins/README.md new file mode 100644 index 000000000..3615006c4 --- /dev/null +++ b/plugins/inputs/jenkins/README.md @@ -0,0 +1,112 @@ +# Jenkins Plugin + +The jenkins plugin gathers information about the nodes and jobs running in a jenkins instance. + +This plugin does not require a plugin on jenkins and it makes use of Jenkins API to retrieve all the information needed. + +### Configuration: + +```toml +[[inputs.jenkins]] + ## The Jenkins URL in the format "schema://host:port" + url = "http://my-jenkins-instance:8080" + # username = "admin" + # password = "admin" + + ## Set response_timeout + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ## Optional Max Job Build Age filter + ## Default 1 hour, ignore builds older than max_build_age + # max_build_age = "1h" + + ## Optional Sub Job Depth filter + ## Jenkins can have unlimited layer of sub jobs + ## This config will limit the layers of pulling, default value 0 means + ## unlimited pulling until no more sub jobs + # max_subjob_depth = 0 + + ## Optional Sub Job Per Layer + ## In workflow-multibranch-plugin, each branch will be created as a sub job. 
+ ## This config will limit to call only the lasted branches in each layer, + ## empty will use default value 10 + # max_subjob_per_layer = 10 + + ## Jobs to exclude from gathering + # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] + + ## Nodes to exclude from gathering + # node_exclude = [ "node1", "node2" ] + + ## Worker pool for jenkins plugin only + ## Empty this field will use default value 5 + # max_connections = 5 +``` + +### Metrics: + +- jenkins_node + - tags: + - source + - port + - fields: + - busy_executors + - total_executors + ++ jenkins_node + - tags: + - arch + - disk_path + - temp_path + - node_name + - status ("online", "offline") + - source + - port + - fields: + - disk_available (Bytes) + - temp_available (Bytes) + - memory_available (Bytes) + - memory_total (Bytes) + - swap_available (Bytes) + - swap_total (Bytes) + - response_time (ms) + - num_executors + +- jenkins_job + - tags: + - name + - parents + - result + - source + - port + - fields: + - duration (ms) + - result_code (0 = SUCCESS, 1 = FAILURE, 2 = NOT_BUILD, 3 = UNSTABLE, 4 = ABORTED) + +### Sample Queries: + +``` +SELECT mean("memory_available") AS "mean_memory_available", mean("memory_total") AS "mean_memory_total", mean("temp_available") AS "mean_temp_available" FROM "jenkins_node" WHERE time > now() - 15m GROUP BY time(:interval:) FILL(null) +``` + +``` +SELECT mean("duration") AS "mean_duration" FROM "jenkins_job" WHERE time > now() - 24h GROUP BY time(:interval:) FILL(null) +``` + +### Example Output: + +``` +$ ./telegraf --config telegraf.conf --input-filter jenkins --test +jenkins,host=myhost,port=80,source=my-jenkins-instance busy_executors=4i,total_executors=8i 1580418261000000000 +jenkins_node,arch=Linux\ (amd64),disk_path=/var/jenkins_home,temp_path=/tmp,host=myhost,node_name=master,source=my-jenkins-instance,port=8080 swap_total=4294963200,memory_available=586711040,memory_total=6089498624,status=online,response_time=1000i,disk_available=152392036352,temp_available=152392036352,swap_available=3503263744,num_executors=2i 1516031535000000000 +jenkins_job,host=myhost,name=JOB1,parents=apps/br1,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2831i,result_code=0i 1516026630000000000 +jenkins_job,host=myhost,name=JOB2,parents=apps/br2,result=SUCCESS,source=my-jenkins-instance,port=8080 duration=2285i,result_code=0i 1516027230000000000 +``` + diff --git a/plugins/inputs/jenkins/client.go b/plugins/inputs/jenkins/client.go new file mode 100644 index 000000000..6c0a125aa --- /dev/null +++ b/plugins/inputs/jenkins/client.go @@ -0,0 +1,156 @@ +package jenkins + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" +) + +type client struct { + baseURL string + httpClient *http.Client + username string + password string + sessionCookie *http.Cookie + semaphore chan struct{} +} + +func newClient(httpClient *http.Client, url, username, password string, maxConnections int) *client { + return &client{ + baseURL: url, + httpClient: httpClient, + username: username, + password: password, + semaphore: make(chan struct{}, maxConnections), + } +} + +func (c *client) init() error { + // get session cookie + req, err := http.NewRequest("GET", c.baseURL, nil) + if err != nil { + return err + } + if c.username != "" || c.password != "" { + req.SetBasicAuth(c.username, c.password) + } + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + for _, cc := range resp.Cookies() { + if strings.Contains(cc.Name, "JSESSIONID") { + c.sessionCookie 
= cc + break + } + } + // first api fetch + if err := c.doGet(context.Background(), jobPath, new(jobResponse)); err != nil { + return err + } + return nil +} + +func (c *client) doGet(ctx context.Context, url string, v interface{}) error { + req, err := createGetRequest(c.baseURL+url, c.username, c.password, c.sessionCookie) + if err != nil { + return err + } + select { + case c.semaphore <- struct{}{}: + break + case <-ctx.Done(): + return ctx.Err() + } + resp, err := c.httpClient.Do(req.WithContext(ctx)) + if err != nil { + <-c.semaphore + return err + } + defer func() { + resp.Body.Close() + <-c.semaphore + }() + // Clear invalid token if unauthorized + if resp.StatusCode == http.StatusUnauthorized { + c.sessionCookie = nil + return APIError{ + URL: url, + StatusCode: resp.StatusCode, + Title: resp.Status, + } + } + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return APIError{ + URL: url, + StatusCode: resp.StatusCode, + Title: resp.Status, + } + } + if resp.StatusCode == http.StatusNoContent { + return APIError{ + URL: url, + StatusCode: resp.StatusCode, + Title: resp.Status, + } + } + if err = json.NewDecoder(resp.Body).Decode(v); err != nil { + return err + } + return nil +} + +type APIError struct { + URL string + StatusCode int + Title string + Description string +} + +func (e APIError) Error() string { + if e.Description != "" { + return fmt.Sprintf("[%s] %s: %s", e.URL, e.Title, e.Description) + } + return fmt.Sprintf("[%s] %s", e.URL, e.Title) +} + +func createGetRequest(url string, username, password string, sessionCookie *http.Cookie) (*http.Request, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + if username != "" || password != "" { + req.SetBasicAuth(username, password) + } + if sessionCookie != nil { + req.AddCookie(sessionCookie) + } + req.Header.Add("Accept", "application/json") + return req, nil +} + +func (c *client) getJobs(ctx context.Context, jr *jobRequest) (js *jobResponse, err error) { + js = new(jobResponse) + url := jobPath + if jr != nil { + url = jr.URL() + } + err = c.doGet(ctx, url, js) + return js, err +} + +func (c *client) getBuild(ctx context.Context, jr jobRequest, number int64) (b *buildResponse, err error) { + b = new(buildResponse) + url := jr.buildURL(number) + err = c.doGet(ctx, url, b) + return b, err +} + +func (c *client) getAllNodes(ctx context.Context) (nodeResp *nodeResponse, err error) { + nodeResp = new(nodeResponse) + err = c.doGet(ctx, nodePath, nodeResp) + return nodeResp, err +} diff --git a/plugins/inputs/jenkins/jenkins.go b/plugins/inputs/jenkins/jenkins.go new file mode 100644 index 000000000..a909f5ea4 --- /dev/null +++ b/plugins/inputs/jenkins/jenkins.go @@ -0,0 +1,503 @@ +package jenkins + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Jenkins plugin gathers information about the nodes and jobs running in a jenkins instance. 
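+// Unset values for MaxConnections, MaxSubJobPerLayer and MaxBuildAge fall back
+// to the defaults of 5, 10 and 1h applied when the plugin is registered.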
+type Jenkins struct { + URL string + Username string + Password string + Source string + Port string + // HTTP Timeout specified as a string - 3s, 1m, 1h + ResponseTimeout internal.Duration + + tls.ClientConfig + client *client + + Log telegraf.Logger + + MaxConnections int `toml:"max_connections"` + MaxBuildAge internal.Duration `toml:"max_build_age"` + MaxSubJobDepth int `toml:"max_subjob_depth"` + MaxSubJobPerLayer int `toml:"max_subjob_per_layer"` + JobExclude []string `toml:"job_exclude"` + jobFilter filter.Filter + + NodeExclude []string `toml:"node_exclude"` + nodeFilter filter.Filter + + semaphore chan struct{} +} + +const sampleConfig = ` + ## The Jenkins URL in the format "schema://host:port" + url = "http://my-jenkins-instance:8080" + # username = "admin" + # password = "admin" + + ## Set response_timeout + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + ## Optional Max Job Build Age filter + ## Default 1 hour, ignore builds older than max_build_age + # max_build_age = "1h" + + ## Optional Sub Job Depth filter + ## Jenkins can have unlimited layer of sub jobs + ## This config will limit the layers of pulling, default value 0 means + ## unlimited pulling until no more sub jobs + # max_subjob_depth = 0 + + ## Optional Sub Job Per Layer + ## In workflow-multibranch-plugin, each branch will be created as a sub job. + ## This config will limit to call only the lasted branches in each layer, + ## empty will use default value 10 + # max_subjob_per_layer = 10 + + ## Jobs to exclude from gathering + # job_exclude = [ "job1", "job2/subjob1/subjob2", "job3/*"] + + ## Nodes to exclude from gathering + # node_exclude = [ "node1", "node2" ] + + ## Worker pool for jenkins plugin only + ## Empty this field will use default value 5 + # max_connections = 5 +` + +// measurement +const ( + measurementJenkins = "jenkins" + measurementNode = "jenkins_node" + measurementJob = "jenkins_job" +) + +// SampleConfig implements telegraf.Input interface +func (j *Jenkins) SampleConfig() string { + return sampleConfig +} + +// Description implements telegraf.Input interface +func (j *Jenkins) Description() string { + return "Read jobs and cluster metrics from Jenkins instances" +} + +// Gather implements telegraf.Input interface +func (j *Jenkins) Gather(acc telegraf.Accumulator) error { + if j.client == nil { + client, err := j.newHTTPClient() + if err != nil { + return err + } + if err = j.initialize(client); err != nil { + return err + } + } + + j.gatherNodesData(acc) + j.gatherJobs(acc) + + return nil +} + +func (j *Jenkins) newHTTPClient() (*http.Client, error) { + tlsCfg, err := j.ClientConfig.TLSConfig() + if err != nil { + return nil, fmt.Errorf("error parse jenkins config[%s]: %v", j.URL, err) + } + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + MaxIdleConns: j.MaxConnections, + }, + Timeout: j.ResponseTimeout.Duration, + }, nil +} + +// separate the client as dependency to use httptest Client for mocking +func (j *Jenkins) initialize(client *http.Client) error { + var err error + + // init jenkins tags + u, err := url.Parse(j.URL) + if err != nil { + return err + } + if u.Port() == "" { + if u.Scheme == "http" { + j.Port = "80" + } else if u.Scheme == "https" { + j.Port = "443" + } + } else { + j.Port = u.Port() + } + j.Source = u.Hostname() + + // init job filter + 
j.jobFilter, err = filter.Compile(j.JobExclude) + if err != nil { + return fmt.Errorf("error compile job filters[%s]: %v", j.URL, err) + } + + // init node filter + j.nodeFilter, err = filter.Compile(j.NodeExclude) + if err != nil { + return fmt.Errorf("error compile node filters[%s]: %v", j.URL, err) + } + + // init tcp pool with default value + if j.MaxConnections <= 0 { + j.MaxConnections = 5 + } + + // default sub jobs can be acquired + if j.MaxSubJobPerLayer <= 0 { + j.MaxSubJobPerLayer = 10 + } + + j.semaphore = make(chan struct{}, j.MaxConnections) + + j.client = newClient(client, j.URL, j.Username, j.Password, j.MaxConnections) + + return j.client.init() +} + +func (j *Jenkins) gatherNodeData(n node, acc telegraf.Accumulator) error { + + tags := map[string]string{} + if n.DisplayName == "" { + return fmt.Errorf("error empty node name") + } + + tags["node_name"] = n.DisplayName + // filter out excluded node_name + if j.nodeFilter != nil && j.nodeFilter.Match(tags["node_name"]) { + return nil + } + + monitorData := n.MonitorData + + if monitorData.HudsonNodeMonitorsArchitectureMonitor != "" { + tags["arch"] = monitorData.HudsonNodeMonitorsArchitectureMonitor + } + + tags["status"] = "online" + if n.Offline { + tags["status"] = "offline" + } + + tags["source"] = j.Source + tags["port"] = j.Port + + fields := make(map[string]interface{}) + fields["num_executors"] = n.NumExecutors + + if monitorData.HudsonNodeMonitorsResponseTimeMonitor != nil { + fields["response_time"] = monitorData.HudsonNodeMonitorsResponseTimeMonitor.Average + } + if monitorData.HudsonNodeMonitorsDiskSpaceMonitor != nil { + tags["disk_path"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Path + fields["disk_available"] = monitorData.HudsonNodeMonitorsDiskSpaceMonitor.Size + } + if monitorData.HudsonNodeMonitorsTemporarySpaceMonitor != nil { + tags["temp_path"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Path + fields["temp_available"] = monitorData.HudsonNodeMonitorsTemporarySpaceMonitor.Size + } + if monitorData.HudsonNodeMonitorsSwapSpaceMonitor != nil { + fields["swap_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapAvailable + fields["memory_available"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryAvailable + fields["swap_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.SwapTotal + fields["memory_total"] = monitorData.HudsonNodeMonitorsSwapSpaceMonitor.MemoryTotal + } + acc.AddFields(measurementNode, fields, tags) + + return nil +} + +func (j *Jenkins) gatherNodesData(acc telegraf.Accumulator) { + + nodeResp, err := j.client.getAllNodes(context.Background()) + if err != nil { + acc.AddError(err) + return + } + + // get total and busy executors + tags := map[string]string{"source": j.Source, "port": j.Port} + fields := make(map[string]interface{}) + fields["busy_executors"] = nodeResp.BusyExecutors + fields["total_executors"] = nodeResp.TotalExecutors + + acc.AddFields(measurementJenkins, fields, tags) + + // get node data + for _, node := range nodeResp.Computers { + err = j.gatherNodeData(node, acc) + if err == nil { + continue + } + acc.AddError(err) + } +} + +func (j *Jenkins) gatherJobs(acc telegraf.Accumulator) { + js, err := j.client.getJobs(context.Background(), nil) + if err != nil { + acc.AddError(err) + return + } + var wg sync.WaitGroup + for _, job := range js.Jobs { + wg.Add(1) + go func(name string, wg *sync.WaitGroup, acc telegraf.Accumulator) { + defer wg.Done() + if err := j.getJobDetail(jobRequest{ + name: name, + parents: []string{}, + layer: 0, + 
}, acc); err != nil { + acc.AddError(err) + } + }(job.Name, &wg, acc) + } + wg.Wait() +} + +// wrap the tcp request with doGet +// block tcp request if buffered channel is full +func (j *Jenkins) doGet(tcp func() error) error { + j.semaphore <- struct{}{} + if err := tcp(); err != nil { + <-j.semaphore + return err + } + <-j.semaphore + return nil +} + +func (j *Jenkins) getJobDetail(jr jobRequest, acc telegraf.Accumulator) error { + if j.MaxSubJobDepth > 0 && jr.layer == j.MaxSubJobDepth { + return nil + } + // filter out excluded job. + if j.jobFilter != nil && j.jobFilter.Match(jr.hierarchyName()) { + return nil + } + + js, err := j.client.getJobs(context.Background(), &jr) + if err != nil { + return err + } + + var wg sync.WaitGroup + for k, ij := range js.Jobs { + if k < len(js.Jobs)-j.MaxSubJobPerLayer-1 { + continue + } + wg.Add(1) + // schedule tcp fetch for inner jobs + go func(ij innerJob, jr jobRequest, acc telegraf.Accumulator) { + defer wg.Done() + if err := j.getJobDetail(jobRequest{ + name: ij.Name, + parents: jr.combined(), + layer: jr.layer + 1, + }, acc); err != nil { + acc.AddError(err) + } + }(ij, jr, acc) + } + wg.Wait() + + // collect build info + number := js.LastBuild.Number + if number < 1 { + // no build info + return nil + } + build, err := j.client.getBuild(context.Background(), jr, number) + if err != nil { + return err + } + + if build.Building { + j.Log.Debugf("Ignore running build on %s, build %v", jr.name, number) + return nil + } + + // stop if build is too old + // Higher up in gatherJobs + cutoff := time.Now().Add(-1 * j.MaxBuildAge.Duration) + + // Here we just test + if build.GetTimestamp().Before(cutoff) { + return nil + } + + j.gatherJobBuild(jr, build, acc) + return nil +} + +type nodeResponse struct { + Computers []node `json:"computer"` + BusyExecutors int `json:"busyExecutors"` + TotalExecutors int `json:"totalExecutors"` +} + +type node struct { + DisplayName string `json:"displayName"` + Offline bool `json:"offline"` + NumExecutors int `json:"numExecutors"` + MonitorData monitorData `json:"monitorData"` +} + +type monitorData struct { + HudsonNodeMonitorsArchitectureMonitor string `json:"hudson.node_monitors.ArchitectureMonitor"` + HudsonNodeMonitorsDiskSpaceMonitor *nodeSpaceMonitor `json:"hudson.node_monitors.DiskSpaceMonitor"` + HudsonNodeMonitorsResponseTimeMonitor *responseTimeMonitor `json:"hudson.node_monitors.ResponseTimeMonitor"` + HudsonNodeMonitorsSwapSpaceMonitor *swapSpaceMonitor `json:"hudson.node_monitors.SwapSpaceMonitor"` + HudsonNodeMonitorsTemporarySpaceMonitor *nodeSpaceMonitor `json:"hudson.node_monitors.TemporarySpaceMonitor"` +} + +type nodeSpaceMonitor struct { + Path string `json:"path"` + Size float64 `json:"size"` +} + +type responseTimeMonitor struct { + Average int64 `json:"average"` +} + +type swapSpaceMonitor struct { + SwapAvailable float64 `json:"availableSwapSpace"` + SwapTotal float64 `json:"totalSwapSpace"` + MemoryAvailable float64 `json:"availablePhysicalMemory"` + MemoryTotal float64 `json:"totalPhysicalMemory"` +} + +type jobResponse struct { + LastBuild jobBuild `json:"lastBuild"` + Jobs []innerJob `json:"jobs"` + Name string `json:"name"` +} + +type innerJob struct { + Name string `json:"name"` + URL string `json:"url"` + Color string `json:"color"` +} + +type jobBuild struct { + Number int64 + URL string +} + +type buildResponse struct { + Building bool `json:"building"` + Duration int64 `json:"duration"` + Result string `json:"result"` + Timestamp int64 `json:"timestamp"` +} + +func (b *buildResponse) 
GetTimestamp() time.Time { + return time.Unix(0, int64(b.Timestamp)*int64(time.Millisecond)) +} + +const ( + nodePath = "/computer/api/json" + jobPath = "/api/json" +) + +type jobRequest struct { + name string + parents []string + layer int +} + +func (jr jobRequest) combined() []string { + return append(jr.parents, jr.name) +} + +func (jr jobRequest) combinedEscaped() []string { + jobs := jr.combined() + for index, job := range jobs { + jobs[index] = url.PathEscape(job) + } + return jobs +} + +func (jr jobRequest) URL() string { + return "/job/" + strings.Join(jr.combinedEscaped(), "/job/") + jobPath +} + +func (jr jobRequest) buildURL(number int64) string { + return "/job/" + strings.Join(jr.combinedEscaped(), "/job/") + "/" + strconv.Itoa(int(number)) + jobPath +} + +func (jr jobRequest) hierarchyName() string { + return strings.Join(jr.combined(), "/") +} + +func (jr jobRequest) parentsString() string { + return strings.Join(jr.parents, "/") +} + +func (j *Jenkins) gatherJobBuild(jr jobRequest, b *buildResponse, acc telegraf.Accumulator) { + tags := map[string]string{"name": jr.name, "parents": jr.parentsString(), "result": b.Result, "source": j.Source, "port": j.Port} + fields := make(map[string]interface{}) + fields["duration"] = b.Duration + fields["result_code"] = mapResultCode(b.Result) + + acc.AddFields(measurementJob, fields, tags, b.GetTimestamp()) +} + +// perform status mapping +func mapResultCode(s string) int { + switch strings.ToLower(s) { + case "success": + return 0 + case "failure": + return 1 + case "not_built": + return 2 + case "unstable": + return 3 + case "aborted": + return 4 + } + return -1 +} + +func init() { + inputs.Add("jenkins", func() telegraf.Input { + return &Jenkins{ + MaxBuildAge: internal.Duration{Duration: time.Duration(time.Hour)}, + MaxConnections: 5, + MaxSubJobPerLayer: 10, + } + }) +} diff --git a/plugins/inputs/jenkins/jenkins_test.go b/plugins/inputs/jenkins/jenkins_test.go new file mode 100644 index 000000000..b8284fc0d --- /dev/null +++ b/plugins/inputs/jenkins/jenkins_test.go @@ -0,0 +1,782 @@ +// Test Suite +package jenkins + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sort" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" +) + +func TestJobRequest(t *testing.T) { + tests := []struct { + input jobRequest + hierarchyName string + URL string + }{ + { + jobRequest{}, + "", + "", + }, + { + jobRequest{ + name: "1", + parents: []string{"3", "2"}, + }, + "3/2/1", + "/job/3/job/2/job/1/api/json", + }, + { + jobRequest{ + name: "job 3", + parents: []string{"job 1", "job 2"}, + }, + "job 1/job 2/job 3", + "/job/job%201/job/job%202/job/job%203/api/json", + }, + } + for _, test := range tests { + hierarchyName := test.input.hierarchyName() + URL := test.input.URL() + if hierarchyName != test.hierarchyName { + t.Errorf("Expected %s, got %s\n", test.hierarchyName, hierarchyName) + } + + if test.URL != "" && URL != test.URL { + t.Errorf("Expected %s, got %s\n", test.URL, URL) + } + } +} + +func TestResultCode(t *testing.T) { + tests := []struct { + input string + output int + }{ + {"SUCCESS", 0}, + {"Failure", 1}, + {"NOT_BUILT", 2}, + {"UNSTABLE", 3}, + {"ABORTED", 4}, + } + for _, test := range tests { + output := mapResultCode(test.input) + if output != test.output { + t.Errorf("Expected %d, got %d\n", test.output, output) + } + } +} + +type mockHandler struct { + // responseMap is the path to response interface + // we will output the serialized response 
in json when serving http + // example '/computer/api/json': *gojenkins. + responseMap map[string]interface{} +} + +func (h mockHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + o, ok := h.responseMap[r.URL.RequestURI()] + if !ok { + w.WriteHeader(http.StatusNotFound) + return + } + + b, err := json.Marshal(o) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + if len(b) == 0 { + w.WriteHeader(http.StatusNoContent) + return + } + w.Write(b) +} + +func TestGatherNodeData(t *testing.T) { + tests := []struct { + name string + input mockHandler + output *testutil.Accumulator + wantErr bool + }{ + { + name: "bad node data", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + {}, + {}, + {}, + }, + }, + }, + }, + wantErr: true, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 0, + "total_executors": 0, + }, + }, + }, + }, + }, + { + name: "empty monitor data", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + Computers: []node{ + {DisplayName: "master"}, + {DisplayName: "node1"}, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{}, + }, + }, + { + name: "filtered nodes", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + BusyExecutors: 4, + TotalExecutors: 8, + Computers: []node{ + {DisplayName: "ignore-1"}, + {DisplayName: "ignore-2"}, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 4, + "total_executors": 8, + }, + }, + }, + }, + }, + { + name: "normal data collection", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + BusyExecutors: 4, + TotalExecutors: 8, + Computers: []node{ + { + DisplayName: "master", + MonitorData: monitorData{ + HudsonNodeMonitorsArchitectureMonitor: "linux", + HudsonNodeMonitorsResponseTimeMonitor: &responseTimeMonitor{ + Average: 10032, + }, + HudsonNodeMonitorsDiskSpaceMonitor: &nodeSpaceMonitor{ + Path: "/path/1", + Size: 123, + }, + HudsonNodeMonitorsTemporarySpaceMonitor: &nodeSpaceMonitor{ + Path: "/path/2", + Size: 245, + }, + HudsonNodeMonitorsSwapSpaceMonitor: &swapSpaceMonitor{ + SwapAvailable: 212, + SwapTotal: 500, + MemoryAvailable: 101, + MemoryTotal: 500, + }, + }, + Offline: false, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 4, + "total_executors": 8, + }, + }, + { + Tags: map[string]string{ + "node_name": "master", + "arch": "linux", + "status": "online", + "disk_path": "/path/1", + "temp_path": "/path/2", + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "response_time": int64(10032), + "disk_available": float64(123), + "temp_available": float64(245), + "swap_available": float64(212), + "swap_total": float64(500), + "memory_available": float64(101), + "memory_total": float64(500), + }, + }, + }, + }, + }, + { + name: "slave is offline", + input: mockHandler{ + responseMap: map[string]interface{}{ + 
"/api/json": struct{}{}, + "/computer/api/json": nodeResponse{ + BusyExecutors: 4, + TotalExecutors: 8, + Computers: []node{ + { + DisplayName: "slave", + MonitorData: monitorData{}, + NumExecutors: 1, + Offline: true, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "source": "127.0.0.1", + }, + Fields: map[string]interface{}{ + "busy_executors": 4, + "total_executors": 8, + }, + }, + { + Tags: map[string]string{ + "node_name": "slave", + "status": "offline", + }, + Fields: map[string]interface{}{ + "num_executors": 1, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ts := httptest.NewServer(test.input) + defer ts.Close() + j := &Jenkins{ + Log: testutil.Logger{}, + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + NodeExclude: []string{"ignore-1", "ignore-2"}, + } + te := j.initialize(&http.Client{Transport: &http.Transport{}}) + acc := new(testutil.Accumulator) + j.gatherNodesData(acc) + if err := acc.FirstError(); err != nil { + te = err + } + + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + if test.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data %s", test.name, acc.Metrics) + } else if test.output != nil && len(test.output.Metrics) > 0 { + for i := 0; i < len(test.output.Metrics); i++ { + for k, m := range test.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[0].Tags[k]) + } + } + for k, m := range test.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[0].Fields[k], acc.Metrics[0].Fields[k]) + } + } + } + } + }) + } +} + +func TestInitialize(t *testing.T) { + mh := mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": struct{}{}, + }, + } + ts := httptest.NewServer(mh) + defer ts.Close() + mockClient := &http.Client{Transport: &http.Transport{}} + tests := []struct { + // name of the test + name string + input *Jenkins + output *Jenkins + wantErr bool + }{ + { + name: "bad jenkins config", + input: &Jenkins{ + Log: testutil.Logger{}, + URL: "http://a bad url", + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + }, + wantErr: true, + }, + { + name: "has filter", + input: &Jenkins{ + Log: testutil.Logger{}, + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + JobExclude: []string{"job1", "job2"}, + NodeExclude: []string{"node1", "node2"}, + }, + }, + { + name: "default config", + input: &Jenkins{ + Log: testutil.Logger{}, + URL: ts.URL, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + }, + output: &Jenkins{ + Log: testutil.Logger{}, + MaxConnections: 5, + MaxSubJobPerLayer: 10, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + te := test.input.initialize(mockClient) + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + if test.output != nil { + if test.input.client == nil { + t.Fatalf("%s: failed %s, jenkins instance shouldn't be nil", test.name, te.Error()) + } + if 
test.input.MaxConnections != test.output.MaxConnections { + t.Fatalf("%s: different MaxConnections Expected %d, got %d\n", test.name, test.output.MaxConnections, test.input.MaxConnections) + } + } + }) + } +} + +func TestGatherJobs(t *testing.T) { + tests := []struct { + name string + input mockHandler + output *testutil.Accumulator + wantErr bool + }{ + { + name: "empty job", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{}, + }, + }, + }, + { + name: "bad inner jobs", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "jobs has no build", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{}, + }, + }, + }, + { + name: "bad build info", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "ignore building job", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/job1/1/api/json": &buildResponse{ + Building: true, + }, + }, + }, + }, + { + name: "ignore old build", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 2, + }, + }, + "/job/job1/2/api/json": &buildResponse{ + Building: false, + Timestamp: 100, + }, + }, + }, + }, + { + name: "gather metrics", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job1"}, + {Name: "job2"}, + }, + }, + "/job/job1/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 3, + }, + }, + "/job/job2/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/job1/3/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 25558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/job2/1/api/json": &buildResponse{ + Building: false, + Result: "FAILURE", + Duration: 1558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "name": "job1", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(25558), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "job2", + "result": "FAILURE", + }, + Fields: map[string]interface{}{ + "duration": int64(1558), + "result_code": 1, + }, + }, + }, + }, + }, + { + name: "gather metrics for jobs with space", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "job 1"}, + }, + }, + "/job/job%201/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 3, + }, + }, + "/job/job%201/3/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 25558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + 
{ + Tags: map[string]string{ + "name": "job 1", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(25558), + "result_code": 0, + }, + }, + }, + }, + }, + { + name: "gather sub jobs, jobs filter", + input: mockHandler{ + responseMap: map[string]interface{}{ + "/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "apps"}, + {Name: "ignore-1"}, + }, + }, + "/job/apps/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "k8s-cloud"}, + {Name: "chronograf"}, + {Name: "ignore-all"}, + }, + }, + "/job/apps/job/ignore-all/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "1"}, + {Name: "2"}, + }, + }, + "/job/apps/job/chronograf/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/apps/job/k8s-cloud/api/json": &jobResponse{ + Jobs: []innerJob{ + {Name: "PR-100"}, + {Name: "PR-101"}, + {Name: "PR-ignore2"}, + {Name: "PR 1"}, + {Name: "PR ignore"}, + }, + }, + "/job/apps/job/k8s-cloud/job/PR-100/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/apps/job/k8s-cloud/job/PR-101/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 4, + }, + }, + "/job/apps/job/k8s-cloud/job/PR%201/api/json": &jobResponse{ + LastBuild: jobBuild{ + Number: 1, + }, + }, + "/job/apps/job/chronograf/1/api/json": &buildResponse{ + Building: false, + Result: "FAILURE", + Duration: 1558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/apps/job/k8s-cloud/job/PR-101/4/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 76558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/apps/job/k8s-cloud/job/PR-100/1/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 91558, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + "/job/apps/job/k8s-cloud/job/PR%201/1/api/json": &buildResponse{ + Building: false, + Result: "SUCCESS", + Duration: 87832, + Timestamp: (time.Now().Unix() - int64(time.Minute.Seconds())) * 1000, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Tags: map[string]string{ + "name": "PR 1", + "parents": "apps/k8s-cloud", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(87832), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "PR-100", + "parents": "apps/k8s-cloud", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(91558), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "PR-101", + "parents": "apps/k8s-cloud", + "result": "SUCCESS", + }, + Fields: map[string]interface{}{ + "duration": int64(76558), + "result_code": 0, + }, + }, + { + Tags: map[string]string{ + "name": "chronograf", + "parents": "apps", + "result": "FAILURE", + }, + Fields: map[string]interface{}{ + "duration": int64(1558), + "result_code": 1, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ts := httptest.NewServer(test.input) + defer ts.Close() + j := &Jenkins{ + Log: testutil.Logger{}, + URL: ts.URL, + MaxBuildAge: internal.Duration{Duration: time.Hour}, + ResponseTimeout: internal.Duration{Duration: time.Microsecond}, + JobExclude: []string{ + "ignore-1", + "apps/ignore-all/*", + "apps/k8s-cloud/PR-ignore2", + "apps/k8s-cloud/PR ignore", + }, + } + te := j.initialize(&http.Client{Transport: &http.Transport{}}) + acc := new(testutil.Accumulator) + j.gatherJobs(acc) + if err := acc.FirstError(); err != nil { + te = 
err + } + if !test.wantErr && te != nil { + t.Fatalf("%s: failed %s, expected to be nil", test.name, te.Error()) + } else if test.wantErr && te == nil { + t.Fatalf("%s: expected err, got nil", test.name) + } + + if test.output != nil && len(test.output.Metrics) > 0 { + // sort metrics + sort.Slice(acc.Metrics, func(i, j int) bool { + return strings.Compare(acc.Metrics[i].Tags["name"], acc.Metrics[j].Tags["name"]) < 0 + }) + for i := range test.output.Metrics { + for k, m := range test.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", test.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range test.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", test.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[0].Fields[k]) + } + } + } + + } + }) + } +} diff --git a/plugins/inputs/jolokia/jolokia_test.go b/plugins/inputs/jolokia/jolokia_test.go index b47ffbc26..a6acd2953 100644 --- a/plugins/inputs/jolokia/jolokia_test.go +++ b/plugins/inputs/jolokia/jolokia_test.go @@ -117,7 +117,7 @@ const invalidJSON = "I don't think this is JSON" const empty = "" -var Servers = []Server{Server{Name: "as1", Host: "127.0.0.1", Port: "8080"}} +var Servers = []Server{{Name: "as1", Host: "127.0.0.1", Port: "8080"}} var HeapMetric = Metric{Name: "heap_memory_usage", Mbean: "java.lang:type=Memory", Attribute: "HeapMemoryUsage"} var UsedHeapMetric = Metric{Name: "heap_memory_usage", diff --git a/plugins/inputs/jolokia2/README.md b/plugins/inputs/jolokia2/README.md index 1efc59f1f..190e6627d 100644 --- a/plugins/inputs/jolokia2/README.md +++ b/plugins/inputs/jolokia2/README.md @@ -181,5 +181,6 @@ Both `jolokia2_agent` and `jolokia2_proxy` plugins support default configuration - [Kafka](/plugins/inputs/jolokia2/examples/kafka.conf) - [Tomcat](/plugins/inputs/jolokia2/examples/tomcat.conf) - [Weblogic](/plugins/inputs/jolokia2/examples/weblogic.conf) +- [ZooKeeper](/plugins/inputs/jolokia2/examples/zookeeper.conf) Please help improve this list and contribute new configuration files by opening an issue or pull request. 
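
The Jenkins input added earlier in this patch throttles its outbound HTTP calls by treating a buffered channel as a counting semaphore: `doGet` sends into `j.semaphore` before issuing a request and receives from it once the call returns, so only a bounded number of requests can be in flight at a time. Below is a minimal, self-contained Go sketch of that same bounded-concurrency pattern, using hypothetical names (`limiter`, `newLimiter`, `do`) rather than the plugin's own types, to show how the acquire/release pairing behaves in isolation.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// limiter bounds how many operations may run at once by using a
// buffered channel as a counting semaphore, mirroring the pattern
// in Jenkins.doGet. Names here are illustrative only.
type limiter struct {
	semaphore chan struct{}
}

func newLimiter(max int) *limiter {
	return &limiter{semaphore: make(chan struct{}, max)}
}

// do acquires a slot (blocking while the channel is full), runs fn,
// and releases the slot when fn returns.
func (l *limiter) do(fn func() error) error {
	l.semaphore <- struct{}{}
	defer func() { <-l.semaphore }()
	return fn()
}

func main() {
	l := newLimiter(2) // at most two simulated requests in flight
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			_ = l.do(func() error {
				time.Sleep(10 * time.Millisecond)
				fmt.Println("request", n, "done")
				return nil
			})
		}(i)
	}
	wg.Wait()
}
```

Whether the slot is released explicitly in each branch (as `doGet` does) or via `defer` (as in this sketch) is equivalent; the invariant that matters is that every acquire is paired with exactly one release, otherwise the channel fills and all further requests block.
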
diff --git a/plugins/inputs/jolokia2/examples/cassandra.conf b/plugins/inputs/jolokia2/examples/cassandra.conf index b8bb60980..bc9c97ff1 100644 --- a/plugins/inputs/jolokia2/examples/cassandra.conf +++ b/plugins/inputs/jolokia2/examples/cassandra.conf @@ -2,7 +2,7 @@ urls = ["http://localhost:8778/jolokia"] name_prefix = "java_" - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "Memory" mbean = "java.lang:type=Memory" diff --git a/plugins/inputs/jolokia2/examples/java.conf b/plugins/inputs/jolokia2/examples/java.conf index 361bce1d2..aa9bc6852 100644 --- a/plugins/inputs/jolokia2/examples/java.conf +++ b/plugins/inputs/jolokia2/examples/java.conf @@ -14,7 +14,7 @@ [[inputs.jolokia2_agent.metric]] name = "java_garbage_collector" - mbean = "java.lang:name=G1*,type=GarbageCollector" + mbean = "java.lang:name=*,type=GarbageCollector" paths = ["CollectionTime", "CollectionCount"] tag_keys = ["name"] @@ -23,17 +23,17 @@ mbean = "java.lang:name=G1 Young Generation,type=GarbageCollector" paths = ["LastGcInfo/duration", "LastGcInfo/GcThreadCount", "LastGcInfo/memoryUsageAfterGc"] - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "java_threading" mbean = "java.lang:type=Threading" paths = ["TotalStartedThreadCount", "ThreadCount", "DaemonThreadCount", "PeakThreadCount"] - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "java_class_loading" mbean = "java.lang:type=ClassLoading" paths = ["LoadedClassCount", "UnloadedClassCount", "TotalLoadedClassCount"] - [[inputs.jolokia2_agent.metrics]] + [[inputs.jolokia2_agent.metric]] name = "java_memory_pool" mbean = "java.lang:name=*,type=MemoryPool" paths = ["Usage", "PeakUsage", "CollectionUsage"] diff --git a/plugins/inputs/jolokia2/examples/zookeeper.conf b/plugins/inputs/jolokia2/examples/zookeeper.conf new file mode 100644 index 000000000..514e43ea8 --- /dev/null +++ b/plugins/inputs/jolokia2/examples/zookeeper.conf @@ -0,0 +1,18 @@ +[[inputs.jolokia2_agent]] + urls = ["http://localhost:8080/jolokia"] + name_prefix = "zk_" + + [[inputs.jolokia2_agent.metric]] + name = "quorum" + mbean = "org.apache.ZooKeeperService:name0=*" + tag_keys = ["name0"] + + [[inputs.jolokia2_agent.metric]] + name = "leader" + mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Leader" + tag_keys = ["name1"] + + [[inputs.jolokia2_agent.metric]] + name = "follower" + mbean = "org.apache.ZooKeeperService:name0=*,name1=*,name2=Follower" + tag_keys = ["name1"] diff --git a/plugins/inputs/jolokia2/gatherer.go b/plugins/inputs/jolokia2/gatherer.go index 5005e8225..5b2aa00d8 100644 --- a/plugins/inputs/jolokia2/gatherer.go +++ b/plugins/inputs/jolokia2/gatherer.go @@ -43,7 +43,7 @@ func (g *Gatherer) Gather(client *Client, acc telegraf.Accumulator) error { return nil } -// gatherReponses adds points to an accumulator from the ReadResponse objects +// gatherResponses adds points to an accumulator from the ReadResponse objects // returned by a Jolokia agent. func (g *Gatherer) gatherResponses(responses []ReadResponse, tags map[string]string, acc telegraf.Accumulator) { series := make(map[string][]point, 0) @@ -144,7 +144,7 @@ func metricMatchesResponse(metric Metric, response ReadResponse) bool { return false } -// compactPoints attepts to remove points by compacting points +// compactPoints attempts to remove points by compacting points // with matching tag sets. When a match is found, the fields from // one point are moved to another, and the empty point is removed. 
func compactPoints(points []point) []point { diff --git a/plugins/inputs/jolokia2/gatherer_test.go b/plugins/inputs/jolokia2/gatherer_test.go index ca83cf0ac..4ba4b586a 100644 --- a/plugins/inputs/jolokia2/gatherer_test.go +++ b/plugins/inputs/jolokia2/gatherer_test.go @@ -17,7 +17,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Mbean: "test:foo=bar", }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{}, }, @@ -29,7 +29,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"biz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"biz"}, }, @@ -41,7 +41,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"baz", "biz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"baz", "biz"}, }, @@ -53,7 +53,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"biz/baz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"biz"}, Path: "baz", @@ -66,7 +66,7 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"biz/baz/fiz/faz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"biz"}, Path: "baz/fiz/faz", @@ -79,12 +79,12 @@ func TestJolokia2_makeReadRequests(t *testing.T) { Paths: []string{"baz/biz", "faz/fiz"}, }, expected: []ReadRequest{ - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"baz"}, Path: "biz", }, - ReadRequest{ + { Mbean: "test:foo=bar", Attributes: []string{"faz"}, Path: "fiz", diff --git a/plugins/inputs/jolokia2/jolokia_test.go b/plugins/inputs/jolokia2/jolokia_test.go index f94606ae6..61c410c0b 100644 --- a/plugins/inputs/jolokia2/jolokia_test.go +++ b/plugins/inputs/jolokia2/jolokia_test.go @@ -143,7 +143,12 @@ func TestJolokia2_ObjectValues(t *testing.T) { [[jolokia2_agent.metric]] name = "object_with_key_pattern" mbean = "object_with_key_pattern:test=*" - tag_keys = ["test"]` + tag_keys = ["test"] + + [[jolokia2_agent.metric]] + name = "ColumnFamily" + mbean = "org.apache.cassandra.metrics:keyspace=*,name=EstimatedRowSizeHistogram,scope=schema_columns,type=ColumnFamily" + tag_keys = ["keyspace", "name", "scope"]` response := `[{ "request": { @@ -214,7 +219,20 @@ func TestJolokia2_ObjectValues(t *testing.T) { } }, "status": 200 - }]` + }, { + "request": { + "mbean": "org.apache.cassandra.metrics:keyspace=*,name=EstimatedRowSizeHistogram,scope=schema_columns,type=ColumnFamily", + "type": "read" + }, + "value": { + "org.apache.cassandra.metrics:keyspace=system,name=EstimatedRowSizeHistogram,scope=schema_columns,type=ColumnFamily": { + "Value": [ + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + ] + } + }, + "status": 200 + }]` server := setupServer(http.StatusOK, response) defer server.Close() @@ -730,6 +748,20 @@ func TestJolokia2_ProxyTargets(t *testing.T) { }) } +func TestFillFields(t *testing.T) { + complex := map[string]interface{}{"Value": []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + var scalar interface{} + scalar = []interface{}{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + + results := map[string]interface{}{} + newPointBuilder(Metric{Name: "test", Mbean: "complex"}, []string{"this", "that"}, "/").fillFields("", complex, results) + assert.Equal(t, map[string]interface{}{}, results) + + results = map[string]interface{}{} + newPointBuilder(Metric{Name: "test", Mbean: "scalar"}, []string{"this", "that"}, "/").fillFields("", scalar, results) + assert.Equal(t, map[string]interface{}{}, results) +} + func setupServer(status int, resp string) *httptest.Server { return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) @@ -748,7 +780,7 @@ func setupPlugin(t *testing.T, conf string) telegraf.Input { t.Fatalf("Unable to parse config! %v", err) } - for name, _ := range table.Fields { + for name := range table.Fields { object := table.Fields[name] switch name { case "jolokia2_agent": diff --git a/plugins/inputs/jolokia2/point_builder.go b/plugins/inputs/jolokia2/point_builder.go index 02877ea70..f5ae1d314 100644 --- a/plugins/inputs/jolokia2/point_builder.go +++ b/plugins/inputs/jolokia2/point_builder.go @@ -158,8 +158,11 @@ func (pb *pointBuilder) fillFields(name string, value interface{}, fieldMap map[ if valueMap, ok := value.(map[string]interface{}); ok { // keep going until we get to something that is not a map for key, innerValue := range valueMap { - var innerName string + if _, ok := innerValue.([]interface{}); ok { + continue + } + var innerName string if name == "" { innerName = pb.metric.FieldPrefix + key } else { @@ -172,6 +175,10 @@ func (pb *pointBuilder) fillFields(name string, value interface{}, fieldMap map[ return } + if _, ok := value.([]interface{}); ok { + return + } + if pb.metric.FieldName != "" { name = pb.metric.FieldName if prefix := pb.metric.FieldPrefix; prefix != "" { diff --git a/plugins/inputs/jti_openconfig_telemetry/README.md b/plugins/inputs/jti_openconfig_telemetry/README.md index 7c30aaa8d..1a28b55ae 100644 --- a/plugins/inputs/jti_openconfig_telemetry/README.md +++ b/plugins/inputs/jti_openconfig_telemetry/README.md @@ -41,9 +41,13 @@ This plugin reads Juniper Networks implementation of OpenConfig telemetry data f "/interfaces", ] - ## x509 Certificate to use with TLS connection. If it is not provided, an insecure - ## channel will be opened with server - ssl_cert = "/etc/telegraf/cert.pem" + ## Optional TLS Config + # enable_tls = true + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. 
## Failed streams/calls will not be retried if 0 is provided diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go index a4cd76cc4..bc7c78045 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.pb.go @@ -980,7 +980,7 @@ type OpenConfigTelemetryClient interface { // The device should send telemetry data back on the same // connection as the subscription request. TelemetrySubscribe(ctx context.Context, in *SubscriptionRequest, opts ...grpc.CallOption) (OpenConfigTelemetry_TelemetrySubscribeClient, error) - // Terminates and removes an exisiting telemetry subscription + // Terminates and removes an existing telemetry subscription CancelTelemetrySubscription(ctx context.Context, in *CancelSubscriptionRequest, opts ...grpc.CallOption) (*CancelSubscriptionReply, error) // Get the list of current telemetry subscriptions from the // target. This command returns a list of existing subscriptions @@ -1076,7 +1076,7 @@ type OpenConfigTelemetryServer interface { // The device should send telemetry data back on the same // connection as the subscription request. TelemetrySubscribe(*SubscriptionRequest, OpenConfigTelemetry_TelemetrySubscribeServer) error - // Terminates and removes an exisiting telemetry subscription + // Terminates and removes an existing telemetry subscription CancelTelemetrySubscription(context.Context, *CancelSubscriptionRequest) (*CancelSubscriptionReply, error) // Get the list of current telemetry subscriptions from the // target. This command returns a list of existing subscriptions diff --git a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto index 38ce9b422..cf4aa145e 100644 --- a/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto +++ b/plugins/inputs/jti_openconfig_telemetry/oc/oc.proto @@ -44,7 +44,7 @@ service OpenConfigTelemetry { // connection as the subscription request. 
rpc telemetrySubscribe(SubscriptionRequest) returns (stream OpenConfigData) {} - // Terminates and removes an exisiting telemetry subscription + // Terminates and removes an existing telemetry subscription rpc cancelTelemetrySubscription(CancelSubscriptionRequest) returns (CancelSubscriptionReply) {} // Get the list of current telemetry subscriptions from the diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go index 49a593a08..39f9bb58a 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry.go @@ -2,7 +2,6 @@ package jti_openconfig_telemetry import ( "fmt" - "log" "net" "regexp" "strings" @@ -11,6 +10,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + internaltls "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/auth" "github.com/influxdata/telegraf/plugins/inputs/jti_openconfig_telemetry/oc" @@ -22,15 +22,18 @@ import ( ) type OpenConfigTelemetry struct { - Servers []string - Sensors []string - Username string - Password string + Servers []string `toml:"servers"` + Sensors []string `toml:"sensors"` + Username string `toml:"username"` + Password string `toml:"password"` ClientID string `toml:"client_id"` SampleFrequency internal.Duration `toml:"sample_frequency"` - SSLCert string `toml:"ssl_cert"` StrAsTags bool `toml:"str_as_tags"` RetryDelay internal.Duration `toml:"retry_delay"` + EnableTLS bool `toml:"enable_tls"` + internaltls.ClientConfig + + Log telegraf.Logger sensorsConfig []sensorConfig grpcClientConns []*grpc.ClientConn @@ -44,8 +47,8 @@ var ( ## List of device addresses to collect telemetry from servers = ["localhost:1883"] - ## Authentication details. Username and password are must if device expects - ## authentication. Client ID must be unique when connecting from multiple instances + ## Authentication details. Username and password are must if device expects + ## authentication. Client ID must be unique when connecting from multiple instances ## of telegraf to the same device username = "user" password = "pass" @@ -57,16 +60,16 @@ var ( ## Sensors to subscribe for ## A identifier for each sensor can be provided in path by separating with space ## Else sensor path will be used as identifier - ## When identifier is used, we can provide a list of space separated sensors. - ## A single subscription will be created with all these sensors and data will + ## When identifier is used, we can provide a list of space separated sensors. + ## A single subscription will be created with all these sensors and data will ## be saved to measurement with this identifier name sensors = [ "/interfaces/", "collection /components/ /lldp", ] - ## We allow specifying sensor group level reporting rate. To do this, specify the - ## reporting rate in Duration at the beginning of sensor paths / collection + ## We allow specifying sensor group level reporting rate. To do this, specify the + ## reporting rate in Duration at the beginning of sensor paths / collection ## name. For entries without reporting rate, we use configured sample frequency sensors = [ "1000ms customReporting /interfaces /lldp", @@ -74,9 +77,13 @@ var ( "/interfaces", ] - ## x509 Certificate to use with TLS connection. 
If it is not provided, an insecure - ## channel will be opened with server - ssl_cert = "/etc/telegraf/cert.pem" + ## Optional TLS Config + # enable_tls = true + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ## Delay between retry attempts of failed RPC calls or streams. Defaults to 1000ms. ## Failed streams/calls will not be retried if 0 is provided @@ -237,7 +244,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { } if len(spathSplit) == 0 { - log.Printf("E! No sensors are specified") + m.Log.Error("No sensors are specified") continue } @@ -251,7 +258,7 @@ func (m *OpenConfigTelemetry) splitSensorConfig() int { } if len(spathSplit) == 0 { - log.Printf("E! No valid sensors are specified") + m.Log.Error("No valid sensors are specified") continue } @@ -288,13 +295,13 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, rpcStatus, _ := status.FromError(err) // If service is currently unavailable and may come back later, retry if rpcStatus.Code() != codes.Unavailable { - acc.AddError(fmt.Errorf("E! Could not subscribe to %s: %v", grpcServer, + acc.AddError(fmt.Errorf("could not subscribe to %s: %v", grpcServer, err)) return } else { // Retry with delay. If delay is not provided, use default if m.RetryDelay.Duration > 0 { - log.Printf("D! Retrying %s with timeout %v", grpcServer, + m.Log.Debugf("Retrying %s with timeout %v", grpcServer, m.RetryDelay.Duration) time.Sleep(m.RetryDelay.Duration) continue @@ -308,11 +315,11 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, if err != nil { // If we encounter error in the stream, break so we can retry // the connection - acc.AddError(fmt.Errorf("E! Failed to read from %s: %v", err, grpcServer)) + acc.AddError(fmt.Errorf("failed to read from %s: %s", grpcServer, err)) break } - log.Printf("D! Received from %s: %v", grpcServer, r) + m.Log.Debugf("Received from %s: %v", grpcServer, r) // Create a point and add to batch tags := make(map[string]string) @@ -323,7 +330,7 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, dgroups := m.extractData(r, grpcServer) // Print final data collection - log.Printf("D! Available collection for %s is: %v", grpcServer, dgroups) + m.Log.Debugf("Available collection for %s is: %v", grpcServer, dgroups) tnow := time.Now() // Iterate through data groups and add them @@ -345,19 +352,19 @@ func (m *OpenConfigTelemetry) collectData(ctx context.Context, func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { // Build sensors config if m.splitSensorConfig() == 0 { - return fmt.Errorf("E! No valid sensor configuration available") + return fmt.Errorf("no valid sensor configuration available") } - // If SSL certificate is provided, use transport credentials - var err error - var transportCredentials credentials.TransportCredentials - if m.SSLCert != "" { - transportCredentials, err = credentials.NewClientTLSFromFile(m.SSLCert, "") + // Parse TLS config + var opts []grpc.DialOption + if m.EnableTLS { + tlscfg, err := m.ClientConfig.TLSConfig() if err != nil { - return fmt.Errorf("E! 
Failed to read certificate: %v", err) + return err } + opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlscfg))) } else { - transportCredentials = nil + opts = append(opts, grpc.WithInsecure()) } // Connect to given list of servers and start collecting data @@ -369,20 +376,15 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { // Extract device address and port grpcServer, grpcPort, err := net.SplitHostPort(server) if err != nil { - log.Printf("E! Invalid server address: %v", err) + m.Log.Errorf("Invalid server address: %s", err.Error()) continue } - // If a certificate is provided, open a secure channel. Else open insecure one - if transportCredentials != nil { - grpcClientConn, err = grpc.Dial(server, grpc.WithTransportCredentials(transportCredentials)) - } else { - grpcClientConn, err = grpc.Dial(server, grpc.WithInsecure()) - } + grpcClientConn, err = grpc.Dial(server, opts...) if err != nil { - log.Printf("E! Failed to connect to %s: %v", server, err) + m.Log.Errorf("Failed to connect to %s: %s", server, err.Error()) } else { - log.Printf("D! Opened a new gRPC session to %s on port %s", grpcServer, grpcPort) + m.Log.Debugf("Opened a new gRPC session to %s on port %s", grpcServer, grpcPort) } // Add to the list of client connections @@ -394,13 +396,13 @@ func (m *OpenConfigTelemetry) Start(acc telegraf.Accumulator) error { &authentication.LoginRequest{UserName: m.Username, Password: m.Password, ClientId: m.ClientID}) if loginErr != nil { - log.Printf("E! Could not initiate login check for %s: %v", server, err) + m.Log.Errorf("Could not initiate login check for %s: %v", server, loginErr) continue } // Check if the user is authenticated. Bail if auth error if !loginReply.Result { - log.Printf("E! Failed to authenticate the user for %s", server) + m.Log.Errorf("Failed to authenticate the user for %s", server) continue } } diff --git a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go index 8b0abd883..a3df62e1b 100644 --- a/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go +++ b/plugins/inputs/jti_openconfig_telemetry/openconfig_telemetry_test.go @@ -17,6 +17,7 @@ import ( ) var cfg = &OpenConfigTelemetry{ + Log: testutil.Logger{}, Servers: []string{"127.0.0.1:50051"}, SampleFrequency: internal.Duration{Duration: time.Second * 2}, } diff --git a/plugins/inputs/kafka_consumer/README.md b/plugins/inputs/kafka_consumer/README.md index 67dbb539e..dec39cc32 100644 --- a/plugins/inputs/kafka_consumer/README.md +++ b/plugins/inputs/kafka_consumer/README.md @@ -1,26 +1,31 @@ # Kafka Consumer Input Plugin -The [Kafka](http://kafka.apache.org/) consumer plugin polls a specified Kafka -topic and adds messages to InfluxDB. The plugin assumes messages follow the -line protocol. [Consumer Group](http://godoc.org/github.com/wvanbergen/kafka/consumergroup) -is used to talk to the Kafka cluster so multiple instances of telegraf can read -from the same topic in parallel. +The [Kafka][kafka] consumer plugin reads from Kafka +and creates metrics using one of the supported [input data formats][]. -For old kafka version (< 0.8), please use the kafka_consumer_legacy input plugin +For old kafka version (< 0.8), please use the [kafka_consumer_legacy][] input plugin and use the old zookeeper connection method. 
-## Configuration +### Configuration ```toml -# Read metrics from Kafka topic(s) [[inputs.kafka_consumer]] - ## topic(s) to consume - topics = ["telegraf"] + ## Kafka brokers. brokers = ["localhost:9092"] - ## the name of the consumer group - consumer_group = "telegraf_metrics_consumers" - ## Offset (must be either "oldest" or "newest") - offset = "oldest" + + ## Topics to consume. + topics = ["telegraf"] + + ## When set this tag will be added to all metrics with the topic as the value. + # topic_tag = "" + + ## Optional Client id + # client_id = "Telegraf" + + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. Must be 0.10.2.0 or greater. + ## ex: version = "1.1.0" + # version = "" ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" @@ -29,22 +34,44 @@ and use the old zookeeper connection method. ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## Optional SASL Config + ## SASL authentication credentials. These settings should typically be used + ## with TLS encryption enabled using the "enable_tls" option. # sasl_username = "kafka" # sasl_password = "secret" + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + + ## Name of the consumer group. + # consumer_group = "telegraf_metrics_consumers" + + ## Initial offset position; one of "oldest" or "newest". + # offset = "oldest" + + ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". + # balance_strategy = "range" + + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 1000000 + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - - ## Maximum length of a message to consume, in bytes (default 0/unlimited); - ## larger messages are dropped - max_message_len = 65536 ``` -## Testing - -Running integration tests requires running Zookeeper & Kafka. See Makefile -for kafka container command. 
+[kafka]: https://kafka.apache.org +[kafka_consumer_legacy]: /plugins/inputs/kafka_consumer_legacy/README.md +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/kafka_consumer/kafka_consumer.go b/plugins/inputs/kafka_consumer/kafka_consumer.go index bf74dd5ab..952f50d99 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer.go @@ -1,130 +1,212 @@ package kafka_consumer import ( + "context" "fmt" "log" "strings" "sync" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal/tls" - "github.com/influxdata/telegraf/plugins/inputs" - "github.com/influxdata/telegraf/plugins/parsers" + "time" "github.com/Shopify/sarama" - cluster "github.com/bsm/sarama-cluster" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/common/kafka" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" ) -type Kafka struct { - ConsumerGroup string - Topics []string - Brokers []string - MaxMessageLen int - - Cluster *cluster.Consumer - - tls.ClientConfig - - // SASL Username - SASLUsername string `toml:"sasl_username"` - // SASL Password - SASLPassword string `toml:"sasl_password"` - - // Legacy metric buffer support - MetricBuffer int - // TODO remove PointBuffer, legacy support - PointBuffer int - - Offset string - parser parsers.Parser - - sync.Mutex - - // channel for all incoming kafka messages - in <-chan *sarama.ConsumerMessage - // channel for all kafka consumer errors - errs <-chan error - done chan struct{} - - // keep the accumulator internally: - acc telegraf.Accumulator - - // doNotCommitMsgs tells the parser not to call CommitUpTo on the consumer - // this is mostly for test purposes, but there may be a use-case for it later. - doNotCommitMsgs bool -} - -var sampleConfig = ` - ## kafka servers +const sampleConfig = ` + ## Kafka brokers. brokers = ["localhost:9092"] - ## topic(s) to consume + + ## Topics to consume. topics = ["telegraf"] + ## When set this tag will be added to all metrics with the topic as the value. + # topic_tag = "" + + ## Optional Client id + # client_id = "Telegraf" + + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. Must be 0.10.2.0 or greater. + ## ex: version = "1.1.0" + # version = "" + ## Optional TLS Config + # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false - ## Optional SASL Config + ## SASL authentication credentials. These settings should typically be used + ## with TLS encryption enabled using the "enable_tls" option. # sasl_username = "kafka" # sasl_password = "secret" - ## the name of the consumer group - consumer_group = "telegraf_metrics_consumers" - ## Offset (must be either "oldest" or "newest") - offset = "oldest" + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + + ## Name of the consumer group. + # consumer_group = "telegraf_metrics_consumers" + + ## Initial offset position; one of "oldest" or "newest". + # offset = "oldest" + + ## Consumer group partition assignment strategy; one of "range", "roundrobin" or "sticky". 
+ # balance_strategy = "range" + + ## Maximum length of a message to consume, in bytes (default 0/unlimited); + ## larger messages are dropped + max_message_len = 1000000 + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" - - ## Maximum length of a message to consume, in bytes (default 0/unlimited); - ## larger messages are dropped - max_message_len = 65536 ` -func (k *Kafka) SampleConfig() string { +const ( + defaultMaxUndeliveredMessages = 1000 + defaultMaxMessageLen = 1000000 + defaultConsumerGroup = "telegraf_metrics_consumers" + reconnectDelay = 5 * time.Second +) + +type empty struct{} +type semaphore chan empty + +type KafkaConsumer struct { + Brokers []string `toml:"brokers"` + ClientID string `toml:"client_id"` + ConsumerGroup string `toml:"consumer_group"` + MaxMessageLen int `toml:"max_message_len"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + Offset string `toml:"offset"` + BalanceStrategy string `toml:"balance_strategy"` + Topics []string `toml:"topics"` + TopicTag string `toml:"topic_tag"` + Version string `toml:"version"` + SASLPassword string `toml:"sasl_password"` + SASLUsername string `toml:"sasl_username"` + SASLVersion *int `toml:"sasl_version"` + + EnableTLS *bool `toml:"enable_tls"` + tls.ClientConfig + + Log telegraf.Logger `toml:"-"` + + ConsumerCreator ConsumerGroupCreator `toml:"-"` + consumer ConsumerGroup + config *sarama.Config + + parser parsers.Parser + wg sync.WaitGroup + cancel context.CancelFunc +} + +type ConsumerGroup interface { + Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error + Errors() <-chan error + Close() error +} + +type ConsumerGroupCreator interface { + Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) +} + +type SaramaCreator struct{} + +func (*SaramaCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { + return sarama.NewConsumerGroup(brokers, group, config) +} + +func (k *KafkaConsumer) SampleConfig() string { return sampleConfig } -func (k *Kafka) Description() string { - return "Read metrics from Kafka topic(s)" +func (k *KafkaConsumer) Description() string { + return "Read metrics from Kafka topics" } -func (k *Kafka) SetParser(parser parsers.Parser) { +func (k *KafkaConsumer) SetParser(parser parsers.Parser) { k.parser = parser } -func (k *Kafka) Start(acc telegraf.Accumulator) error { - k.Lock() - defer k.Unlock() - var clusterErr error +func (k *KafkaConsumer) Init() error { + if k.MaxUndeliveredMessages == 0 { + k.MaxUndeliveredMessages = defaultMaxUndeliveredMessages + } + if k.ConsumerGroup == "" { + k.ConsumerGroup = defaultConsumerGroup + } - k.acc = acc - - config := cluster.NewConfig() + config := sarama.NewConfig() config.Consumer.Return.Errors = true + // Kafka version 0.10.2.0 is 
required for consumer groups. + config.Version = sarama.V0_10_2_0 + + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return err + } + + config.Version = version + } + + if k.EnableTLS != nil && *k.EnableTLS { + config.Net.TLS.Enable = true + } + tlsConfig, err := k.ClientConfig.TLSConfig() if err != nil { return err } if tlsConfig != nil { - log.Printf("D! TLS Enabled") config.Net.TLS.Config = tlsConfig - config.Net.TLS.Enable = true + + // To maintain backwards compatibility, if the enable_tls option is not + // set TLS is enabled if a non-default TLS config is used. + if k.EnableTLS == nil { + k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS") + config.Net.TLS.Enable = true + } } + if k.SASLUsername != "" && k.SASLPassword != "" { - log.Printf("D! Using SASL auth with username '%s',", - k.SASLUsername) config.Net.SASL.User = k.SASLUsername config.Net.SASL.Password = k.SASLPassword config.Net.SASL.Enable = true + + version, err := kafka.SASLVersion(config.Version, k.SASLVersion) + if err != nil { + return err + } + config.Net.SASL.Version = version + } + + if k.ClientID != "" { + config.ClientID = k.ClientID + } else { + config.ClientID = "Telegraf" } switch strings.ToLower(k.Offset) { @@ -133,90 +215,239 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { case "newest": config.Consumer.Offsets.Initial = sarama.OffsetNewest default: - log.Printf("I! WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", - k.Offset) - config.Consumer.Offsets.Initial = sarama.OffsetOldest + return fmt.Errorf("invalid offset %q", k.Offset) } - if k.Cluster == nil { - k.Cluster, clusterErr = cluster.NewConsumer( - k.Brokers, - k.ConsumerGroup, - k.Topics, - config, - ) - - if clusterErr != nil { - log.Printf("E! Error when creating Kafka Consumer, brokers: %v, topics: %v\n", - k.Brokers, k.Topics) - return clusterErr - } - - // Setup message and error channels - k.in = k.Cluster.Messages() - k.errs = k.Cluster.Errors() + switch strings.ToLower(k.BalanceStrategy) { + case "range", "": + config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange + case "roundrobin": + config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin + case "sticky": + config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky + default: + return fmt.Errorf("invalid balance strategy %q", k.BalanceStrategy) } - k.done = make(chan struct{}) - // Start the kafka message reader - go k.receiver() - log.Printf("I! Started the kafka consumer service, brokers: %v, topics: %v\n", - k.Brokers, k.Topics) + if k.ConsumerCreator == nil { + k.ConsumerCreator = &SaramaCreator{} + } + + k.config = config return nil } -// receiver() reads all incoming messages from the consumer, and parses them into -// influxdb metric points. 
-func (k *Kafka) receiver() { +func (k *KafkaConsumer) Start(acc telegraf.Accumulator) error { + var err error + k.consumer, err = k.ConsumerCreator.Create( + k.Brokers, + k.ConsumerGroup, + k.config, + ) + if err != nil { + return err + } + + ctx, cancel := context.WithCancel(context.Background()) + k.cancel = cancel + + // Start consumer goroutine + k.wg.Add(1) + go func() { + defer k.wg.Done() + for ctx.Err() == nil { + handler := NewConsumerGroupHandler(acc, k.MaxUndeliveredMessages, k.parser) + handler.MaxMessageLen = k.MaxMessageLen + handler.TopicTag = k.TopicTag + err := k.consumer.Consume(ctx, k.Topics, handler) + if err != nil { + acc.AddError(err) + internal.SleepContext(ctx, reconnectDelay) + } + } + err = k.consumer.Close() + if err != nil { + acc.AddError(err) + } + }() + + k.wg.Add(1) + go func() { + defer k.wg.Done() + for err := range k.consumer.Errors() { + acc.AddError(err) + } + }() + + return nil +} + +func (k *KafkaConsumer) Gather(acc telegraf.Accumulator) error { + return nil +} + +func (k *KafkaConsumer) Stop() { + k.cancel() + k.wg.Wait() +} + +// Message is an aggregate type binding the Kafka message and the session so +// that offsets can be updated. +type Message struct { + message *sarama.ConsumerMessage + session sarama.ConsumerGroupSession +} + +func NewConsumerGroupHandler(acc telegraf.Accumulator, maxUndelivered int, parser parsers.Parser) *ConsumerGroupHandler { + handler := &ConsumerGroupHandler{ + acc: acc.WithTracking(maxUndelivered), + sem: make(chan empty, maxUndelivered), + undelivered: make(map[telegraf.TrackingID]Message, maxUndelivered), + parser: parser, + } + return handler +} + +// ConsumerGroupHandler is a sarama.ConsumerGroupHandler implementation. +type ConsumerGroupHandler struct { + MaxMessageLen int + TopicTag string + + acc telegraf.TrackingAccumulator + sem semaphore + parser parsers.Parser + wg sync.WaitGroup + cancel context.CancelFunc + + mu sync.Mutex + undelivered map[telegraf.TrackingID]Message +} + +// Setup is called once when a new session is opened. It setups up the handler +// and begins processing delivered messages. +func (h *ConsumerGroupHandler) Setup(sarama.ConsumerGroupSession) error { + h.undelivered = make(map[telegraf.TrackingID]Message) + + ctx, cancel := context.WithCancel(context.Background()) + h.cancel = cancel + + h.wg.Add(1) + go func() { + defer h.wg.Done() + h.run(ctx) + }() + return nil +} + +// Run processes any delivered metrics during the lifetime of the session. 
+func (h *ConsumerGroupHandler) run(ctx context.Context) error { for { select { - case <-k.done: - return - case err := <-k.errs: - if err != nil { - k.acc.AddError(fmt.Errorf("Consumer Error: %s\n", err)) - } - case msg := <-k.in: - if k.MaxMessageLen != 0 && len(msg.Value) > k.MaxMessageLen { - k.acc.AddError(fmt.Errorf("Message longer than max_message_len (%d > %d)", - len(msg.Value), k.MaxMessageLen)) - } else { - metrics, err := k.parser.Parse(msg.Value) - if err != nil { - k.acc.AddError(fmt.Errorf("Message Parse Error\nmessage: %s\nerror: %s", - string(msg.Value), err.Error())) - } - for _, metric := range metrics { - k.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) - } - } + case <-ctx.Done(): + return nil + case track := <-h.acc.Delivered(): + h.onDelivery(track) + } + } +} - if !k.doNotCommitMsgs { - // TODO(cam) this locking can be removed if this PR gets merged: - // https://github.com/wvanbergen/kafka/pull/84 - k.Lock() - k.Cluster.MarkOffset(msg, "") - k.Unlock() +func (h *ConsumerGroupHandler) onDelivery(track telegraf.DeliveryInfo) { + h.mu.Lock() + defer h.mu.Unlock() + + msg, ok := h.undelivered[track.ID()] + if !ok { + log.Printf("E! [inputs.kafka_consumer] Could not mark message delivered: %d", track.ID()) + return + } + + if track.Delivered() { + msg.session.MarkMessage(msg.message, "") + } + + delete(h.undelivered, track.ID()) + <-h.sem +} + +// Reserve blocks until there is an available slot for a new message. +func (h *ConsumerGroupHandler) Reserve(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case h.sem <- empty{}: + return nil + } +} + +func (h *ConsumerGroupHandler) release() { + <-h.sem +} + +// Handle processes a message and if successful saves it to be acknowledged +// after delivery. +func (h *ConsumerGroupHandler) Handle(session sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) error { + if h.MaxMessageLen != 0 && len(msg.Value) > h.MaxMessageLen { + session.MarkMessage(msg, "") + h.release() + return fmt.Errorf("message exceeds max_message_len (actual %d, max %d)", + len(msg.Value), h.MaxMessageLen) + } + + metrics, err := h.parser.Parse(msg.Value) + if err != nil { + h.release() + return err + } + + if len(h.TopicTag) > 0 { + for _, metric := range metrics { + metric.AddTag(h.TopicTag, msg.Topic) + } + } + + h.mu.Lock() + id := h.acc.AddTrackingMetricGroup(metrics) + h.undelivered[id] = Message{session: session, message: msg} + h.mu.Unlock() + return nil +} + +// ConsumeClaim is called once each claim in a goroutine and must be +// thread-safe. Should run until the claim is closed. +func (h *ConsumerGroupHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { + ctx := session.Context() + + for { + err := h.Reserve(ctx) + if err != nil { + return nil + } + + select { + case <-ctx.Done(): + return nil + case msg, ok := <-claim.Messages(): + if !ok { + return nil + } + err := h.Handle(session, msg) + if err != nil { + h.acc.AddError(err) } } } } -func (k *Kafka) Stop() { - k.Lock() - defer k.Unlock() - close(k.done) - if err := k.Cluster.Close(); err != nil { - k.acc.AddError(fmt.Errorf("Error closing consumer: %s\n", err.Error())) - } -} - -func (k *Kafka) Gather(acc telegraf.Accumulator) error { +// Cleanup stops the internal goroutine and is called after all ConsumeClaim +// functions have completed. 
+func (h *ConsumerGroupHandler) Cleanup(sarama.ConsumerGroupSession) error { + h.cancel() + h.wg.Wait() return nil } func init() { inputs.Add("kafka_consumer", func() telegraf.Input { - return &Kafka{} + return &KafkaConsumer{} }) } diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go deleted file mode 100644 index a145a938a..000000000 --- a/plugins/inputs/kafka_consumer/kafka_consumer_integration_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package kafka_consumer - -import ( - "fmt" - "testing" - "time" - - "github.com/Shopify/sarama" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/influxdata/telegraf/plugins/parsers" -) - -func TestReadsMetricsFromKafka(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - brokerPeers := []string{testutil.GetLocalHost() + ":9092"} - testTopic := fmt.Sprintf("telegraf_test_topic_%d", time.Now().Unix()) - - // Send a Kafka message to the kafka host - msg := "cpu_load_short,direction=in,host=server01,region=us-west value=23422.0 1422568543702900257\n" - producer, err := sarama.NewSyncProducer(brokerPeers, nil) - require.NoError(t, err) - _, _, err = producer.SendMessage( - &sarama.ProducerMessage{ - Topic: testTopic, - Value: sarama.StringEncoder(msg), - }) - require.NoError(t, err) - defer producer.Close() - - // Start the Kafka Consumer - k := &Kafka{ - ConsumerGroup: "telegraf_test_consumers", - Topics: []string{testTopic}, - Brokers: brokerPeers, - PointBuffer: 100000, - Offset: "oldest", - } - p, _ := parsers.NewInfluxParser() - k.SetParser(p) - - // Verify that we can now gather the sent message - var acc testutil.Accumulator - - // Sanity check - assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") - if err := k.Start(&acc); err != nil { - t.Fatal(err.Error()) - } else { - defer k.Stop() - } - - waitForPoint(&acc, t) - - // Gather points - err = acc.GatherError(k.Gather) - require.NoError(t, err) - if len(acc.Metrics) == 1 { - point := acc.Metrics[0] - assert.Equal(t, "cpu_load_short", point.Measurement) - assert.Equal(t, map[string]interface{}{"value": 23422.0}, point.Fields) - assert.Equal(t, map[string]string{ - "host": "server01", - "direction": "in", - "region": "us-west", - }, point.Tags) - assert.Equal(t, time.Unix(0, 1422568543702900257).Unix(), point.Time.Unix()) - } else { - t.Errorf("No points found in accumulator, expected 1") - } -} - -// Waits for the metric that was sent to the kafka broker to arrive at the kafka -// consumer -func waitForPoint(acc *testutil.Accumulator, t *testing.T) { - // Give the kafka container up to 2 seconds to get the point to the consumer - ticker := time.NewTicker(5 * time.Millisecond) - counter := 0 - for { - select { - case <-ticker.C: - counter++ - if counter > 1000 { - t.Fatal("Waited for 5s, point never arrived to consumer") - } else if acc.NFields() == 1 { - return - } - } - } -} diff --git a/plugins/inputs/kafka_consumer/kafka_consumer_test.go b/plugins/inputs/kafka_consumer/kafka_consumer_test.go index 9a585d6ed..0c8063578 100644 --- a/plugins/inputs/kafka_consumer/kafka_consumer_test.go +++ b/plugins/inputs/kafka_consumer/kafka_consumer_test.go @@ -1,150 +1,396 @@ package kafka_consumer import ( - "strings" + "context" "testing" - - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" + "time" "github.com/Shopify/sarama" - 
"github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/parsers/value" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) -const ( - testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" - testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" - testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" - invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" -) +type FakeConsumerGroup struct { + brokers []string + group string + config *sarama.Config -func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) { - in := make(chan *sarama.ConsumerMessage, 1000) - k := Kafka{ - ConsumerGroup: "test", - Topics: []string{"telegraf"}, - Brokers: []string{"localhost:9092"}, - Offset: "oldest", - in: in, - doNotCommitMsgs: true, - errs: make(chan error, 1000), - done: make(chan struct{}), + handler sarama.ConsumerGroupHandler + errors chan error +} + +func (g *FakeConsumerGroup) Consume(ctx context.Context, topics []string, handler sarama.ConsumerGroupHandler) error { + g.handler = handler + g.handler.Setup(nil) + return nil +} + +func (g *FakeConsumerGroup) Errors() <-chan error { + return g.errors +} + +func (g *FakeConsumerGroup) Close() error { + close(g.errors) + return nil +} + +type FakeCreator struct { + ConsumerGroup *FakeConsumerGroup +} + +func (c *FakeCreator) Create(brokers []string, group string, config *sarama.Config) (ConsumerGroup, error) { + c.ConsumerGroup.brokers = brokers + c.ConsumerGroup.group = group + c.ConsumerGroup.config = config + return c.ConsumerGroup, nil +} + +func TestInit(t *testing.T) { + tests := []struct { + name string + plugin *KafkaConsumer + initError bool + check func(t *testing.T, plugin *KafkaConsumer) + }{ + { + name: "default config", + plugin: &KafkaConsumer{}, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, plugin.ConsumerGroup, defaultConsumerGroup) + require.Equal(t, plugin.MaxUndeliveredMessages, defaultMaxUndeliveredMessages) + require.Equal(t, plugin.config.ClientID, "Telegraf") + require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetOldest) + }, + }, + { + name: "parses valid version string", + plugin: &KafkaConsumer{ + Version: "1.0.0", + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, plugin.config.Version, sarama.V1_0_0_0) + }, + }, + { + name: "invalid version string", + plugin: &KafkaConsumer{ + Version: "100", + Log: testutil.Logger{}, + }, + initError: true, + }, + { + name: "custom client_id", + plugin: &KafkaConsumer{ + ClientID: "custom", + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, plugin.config.ClientID, "custom") + }, + }, + { + name: "custom offset", + plugin: &KafkaConsumer{ + Offset: "newest", + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.Equal(t, plugin.config.Consumer.Offsets.Initial, sarama.OffsetNewest) + }, + }, + { + name: "invalid offset", + plugin: &KafkaConsumer{ + Offset: "middle", + Log: testutil.Logger{}, + }, + initError: true, + }, + { + name: "default tls without tls config", + plugin: &KafkaConsumer{ + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.False(t, plugin.config.Net.TLS.Enable) + }, + }, + { + name: "default tls with a tls config", + plugin: &KafkaConsumer{ + ClientConfig: tls.ClientConfig{ + 
InsecureSkipVerify: true, + }, + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.True(t, plugin.config.Net.TLS.Enable) + }, + }, + { + name: "disable tls", + plugin: &KafkaConsumer{ + EnableTLS: func() *bool { v := false; return &v }(), + ClientConfig: tls.ClientConfig{ + InsecureSkipVerify: true, + }, + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.False(t, plugin.config.Net.TLS.Enable) + }, + }, + { + name: "enable tls", + plugin: &KafkaConsumer{ + EnableTLS: func() *bool { v := true; return &v }(), + Log: testutil.Logger{}, + }, + check: func(t *testing.T, plugin *KafkaConsumer) { + require.True(t, plugin.config.Net.TLS.Enable) + }, + }, } - return &k, in -} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cg := &FakeConsumerGroup{} + tt.plugin.ConsumerCreator = &FakeCreator{ConsumerGroup: cg} + err := tt.plugin.Init() + if tt.initError { + require.Error(t, err) + return + } -// Test that the parser parses kafka messages into points -func TestRunParser(t *testing.T) { - k, in := newTestKafka() - acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) - - k.parser, _ = parsers.NewInfluxParser() - go k.receiver() - in <- saramaMsg(testMsg) - acc.Wait(1) - - assert.Equal(t, acc.NFields(), 1) -} - -// Test that the parser ignores invalid messages -func TestRunParserInvalidMsg(t *testing.T) { - k, in := newTestKafka() - acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) - - k.parser, _ = parsers.NewInfluxParser() - go k.receiver() - in <- saramaMsg(invalidMsg) - acc.WaitError(1) - - assert.Equal(t, acc.NFields(), 0) -} - -// Test that overlong messages are dropped -func TestDropOverlongMsg(t *testing.T) { - const maxMessageLen = 64 * 1024 - k, in := newTestKafka() - k.MaxMessageLen = maxMessageLen - acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) - overlongMsg := strings.Repeat("v", maxMessageLen+1) - - go k.receiver() - in <- saramaMsg(overlongMsg) - acc.WaitError(1) - - assert.Equal(t, acc.NFields(), 0) -} - -// Test that the parser parses kafka messages into points -func TestRunParserAndGather(t *testing.T) { - k, in := newTestKafka() - acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) - - k.parser, _ = parsers.NewInfluxParser() - go k.receiver() - in <- saramaMsg(testMsg) - acc.Wait(1) - - acc.GatherError(k.Gather) - - assert.Equal(t, acc.NFields(), 1) - acc.AssertContainsFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses kafka messages into points -func TestRunParserAndGatherGraphite(t *testing.T) { - k, in := newTestKafka() - acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) - - k.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) - go k.receiver() - in <- saramaMsg(testMsgGraphite) - acc.Wait(1) - - acc.GatherError(k.Gather) - - assert.Equal(t, acc.NFields(), 1) - acc.AssertContainsFields(t, "cpu_load_short_graphite", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses kafka messages into points -func TestRunParserAndGatherJSON(t *testing.T) { - k, in := newTestKafka() - acc := testutil.Accumulator{} - k.acc = &acc - defer close(k.done) - - k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil) - go k.receiver() - in <- saramaMsg(testMsgJSON) - acc.Wait(1) - - acc.GatherError(k.Gather) - - assert.Equal(t, acc.NFields(), 2) - acc.AssertContainsFields(t, "kafka_json_test", - 
map[string]interface{}{ - "a": float64(5), - "b_c": float64(6), + tt.check(t, tt.plugin) + }) + } +} + +func TestStartStop(t *testing.T) { + cg := &FakeConsumerGroup{errors: make(chan error)} + plugin := &KafkaConsumer{ + ConsumerCreator: &FakeCreator{ConsumerGroup: cg}, + Log: testutil.Logger{}, + } + err := plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + plugin.Stop() +} + +type FakeConsumerGroupSession struct { + ctx context.Context +} + +func (s *FakeConsumerGroupSession) Claims() map[string][]int32 { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) MemberID() string { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) GenerationID() int32 { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) { + panic("not implemented") +} + +func (s *FakeConsumerGroupSession) MarkMessage(msg *sarama.ConsumerMessage, metadata string) { +} + +func (s *FakeConsumerGroupSession) Context() context.Context { + return s.ctx +} + +type FakeConsumerGroupClaim struct { + messages chan *sarama.ConsumerMessage +} + +func (c *FakeConsumerGroupClaim) Topic() string { + panic("not implemented") +} + +func (c *FakeConsumerGroupClaim) Partition() int32 { + panic("not implemented") +} + +func (c *FakeConsumerGroupClaim) InitialOffset() int64 { + panic("not implemented") +} + +func (c *FakeConsumerGroupClaim) HighWaterMarkOffset() int64 { + panic("not implemented") +} + +func (c *FakeConsumerGroupClaim) Messages() <-chan *sarama.ConsumerMessage { + return c.messages +} + +func TestConsumerGroupHandler_Lifecycle(t *testing.T) { + acc := &testutil.Accumulator{} + parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} + cg := NewConsumerGroupHandler(acc, 1, parser) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + session := &FakeConsumerGroupSession{ + ctx: ctx, + } + var claim FakeConsumerGroupClaim + var err error + + err = cg.Setup(session) + require.NoError(t, err) + + cancel() + err = cg.ConsumeClaim(session, &claim) + require.NoError(t, err) + + err = cg.Cleanup(session) + require.NoError(t, err) +} + +func TestConsumerGroupHandler_ConsumeClaim(t *testing.T) { + acc := &testutil.Accumulator{} + parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} + cg := NewConsumerGroupHandler(acc, 1, parser) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + session := &FakeConsumerGroupSession{ctx: ctx} + claim := &FakeConsumerGroupClaim{ + messages: make(chan *sarama.ConsumerMessage, 1), + } + + err := cg.Setup(session) + require.NoError(t, err) + + claim.messages <- &sarama.ConsumerMessage{ + Topic: "telegraf", + Value: []byte("42"), + } + + go func() { + err = cg.ConsumeClaim(session, claim) + require.NoError(t, err) + }() + + acc.Wait(1) + cancel() + + err = cg.Cleanup(session) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Now(), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestConsumerGroupHandler_Handle(t *testing.T) { + tests := []struct { + name string + maxMessageLen int + topicTag string + msg 
*sarama.ConsumerMessage + expected []telegraf.Metric + }{ + { + name: "happy path", + msg: &sarama.ConsumerMessage{ + Topic: "telegraf", + Value: []byte("42"), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Now(), + ), + }, + }, + { + name: "message to long", + maxMessageLen: 4, + msg: &sarama.ConsumerMessage{ + Topic: "telegraf", + Value: []byte("12345"), + }, + expected: []telegraf.Metric{}, + }, + { + name: "parse error", + msg: &sarama.ConsumerMessage{ + Topic: "telegraf", + Value: []byte("not an integer"), + }, + expected: []telegraf.Metric{}, + }, + { + name: "add topic tag", + topicTag: "topic", + msg: &sarama.ConsumerMessage{ + Topic: "telegraf", + Value: []byte("42"), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "topic": "telegraf", + }, + map[string]interface{}{ + "value": 42, + }, + time.Now(), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + acc := &testutil.Accumulator{} + parser := &value.ValueParser{MetricName: "cpu", DataType: "int"} + cg := NewConsumerGroupHandler(acc, 1, parser) + cg.MaxMessageLen = tt.maxMessageLen + cg.TopicTag = tt.topicTag + + ctx := context.Background() + session := &FakeConsumerGroupSession{ctx: ctx} + + cg.Reserve(ctx) + cg.Handle(session, tt.msg) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) }) -} - -func saramaMsg(val string) *sarama.ConsumerMessage { - return &sarama.ConsumerMessage{ - Key: nil, - Value: []byte(val), - Offset: 0, - Partition: 0, } } diff --git a/plugins/inputs/kafka_consumer_legacy/README.md b/plugins/inputs/kafka_consumer_legacy/README.md index 31976788b..8fc7ed295 100644 --- a/plugins/inputs/kafka_consumer_legacy/README.md +++ b/plugins/inputs/kafka_consumer_legacy/README.md @@ -13,12 +13,16 @@ from the same topic in parallel. [[inputs.kafka_consumer]] ## topic(s) to consume topics = ["telegraf"] + ## an array of Zookeeper connection strings zookeeper_peers = ["localhost:2181"] + ## Zookeeper Chroot zookeeper_chroot = "" + ## the name of the consumer group consumer_group = "telegraf_metrics_consumers" + ## Offset (must be either "oldest" or "newest") offset = "oldest" diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go index d9558d5bd..939fc8850 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy.go @@ -2,7 +2,6 @@ package kafka_consumer_legacy import ( "fmt" - "log" "strings" "sync" @@ -30,6 +29,8 @@ type Kafka struct { Offset string parser parsers.Parser + Log telegraf.Logger + sync.Mutex // channel for all incoming kafka messages @@ -49,12 +50,16 @@ type Kafka struct { var sampleConfig = ` ## topic(s) to consume topics = ["telegraf"] + ## an array of Zookeeper connection strings zookeeper_peers = ["localhost:2181"] + ## Zookeeper Chroot zookeeper_chroot = "" + ## the name of the consumer group consumer_group = "telegraf_metrics_consumers" + ## Offset (must be either "oldest" or "newest") offset = "oldest" @@ -96,7 +101,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { case "newest": config.Offsets.Initial = sarama.OffsetNewest default: - log.Printf("I! 
WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", + k.Log.Infof("WARNING: Kafka consumer invalid offset '%s', using 'oldest'\n", k.Offset) config.Offsets.Initial = sarama.OffsetOldest } @@ -121,7 +126,7 @@ func (k *Kafka) Start(acc telegraf.Accumulator) error { // Start the kafka message reader go k.receiver() - log.Printf("I! Started the kafka consumer service, peers: %v, topics: %v\n", + k.Log.Infof("Started the kafka consumer service, peers: %v, topics: %v\n", k.ZookeeperPeers, k.Topics) return nil } diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go index 60404cfac..31bea2210 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_integration_test.go @@ -37,6 +37,7 @@ func TestReadsMetricsFromKafka(t *testing.T) { // Start the Kafka Consumer k := &Kafka{ + Log: testutil.Logger{}, ConsumerGroup: "telegraf_test_consumers", Topics: []string{testTopic}, ZookeeperPeers: zkPeers, diff --git a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go index 630aca163..8037f49a0 100644 --- a/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go +++ b/plugins/inputs/kafka_consumer_legacy/kafka_consumer_legacy_test.go @@ -21,6 +21,7 @@ const ( func newTestKafka() (*Kafka, chan *sarama.ConsumerMessage) { in := make(chan *sarama.ConsumerMessage, 1000) k := Kafka{ + Log: testutil.Logger{}, ConsumerGroup: "test", Topics: []string{"telegraf"}, ZookeeperPeers: []string{"localhost:2181"}, @@ -125,7 +126,10 @@ func TestRunParserAndGatherJSON(t *testing.T) { k.acc = &acc defer close(k.done) - k.parser, _ = parsers.NewJSONParser("kafka_json_test", []string{}, nil) + k.parser, _ = parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "kafka_json_test", + }) go k.receiver() in <- saramaMsg(testMsgJSON) acc.Wait(1) diff --git a/plugins/inputs/kapacitor/README.md b/plugins/inputs/kapacitor/README.md index 2ff4eab88..ace4f18ff 100644 --- a/plugins/inputs/kapacitor/README.md +++ b/plugins/inputs/kapacitor/README.md @@ -1,6 +1,6 @@ # Kapacitor Plugin -The Kapacitor plugin will collect metrics from the given Kapacitor instances. +The Kapacitor plugin collects metrics from the given Kapacitor instances. ### Configuration: @@ -23,70 +23,290 @@ The Kapacitor plugin will collect metrics from the given Kapacitor instances. 
# insecure_skip_verify = false ``` -### Measurements & Fields +### Measurements and fields -- kapacitor - - num_enabled_tasks, integer - - num_subscriptions, integer - - num_tasks, integer -- kapacitor_edges - - collected, integer - - emitted, integer -- kapacitor_ingress - - points_received, integer -- kapacitor_memstats - - alloc_bytes, integer - - buck_hash_sys_bytes, integer - - frees, integer - - gcc_pu_fraction, float - - gc_sys_bytes, integer - - heap_alloc_bytes, integer - - heap_idle_bytes, integer - - heap_inuse_bytes, integer - - heap_objects, integer - - heap_released_bytes, integer - - heap_sys_bytes, integer - - last_gc_ns, integer - - lookups, integer - - mallocs, integer - - mcache_in_use_bytes, integer - - mcache_sys_bytes, integer - - mspan_in_use_bytes, integer - - mspan_sys_bytes, integer - - next_gc_ns, integer - - num_gc, integer - - other_sys_bytes, integer - - pause_total_ns, integer - - stack_in_use_bytes, integer - - stack_sys_bytes, integer - - sys_bytes, integer - - total_alloc_bytes, integer -- kapacitor_nodes - - alerts_triggered, integer - - avg_exec_time_ns, integer - - batches_queried, integer - - crits_triggered, integer - - eval_errors, integer - - fields_defaulted, integer - - infos_triggered, integer - - oks_triggered, integer - - points_queried, integer - - points_written, integer - - query_errors, integer - - tags_defaulted, integer - - warns_triggered, integer - - write_errors, integer +- [kapacitor](#kapacitor) + - [num_enabled_tasks](#num_enabled_tasks) _(integer)_ + - [num_subscriptions](#num_subscriptions) _(integer)_ + - [num_tasks](#num_tasks) _(integer)_ +- [kapacitor_alert](#kapacitor_alert) + - [notification_dropped](#notification_dropped) _(integer)_ + - [primary-handle-count](#primary-handle-count) _(integer)_ + - [secondary-handle-count](#secondary-handle-count) _(integer)_ +- (Kapacitor Enterprise only) [kapacitor_cluster](#kapacitor_cluster) _(integer)_ + - [dropped_member_events](#dropped_member_events) _(integer)_ + - [dropped_user_events](#dropped_user_events) _(integer)_ + - [query_handler_errors](#query_handler_errors) _(integer)_ +- [kapacitor_edges](#kapacitor_edges) + - [collected](#collected) _(integer)_ + - [emitted](#emitted) _(integer)_ +- [kapacitor_ingress](#kapacitor_ingress) + - [points_received](#points_received) _(integer)_ +- [kapacitor_load](#kapacitor_load) + - [errors](#errors) _(integer)_ +- [kapacitor_memstats](#kapacitor_memstats) + - [alloc_bytes](#alloc_bytes) _(integer)_ + - [buck_hash_sys_bytes](#buck_hash_sys_bytes) _(integer)_ + - [frees](#frees) _(integer)_ + - [gc_sys_bytes](#gc_sys_bytes) _(integer)_ + - [gc_cpu_fraction](#gc_cpu_fraction) _(float)_ + - [heap_alloc_bytes](#heap_alloc_bytes) _(integer)_ + - [heap_idle_bytes](#heap_idle_bytes) _(integer)_ + - [heap_in_use_bytes](#heap_in_use_bytes) _(integer)_ + - [heap_objects](#heap_objects) _(integer)_ + - [heap_released_bytes](#heap_released_bytes) _(integer)_ + - [heap_sys_bytes](#heap_sys_bytes) _(integer)_ + - [last_gc_ns](#last_gc_ns) _(integer)_ + - [lookups](#lookups) _(integer)_ + - [mallocs](#mallocs) _(integer)_ + - [mcache_in_use_bytes](#mcache_in_use_bytes) _(integer)_ + - [mcache_sys_bytes](#mcache_sys_bytes) _(integer)_ + - [mspan_in_use_bytes](#mspan_in_use_bytes) _(integer)_ + - [mspan_sys_bytes](#mspan_sys_bytes) _(integer)_ + - [next_gc_ns](#next_gc_ns) _(integer)_ + - [num_gc](#num_gc) _(integer)_ + - [other_sys_bytes](#other_sys_bytes) _(integer)_ + - [pause_total_ns](#pause_total_ns) _(integer)_ + - 
[stack_in_use_bytes](#stack_in_use_bytes) _(integer)_ + - [stack_sys_bytes](#stack_sys_bytes) _(integer)_ + - [sys_bytes](#sys_bytes) _(integer)_ + - [total_alloc_bytes](#total_alloc_bytes) _(integer)_ +- [kapacitor_nodes](#kapacitor_nodes) + - [alerts_inhibited](#alerts_inhibited) _(integer)_ + - [alerts_triggered](#alerts_triggered) _(integer)_ + - [avg_exec_time_ns](#avg_exec_time_ns) _(integer)_ + - [crits_triggered](#crits_triggered) _(integer)_ + - [errors](#errors) _(integer)_ + - [infos_triggered](#infos_triggered) _(integer)_ + - [oks_triggered](#oks_triggered) _(integer)_ + - [points_written](#points_written) _(integer)_ + - [warns_triggered](#warns_triggered) _(integer)_ + - [write_errors](#write_errors) _(integer)_ +- [kapacitor_topics](#kapacitor_topics) + - [collected](#collected) _(integer)_ + + +--- + +### kapacitor +The `kapacitor` measurement stores fields with information related to +[Kapacitor tasks](https://docs.influxdata.com/kapacitor/latest/introduction/getting-started/#kapacitor-tasks) +and [subscriptions](https://docs.influxdata.com/kapacitor/latest/administration/subscription-management/). + +#### num_enabled_tasks +The number of enabled Kapacitor tasks. + +#### num_subscriptions +The number of Kapacitor/InfluxDB subscriptions. + +#### num_tasks +The total number of Kapacitor tasks. + +--- + +### kapacitor_alert +The `kapacitor_alert` measurement stores fields with information related to +[Kapacitor alerts](https://docs.influxdata.com/kapacitor/v1.5/working/alerts/). + +#### notification-dropped +The number of internal notifications dropped because they arrive too late from another Kapacitor node. +If this count is increasing, Kapacitor Enterprise nodes aren't able to communicate fast enough +to keep up with the volume of alerts. + +#### primary-handle-count +The number of times this node handled an alert as the primary. This count should increase under normal conditions. + +#### secondary-handle-count +The number of times this node handled an alert as the secondary. An increase in this counter indicates that the primary is failing to handle alerts in a timely manner. + +--- + +### kapacitor_cluster +The `kapacitor_cluster` measurement reflects the ability of [Kapacitor nodes to communicate](https://docs.influxdata.com/enterprise_kapacitor/v1.5/administration/configuration/#cluster-communications) with one another. Specifically, these metrics track the gossip communication between the Kapacitor nodes. + +#### dropped_member_events +The number of gossip member events that were dropped. + +#### dropped_user_events +The number of gossip user events that were dropped. + +--- + +### kapacitor_edges +The `kapacitor_edges` measurement stores fields with information related to +[edges](https://docs.influxdata.com/kapacitor/latest/tick/introduction/#pipelines) +in Kapacitor TICKscripts. + +#### collected +The number of messages collected by TICKscript edges. + +#### emitted +The number of messages emitted by TICKscript edges. + +--- + +### kapacitor_ingress +The `kapacitor_ingress` measurement stores fields with information related to data +coming into Kapacitor. + +#### points_received +The number of points received by Kapacitor. + +--- + +### kapacitor_load +The `kapacitor_load` measurement stores fields with information related to the +[Kapacitor Load Directory service](https://docs.influxdata.com/kapacitor/latest/guides/load_directory/). + +#### errors +The number of errors reported from the load directory service. 
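The `kapacitor_alert` and `kapacitor_cluster` counters described above are reported per node, so they are most useful when Telegraf scrapes every Kapacitor Enterprise node and the values are compared across the cluster. The sketch below is illustrative only; the host names are placeholders, and each URL is assumed to point at that node's own debug vars endpoint.

```toml
[[inputs.kapacitor]]
  ## One entry per Kapacitor node (example host names) so that
  ## primary-handle-count, secondary-handle-count, and the gossip
  ## drop counters can be compared across nodes.
  urls = [
    "http://kapacitor-node-1:9092/kapacitor/v1/debug/vars",
    "http://kapacitor-node-2:9092/kapacitor/v1/debug/vars"
  ]
```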
+
+---
+
+### kapacitor_memstats
+The `kapacitor_memstats` measurement stores fields related to Kapacitor memory usage.
+
+#### alloc_bytes
+The number of bytes of memory allocated by Kapacitor that are still in use.
+
+#### buck_hash_sys_bytes
+The number of bytes of memory used by the profiling bucket hash table.
+
+#### frees
+The number of heap objects freed.
+
+#### gc_sys_bytes
+The number of bytes of memory used for garbage collection system metadata.
+
+#### gc_cpu_fraction
+The fraction of Kapacitor's available CPU time used by garbage collection since
+Kapacitor started.
+
+#### heap_alloc_bytes
+The number of reachable and unreachable heap objects garbage collection has
+not freed.
+
+#### heap_idle_bytes
+The number of heap bytes waiting to be used.
+
+#### heap_in_use_bytes
+The number of heap bytes in use.
+
+#### heap_objects
+The number of allocated objects.
+
+#### heap_released_bytes
+The number of heap bytes released to the operating system.
+
+#### heap_sys_bytes
+The number of heap bytes obtained from `system`.
+
+#### last_gc_ns
+The nanosecond epoch time of the last garbage collection.
+
+#### lookups
+The total number of pointer lookups.
+
+#### mallocs
+The total number of mallocs.
+
+#### mcache_in_use_bytes
+The number of bytes in use by mcache structures.
+
+#### mcache_sys_bytes
+The number of bytes used for mcache structures obtained from `system`.
+
+#### mspan_in_use_bytes
+The number of bytes in use by mspan structures.
+
+#### mspan_sys_bytes
+The number of bytes used for mspan structures obtained from `system`.
+
+#### next_gc_ns
+The nanosecond epoch time of the next garbage collection.
+
+#### num_gc
+The number of completed garbage collection cycles.
+
+#### other_sys_bytes
+The number of bytes used for other system allocations.
+
+#### pause_total_ns
+The total number of nanoseconds spent in garbage collection "stop-the-world"
+pauses since Kapacitor started.
+
+#### stack_in_use_bytes
+The number of bytes in use by the stack allocator.
+
+#### stack_sys_bytes
+The number of bytes obtained from `system` for the stack allocator.
+
+#### sys_bytes
+The number of bytes of memory obtained from `system`.
+
+#### total_alloc_bytes
+The total number of bytes allocated, even if freed.
+
+---
+
+### kapacitor_nodes
+The `kapacitor_nodes` measurement stores fields related to events that occur in
+[TICKscript nodes](https://docs.influxdata.com/kapacitor/latest/nodes/).
+
+#### alerts_inhibited
+The total number of alerts inhibited by TICKscripts.
+
+#### alerts_triggered
+The total number of alerts triggered by TICKscripts.
+
+#### avg_exec_time_ns
+The average execution time of TICKscripts in nanoseconds.
+
+#### crits_triggered
+The number of critical (`crit`) alerts triggered by TICKscripts.
+
+#### errors
+The number of errors caused by TICKscripts.
+
+#### infos_triggered
+The number of info (`info`) alerts triggered by TICKscripts.
+
+#### oks_triggered
+The number of ok (`ok`) alerts triggered by TICKscripts.
+
+#### points_written
+The number of points written to InfluxDB or back to Kapacitor.
+
+#### warns_triggered
+The number of warning (`warn`) alerts triggered by TICKscripts.
+
+#### working_cardinality
+The total number of unique series processed.
+
+#### write_errors
+The number of errors that occurred when writing to InfluxDB or other write endpoints.
+
+---
+
+### kapacitor_topics
+The `kapacitor_topics` measurement stores fields related to
+[Kapacitor topics](https://docs.influxdata.com/kapacitor/latest/working/using_alert_topics/).
+ +#### collected +The number of events collected by Kapacitor topics. + +--- *Note:* The Kapacitor variables `host`, `cluster_id`, and `server_id` are currently not recorded due to the potential high cardinality of these values. -### Example Output: +## Example Output ``` $ telegraf --config /etc/telegraf.conf --input-filter kapacitor --test * Plugin: inputs.kapacitor, Collection 1 -> kapacitor_memstats,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars alloc_bytes=6974808i,buck_hash_sys_bytes=1452609i,frees=207281i,gc_sys_bytes=802816i,gcc_pu_fraction=0.00004693548939673313,heap_alloc_bytes=6974808i,heap_idle_bytes=6742016i,heap_in_use_bytes=9183232i,heap_objects=23216i,heap_released_bytes=0i,heap_sys_bytes=15925248i,last_gc_ns=1478791460012676997i,lookups=88i,mallocs=230497i,mcache_in_use_bytes=9600i,mcache_sys_bytes=16384i,mspan_in_use_bytes=98560i,mspan_sys_bytes=131072i,next_gc_ns=11467528i,num_gc=8i,other_sys_bytes=2236087i,pause_total_ns=2994110i,stack_in_use_bytes=1900544i,stack_sys_bytes=1900544i,sys_bytes=22464760i,total_alloc_bytes=35023600i 1478791462000000000 +> kapacitor_memstats,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars alloc_bytes=6974808i,buck_hash_sys_bytes=1452609i,frees=207281i,gc_sys_bytes=802816i,gc_cpu_fraction=0.00004693548939673313,heap_alloc_bytes=6974808i,heap_idle_bytes=6742016i,heap_in_use_bytes=9183232i,heap_objects=23216i,heap_released_bytes=0i,heap_sys_bytes=15925248i,last_gc_ns=1478791460012676997i,lookups=88i,mallocs=230497i,mcache_in_use_bytes=9600i,mcache_sys_bytes=16384i,mspan_in_use_bytes=98560i,mspan_sys_bytes=131072i,next_gc_ns=11467528i,num_gc=8i,other_sys_bytes=2236087i,pause_total_ns=2994110i,stack_in_use_bytes=1900544i,stack_sys_bytes=1900544i,sys_bytes=22464760i,total_alloc_bytes=35023600i 1478791462000000000 > kapacitor,host=hostname.local,kap_version=1.1.0~rc2,url=http://localhost:9092/kapacitor/v1/debug/vars num_enabled_tasks=5i,num_subscriptions=5i,num_tasks=5i 1478791462000000000 > kapacitor_edges,child=stream0,host=hostname.local,parent=stream,task=deadman-test,type=stream collected=0,emitted=0 1478791462000000000 > kapacitor_ingress,database=_internal,host=hostname.local,measurement=shard,retention_policy=monitor,task_master=main points_received=120 1478791462000000000 diff --git a/plugins/inputs/kapacitor/kapacitor.go b/plugins/inputs/kapacitor/kapacitor.go index f20b98774..71febf307 100644 --- a/plugins/inputs/kapacitor/kapacitor.go +++ b/plugins/inputs/kapacitor/kapacitor.go @@ -171,7 +171,7 @@ func (k *Kapacitor) gatherURL( "alloc_bytes": s.MemStats.Alloc, "buck_hash_sys_bytes": s.MemStats.BuckHashSys, "frees": s.MemStats.Frees, - "gcc_pu_fraction": s.MemStats.GCCPUFraction, + "gc_cpu_fraction": s.MemStats.GCCPUFraction, "gc_sys_bytes": s.MemStats.GCSys, "heap_alloc_bytes": s.MemStats.HeapAlloc, "heap_idle_bytes": s.MemStats.HeapIdle, diff --git a/plugins/inputs/kapacitor/kapacitor_test.go b/plugins/inputs/kapacitor/kapacitor_test.go index b32aeec24..cae1f9ce3 100644 --- a/plugins/inputs/kapacitor/kapacitor_test.go +++ b/plugins/inputs/kapacitor/kapacitor_test.go @@ -33,7 +33,7 @@ func TestKapacitor(t *testing.T) { "alloc_bytes": int64(6950624), "buck_hash_sys_bytes": int64(1446737), "frees": int64(129656), - "gcc_pu_fraction": float64(0.006757149597237818), + "gc_cpu_fraction": float64(0.006757149597237818), "gc_sys_bytes": int64(575488), "heap_alloc_bytes": int64(6950624), "heap_idle_bytes": int64(499712), diff --git 
a/plugins/inputs/system/KERNEL_README.md b/plugins/inputs/kernel/README.md similarity index 100% rename from plugins/inputs/system/KERNEL_README.md rename to plugins/inputs/kernel/README.md diff --git a/plugins/inputs/system/kernel.go b/plugins/inputs/kernel/kernel.go similarity index 99% rename from plugins/inputs/system/kernel.go rename to plugins/inputs/kernel/kernel.go index 1b3bc1dfa..461c9564a 100644 --- a/plugins/inputs/system/kernel.go +++ b/plugins/inputs/kernel/kernel.go @@ -1,6 +1,6 @@ // +build linux -package system +package kernel import ( "bytes" diff --git a/plugins/inputs/system/kernel_notlinux.go b/plugins/inputs/kernel/kernel_notlinux.go similarity index 96% rename from plugins/inputs/system/kernel_notlinux.go rename to plugins/inputs/kernel/kernel_notlinux.go index 9053b5c04..05f6e55c4 100644 --- a/plugins/inputs/system/kernel_notlinux.go +++ b/plugins/inputs/kernel/kernel_notlinux.go @@ -1,6 +1,6 @@ // +build !linux -package system +package kernel import ( "github.com/influxdata/telegraf" diff --git a/plugins/inputs/system/kernel_test.go b/plugins/inputs/kernel/kernel_test.go similarity index 98% rename from plugins/inputs/system/kernel_test.go rename to plugins/inputs/kernel/kernel_test.go index bf090eb88..d356f4380 100644 --- a/plugins/inputs/system/kernel_test.go +++ b/plugins/inputs/kernel/kernel_test.go @@ -1,6 +1,6 @@ // +build linux -package system +package kernel import ( "io/ioutil" @@ -168,7 +168,7 @@ const entropyStatFile_Partial = `1024` const entropyStatFile_Invalid = `` func makeFakeStatFile(content []byte) string { - tmpfile, err := ioutil.TempFile("", "kerneltest") + tmpfile, err := ioutil.TempFile("", "kernel_test") if err != nil { panic(err) } diff --git a/plugins/inputs/system/KERNEL_VMSTAT_README.md b/plugins/inputs/kernel_vmstat/README.md similarity index 100% rename from plugins/inputs/system/KERNEL_VMSTAT_README.md rename to plugins/inputs/kernel_vmstat/README.md diff --git a/plugins/inputs/system/kernel_vmstat.go b/plugins/inputs/kernel_vmstat/kernel_vmstat.go similarity index 98% rename from plugins/inputs/system/kernel_vmstat.go rename to plugins/inputs/kernel_vmstat/kernel_vmstat.go index 197d81185..ffc56d97d 100644 --- a/plugins/inputs/system/kernel_vmstat.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat.go @@ -1,6 +1,6 @@ // +build linux -package system +package kernel_vmstat import ( "bytes" diff --git a/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go new file mode 100644 index 000000000..11a5d2e55 --- /dev/null +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package kernel_vmstat diff --git a/plugins/inputs/system/kernel_vmstat_test.go b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go similarity index 97% rename from plugins/inputs/system/kernel_vmstat_test.go rename to plugins/inputs/kernel_vmstat/kernel_vmstat_test.go index ed0c03e28..bba615a74 100644 --- a/plugins/inputs/system/kernel_vmstat_test.go +++ b/plugins/inputs/kernel_vmstat/kernel_vmstat_test.go @@ -1,6 +1,6 @@ // +build linux -package system +package kernel_vmstat import ( "io/ioutil" @@ -13,7 +13,7 @@ import ( ) func TestFullVmStatProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(vmStatFile_Full)) + tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Full)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -121,7 +121,7 @@ func TestFullVmStatProcFile(t *testing.T) { } func TestPartialVmStatProcFile(t *testing.T) { - tmpfile := 
makeFakeStatFile([]byte(vmStatFile_Partial)) + tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Partial)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -151,7 +151,7 @@ func TestPartialVmStatProcFile(t *testing.T) { } func TestInvalidVmStatProcFile1(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(vmStatFile_Invalid)) + tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) defer os.Remove(tmpfile) k := KernelVmstat{ @@ -164,7 +164,7 @@ func TestInvalidVmStatProcFile1(t *testing.T) { } func TestNoVmStatProcFile(t *testing.T) { - tmpfile := makeFakeStatFile([]byte(vmStatFile_Invalid)) + tmpfile := makeFakeVmStatFile([]byte(vmStatFile_Invalid)) os.Remove(tmpfile) k := KernelVmstat{ diff --git a/plugins/inputs/kibana/README.md b/plugins/inputs/kibana/README.md new file mode 100644 index 000000000..73bf4a298 --- /dev/null +++ b/plugins/inputs/kibana/README.md @@ -0,0 +1,55 @@ +# Kibana Input Plugin + +The `kibana` plugin queries the [Kibana][] API to obtain the service status. + +- Telegraf minimum version: 1.8 +- Kibana minimum tested version: 6.0 + +[Kibana]: https://www.elastic.co/ + +### Configuration + +```toml +[[inputs.kibana]] + ## Specify a list of one or more Kibana servers + servers = ["http://localhost:5601"] + + ## Timeout for HTTP requests + timeout = "5s" + + ## HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Metrics + +- kibana + - tags: + - name (Kibana reported name) + - source (Kibana server hostname or IP) + - status (Kibana health: green, yellow, red) + - version (Kibana version) + - fields: + - status_code (integer, green=1 yellow=2 red=3 unknown=0) + - heap_total_bytes (integer) + - heap_max_bytes (integer; deprecated in 1.13.3: use `heap_total_bytes` field) + - heap_used_bytes (integer) + - uptime_ms (integer) + - response_time_avg_ms (float) + - response_time_max_ms (integer) + - concurrent_connections (integer) + - requests_per_sec (float) + +### Example Output + +``` +kibana,host=myhost,name=my-kibana,source=localhost:5601,status=green,version=6.5.4 concurrent_connections=8i,heap_max_bytes=447778816i,heap_total_bytes=447778816i,heap_used_bytes=380603352i,requests_per_sec=1,response_time_avg_ms=57.6,response_time_max_ms=220i,status_code=1i,uptime_ms=6717489805i 1534864502000000000 +``` diff --git a/plugins/inputs/kibana/kibana.go b/plugins/inputs/kibana/kibana.go new file mode 100644 index 000000000..858922451 --- /dev/null +++ b/plugins/inputs/kibana/kibana.go @@ -0,0 +1,272 @@ +package kibana + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const statusPath = "/api/status" + +type kibanaStatus struct { + Name string `json:"name"` + Version version `json:"version"` + Status status `json:"status"` + Metrics metrics `json:"metrics"` +} + +type version struct { + Number string `json:"number"` + BuildHash string `json:"build_hash"` + BuildNumber int `json:"build_number"` + BuildSnapshot bool `json:"build_snapshot"` +} + +type status struct { + Overall overallStatus `json:"overall"` + Statuses interface{} `json:"statuses"` +} + +type overallStatus struct { + 
State string `json:"state"` +} + +type metrics struct { + UptimeInMillis int64 `json:"uptime_in_millis"` + ConcurrentConnections int64 `json:"concurrent_connections"` + CollectionIntervalInMilles int64 `json:"collection_interval_in_millis"` + ResponseTimes responseTimes `json:"response_times"` + Process process `json:"process"` + Requests requests `json:"requests"` +} + +type responseTimes struct { + AvgInMillis float64 `json:"avg_in_millis"` + MaxInMillis int64 `json:"max_in_millis"` +} + +type process struct { + Mem mem `json:"mem"` + Memory memory `json:"memory"` + UptimeInMillis int64 `json:"uptime_in_millis"` +} + +type requests struct { + Total int64 `json:"total"` +} + +type mem struct { + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` +} + +type memory struct { + Heap heap `json:"heap"` +} + +type heap struct { + TotalInBytes int64 `json:"total_in_bytes"` + UsedInBytes int64 `json:"used_in_bytes"` +} + +const sampleConfig = ` + ## Specify a list of one or more Kibana servers + servers = ["http://localhost:5601"] + + ## Timeout for HTTP requests + timeout = "5s" + + ## HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +type Kibana struct { + Local bool + Servers []string + Username string + Password string + Timeout internal.Duration + tls.ClientConfig + + client *http.Client +} + +func NewKibana() *Kibana { + return &Kibana{ + Timeout: internal.Duration{Duration: time.Second * 5}, + } +} + +// perform status mapping +func mapHealthStatusToCode(s string) int { + switch strings.ToLower(s) { + case "green": + return 1 + case "yellow": + return 2 + case "red": + return 3 + } + return 0 +} + +// SampleConfig returns sample configuration for this plugin. +func (k *Kibana) SampleConfig() string { + return sampleConfig +} + +// Description returns the plugin description. 
+func (k *Kibana) Description() string { + return "Read status information from one or more Kibana servers" +} + +func (k *Kibana) Gather(acc telegraf.Accumulator) error { + if k.client == nil { + client, err := k.createHttpClient() + + if err != nil { + return err + } + k.client = client + } + + var wg sync.WaitGroup + wg.Add(len(k.Servers)) + + for _, serv := range k.Servers { + go func(baseUrl string, acc telegraf.Accumulator) { + defer wg.Done() + if err := k.gatherKibanaStatus(baseUrl, acc); err != nil { + acc.AddError(fmt.Errorf("[url=%s]: %s", baseUrl, err)) + return + } + }(serv, acc) + } + + wg.Wait() + return nil +} + +func (k *Kibana) createHttpClient() (*http.Client, error) { + tlsCfg, err := k.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: k.Timeout.Duration, + } + + return client, nil +} + +func (k *Kibana) gatherKibanaStatus(baseUrl string, acc telegraf.Accumulator) error { + + kibanaStatus := &kibanaStatus{} + url := baseUrl + statusPath + + host, err := k.gatherJsonData(url, kibanaStatus) + if err != nil { + return err + } + + fields := make(map[string]interface{}) + tags := make(map[string]string) + + tags["name"] = kibanaStatus.Name + tags["source"] = host + tags["version"] = kibanaStatus.Version.Number + tags["status"] = kibanaStatus.Status.Overall.State + + fields["status_code"] = mapHealthStatusToCode(kibanaStatus.Status.Overall.State) + fields["concurrent_connections"] = kibanaStatus.Metrics.ConcurrentConnections + fields["response_time_avg_ms"] = kibanaStatus.Metrics.ResponseTimes.AvgInMillis + fields["response_time_max_ms"] = kibanaStatus.Metrics.ResponseTimes.MaxInMillis + fields["requests_per_sec"] = float64(kibanaStatus.Metrics.Requests.Total) / float64(kibanaStatus.Metrics.CollectionIntervalInMilles) * 1000 + + versionArray := strings.Split(kibanaStatus.Version.Number, ".") + arrayElement := 1 + + if len(versionArray) > 1 { + arrayElement = 2 + } + versionNumber, err := strconv.ParseFloat(strings.Join(versionArray[:arrayElement], "."), 64) + if err != nil { + return err + } + + // Same value will be assigned to both the metrics [heap_max_bytes and heap_total_bytes ] + // Which keeps the code backward compatible + if versionNumber >= 6.4 { + fields["uptime_ms"] = kibanaStatus.Metrics.Process.UptimeInMillis + fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes + fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.TotalInBytes + fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Memory.Heap.UsedInBytes + } else { + fields["uptime_ms"] = kibanaStatus.Metrics.UptimeInMillis + fields["heap_max_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes + fields["heap_total_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapMaxInBytes + fields["heap_used_bytes"] = kibanaStatus.Metrics.Process.Mem.HeapUsedInBytes + + } + + acc.AddFields("kibana", fields, tags) + + return nil +} + +func (k *Kibana) gatherJsonData(url string, v interface{}) (host string, err error) { + + request, err := http.NewRequest("GET", url, nil) + + if (k.Username != "") || (k.Password != "") { + request.SetBasicAuth(k.Username, k.Password) + } + + response, err := k.client.Do(request) + if err != nil { + return "", err + } + + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
+ body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + return request.Host, fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + } + + if err = json.NewDecoder(response.Body).Decode(v); err != nil { + return request.Host, err + } + + return request.Host, nil +} + +func init() { + inputs.Add("kibana", func() telegraf.Input { + return NewKibana() + }) +} diff --git a/plugins/inputs/kibana/kibana_test.go b/plugins/inputs/kibana/kibana_test.go new file mode 100644 index 000000000..3dfed9edf --- /dev/null +++ b/plugins/inputs/kibana/kibana_test.go @@ -0,0 +1,87 @@ +package kibana + +import ( + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/influxdata/telegraf/testutil" +) + +func defaultTags6_3() map[string]string { + return map[string]string{ + "name": "my-kibana", + "source": "example.com:5601", + "version": "6.3.2", + "status": "green", + } +} + +func defaultTags6_5() map[string]string { + return map[string]string{ + "name": "my-kibana", + "source": "example.com:5601", + "version": "6.5.4", + "status": "green", + } +} + +type transportMock struct { + statusCode int + body string +} + +func newTransportMock(statusCode int, body string) http.RoundTripper { + return &transportMock{ + statusCode: statusCode, + body: body, + } +} + +func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { + res := &http.Response{ + Header: make(http.Header), + Request: r, + StatusCode: t.statusCode, + } + res.Header.Set("Content-Type", "application/json") + res.Body = ioutil.NopCloser(strings.NewReader(t.body)) + return res, nil +} + +func checkKibanaStatusResult(version string, t *testing.T, acc *testutil.Accumulator) { + if version == "6.3.2" { + tags := defaultTags6_3() + acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected6_3, tags) + } else { + tags := defaultTags6_5() + acc.AssertContainsTaggedFields(t, "kibana", kibanaStatusExpected6_5, tags) + } +} + +func TestGather(t *testing.T) { + ks := newKibanahWithClient() + ks.Servers = []string{"http://example.com:5601"} + // Unit test for Kibana version < 6.4 + ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_3) + var acc1 testutil.Accumulator + if err := acc1.GatherError(ks.Gather); err != nil { + t.Fatal(err) + } + checkKibanaStatusResult(defaultTags6_3()["version"], t, &acc1) + + //Unit test for Kibana version >= 6.4 + ks.client.Transport = newTransportMock(http.StatusOK, kibanaStatusResponse6_5) + var acc2 testutil.Accumulator + if err := acc2.GatherError(ks.Gather); err != nil { + t.Fatal(err) + } + checkKibanaStatusResult(defaultTags6_5()["version"], t, &acc2) +} + +func newKibanahWithClient() *Kibana { + ks := NewKibana() + ks.client = &http.Client{} + return ks +} diff --git a/plugins/inputs/kibana/testdata_test6_3.go b/plugins/inputs/kibana/testdata_test6_3.go new file mode 100644 index 000000000..bda529273 --- /dev/null +++ b/plugins/inputs/kibana/testdata_test6_3.go @@ -0,0 +1,200 @@ +package kibana + +const kibanaStatusResponse6_3 = ` +{ + "name": "my-kibana", + "uuid": "00000000-0000-0000-0000-000000000000", + "version": { + "number": "6.3.2", + "build_hash": "53d0c6758ac3fb38a3a1df198c1d4c87765e63f7", + "build_number": 17307, + "build_snapshot": false + }, + "status": { + "overall": { + "state": "green", + "title": "Green", + "nickname": "Looking good", + "icon": "success", + "since": "2018-07-27T07:37:42.567Z" + }, + "statuses": [{ + "id": "plugin:kibana@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": 
"2018-07-27T07:37:42.567Z" + }, + { + "id": "plugin:elasticsearch@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:04.920Z" + }, + { + "id": "plugin:xpack_main@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.393Z" + }, + { + "id": "plugin:searchprofiler@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.395Z" + }, + { + "id": "plugin:tilemap@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.396Z" + }, + { + "id": "plugin:watcher@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.397Z" + }, + { + "id": "plugin:license_management@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.668Z" + }, + { + "id": "plugin:index_management@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.399Z" + }, + { + "id": "plugin:timelion@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.912Z" + }, + { + "id": "plugin:logtrail@0.1.29", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.919Z" + }, + { + "id": "plugin:monitoring@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.922Z" + }, + { + "id": "plugin:grokdebugger@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.400Z" + }, + { + "id": "plugin:dashboard_mode@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.928Z" + }, + { + "id": "plugin:logstash@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.401Z" + }, + { + "id": "plugin:apm@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.950Z" + }, + { + "id": "plugin:console@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.958Z" + }, + { + "id": "plugin:console_extensions@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.961Z" + }, + { + "id": "plugin:metrics@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.965Z" + }, + { + "id": "plugin:reporting@6.3.2", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.402Z" + }] + }, + "metrics": { + "last_updated": "2018-08-21T11:24:25.823Z", + "collection_interval_in_millis": 5000, + "uptime_in_millis": 2173595336, + "process": { + "mem": { + "heap_max_in_bytes": 149954560, + "heap_used_in_bytes": 126274392 + } + }, + "os": { + "cpu": { + "load_average": { + "1m": 0.1806640625, + "5m": 0.49658203125, + "15m": 0.458984375 + } + } + }, + "response_times": { + "avg_in_millis": 12.5, + "max_in_millis": 123 + }, + "requests": { + "total": 2, + "disconnects": 0, + "status_codes": { + "200": 2 + } + }, + "concurrent_connections": 10 + } +} +` + +var kibanaStatusExpected6_3 = map[string]interface{}{ + "status_code": 1, + "heap_total_bytes": int64(149954560), + "heap_max_bytes": int64(149954560), + "heap_used_bytes": int64(126274392), + "uptime_ms": int64(2173595336), + "response_time_avg_ms": float64(12.5), + "response_time_max_ms": int64(123), + 
"concurrent_connections": int64(10), + "requests_per_sec": float64(0.4), +} diff --git a/plugins/inputs/kibana/testdata_test6_5.go b/plugins/inputs/kibana/testdata_test6_5.go new file mode 100644 index 000000000..f47878b11 --- /dev/null +++ b/plugins/inputs/kibana/testdata_test6_5.go @@ -0,0 +1,227 @@ +package kibana + +const kibanaStatusResponse6_5 = ` +{ + "name": "my-kibana", + "uuid": "00000000-0000-0000-0000-000000000000", + "version": { + "number": "6.5.4", + "build_hash": "53d0c6758ac3fb38a3a1df198c1d4c87765e63f7", + "build_number": 17307, + "build_snapshot": false + }, + "status": { + "overall": { + "state": "green", + "title": "Green", + "nickname": "Looking good", + "icon": "success", + "since": "2018-07-27T07:37:42.567Z" + }, + "statuses": [{ + "id": "plugin:kibana@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.567Z" + }, + { + "id": "plugin:elasticsearch@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:04.920Z" + }, + { + "id": "plugin:xpack_main@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.393Z" + }, + { + "id": "plugin:searchprofiler@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.395Z" + }, + { + "id": "plugin:tilemap@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.396Z" + }, + { + "id": "plugin:watcher@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.397Z" + }, + { + "id": "plugin:license_management@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.668Z" + }, + { + "id": "plugin:index_management@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.399Z" + }, + { + "id": "plugin:timelion@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.912Z" + }, + { + "id": "plugin:logtrail@0.1.29", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.919Z" + }, + { + "id": "plugin:monitoring@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.922Z" + }, + { + "id": "plugin:grokdebugger@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.400Z" + }, + { + "id": "plugin:dashboard_mode@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.928Z" + }, + { + "id": "plugin:logstash@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.401Z" + }, + { + "id": "plugin:apm@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.950Z" + }, + { + "id": "plugin:console@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.958Z" + }, + { + "id": "plugin:console_extensions@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.961Z" + }, + { + "id": "plugin:metrics@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-27T07:37:42.965Z" + }, + { + "id": "plugin:reporting@6.5.4", + "state": "green", + "icon": "success", + "message": "Ready", + "since": "2018-07-28T10:07:02.402Z" + }] + }, + "metrics": { + 
"last_updated": "2020-01-15T09:40:17.733Z", + "collection_interval_in_millis": 5000, + "process": { + "memory": { + "heap": { + "total_in_bytes": 149954560, + "used_in_bytes": 126274392, + "size_limit": 1501560832 + }, + "resident_set_size_in_bytes": 286650368 + }, + "event_loop_delay": 0.5314235687255859, + "pid": 6, + "uptime_in_millis": 2173595336 + }, + "os": { + "load": { + "1m": 2.66015625, + "5m": 2.8173828125, + "15m": 2.51025390625 + }, + "memory": { + "total_in_bytes": 404355756032, + "free_in_bytes": 294494244864, + "used_in_bytes": 109861511168 + }, + "uptime_in_millis": 8220745000, + "cgroup": { + "cpuacct": { + "control_group": "/", + "usage_nanos": 1086527218898 + }, + "cpu": { + "control_group": "/", + "cfs_period_micros": 100000, + "cfs_quota_micros": -1, + "stat": { + "number_of_elapsed_periods": 0, + "number_of_times_throttled": 0, + "time_throttled_nanos": 0 + } + } + } + }, + "response_times": { + "avg_in_millis": 12.5, + "max_in_millis": 123 + }, + "requests": { + "total": 2, + "disconnects": 0, + "status_codes": { + "200": 1, + "304": 1 + } + }, + "concurrent_connections": 10 + } +} +` + +var kibanaStatusExpected6_5 = map[string]interface{}{ + "status_code": 1, + "heap_total_bytes": int64(149954560), + "heap_max_bytes": int64(149954560), + "heap_used_bytes": int64(126274392), + "uptime_ms": int64(2173595336), + "response_time_avg_ms": float64(12.5), + "response_time_max_ms": int64(123), + "concurrent_connections": int64(10), + "requests_per_sec": float64(0.4), +} diff --git a/plugins/inputs/kinesis_consumer/README.md b/plugins/inputs/kinesis_consumer/README.md new file mode 100644 index 000000000..7896557ac --- /dev/null +++ b/plugins/inputs/kinesis_consumer/README.md @@ -0,0 +1,90 @@ +# Kinesis Consumer Input Plugin + +The [Kinesis][kinesis] consumer plugin reads from a Kinesis data stream +and creates metrics using one of the supported [input data formats][]. + + +### Configuration + +```toml +[[inputs.kinesis_consumer]] + ## Amazon REGION of kinesis endpoint. + region = "ap-southeast-2" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Assumed credentials via STS if role_arn is specified + ## 2) explicit credentials from 'access_key' and 'secret_key' + ## 3) shared profile from 'profile' + ## 4) environment variables + ## 5) shared credentials file + ## 6) EC2 Instance Profile + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # profile = "" + # shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Kinesis StreamName must exist prior to starting telegraf. + streamname = "StreamName" + + ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) + # shard_iterator_type = "TRIM_HORIZON" + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional + ## Configuration for a dynamodb checkpoint + [inputs.kinesis_consumer.checkpoint_dynamodb] + ## unique name for this consumer + app_name = "default" + table_name = "default" +``` + + +#### Required AWS IAM permissions + +Kinesis: + - DescribeStream + - GetRecords + - GetShardIterator + +DynamoDB: + - GetItem + - PutItem + + +#### DynamoDB Checkpoint + +The DynamoDB checkpoint stores the last processed record in a DynamoDB. To leverage +this functionality, create a table with the following string type keys: + +``` +Partition key: namespace +Sort key: shard_id +``` + + +[kinesis]: https://aws.amazon.com/kinesis/ +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/kinesis_consumer/kinesis_consumer.go b/plugins/inputs/kinesis_consumer/kinesis_consumer.go new file mode 100644 index 000000000..6a3b1c830 --- /dev/null +++ b/plugins/inputs/kinesis_consumer/kinesis_consumer.go @@ -0,0 +1,352 @@ +package kinesis_consumer + +import ( + "context" + "fmt" + "math/big" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/kinesis" + consumer "github.com/harlow/kinesis-consumer" + "github.com/harlow/kinesis-consumer/checkpoint/ddb" + + "github.com/influxdata/telegraf" + internalaws "github.com/influxdata/telegraf/config/aws" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers" +) + +type ( + DynamoDB struct { + AppName string `toml:"app_name"` + TableName string `toml:"table_name"` + } + + KinesisConsumer struct { + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` + StreamName string `toml:"streamname"` + ShardIteratorType string `toml:"shard_iterator_type"` + DynamoDB *DynamoDB `toml:"checkpoint_dynamodb"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + + Log telegraf.Logger + + cons *consumer.Consumer + parser parsers.Parser + cancel context.CancelFunc + ctx context.Context + acc telegraf.TrackingAccumulator + sem chan struct{} + + checkpoint consumer.Checkpoint + checkpoints map[string]checkpoint + records map[telegraf.TrackingID]string + checkpointTex sync.Mutex + recordsTex sync.Mutex + wg sync.WaitGroup + + lastSeqNum *big.Int + } + + checkpoint struct { + streamName string + shardID string + } +) + +const ( + defaultMaxUndeliveredMessages = 1000 +) + +// this is the largest sequence number allowed - https://docs.aws.amazon.com/kinesis/latest/APIReference/API_SequenceNumberRange.html +var maxSeq = strToBint(strings.Repeat("9", 129)) + +var sampleConfig = ` + ## Amazon REGION of kinesis endpoint. 
+ region = "ap-southeast-2" + + ## Amazon Credentials + ## Credentials are loaded in the following order + ## 1) Assumed credentials via STS if role_arn is specified + ## 2) explicit credentials from 'access_key' and 'secret_key' + ## 3) shared profile from 'profile' + ## 4) environment variables + ## 5) shared credentials file + ## 6) EC2 Instance Profile + # access_key = "" + # secret_key = "" + # token = "" + # role_arn = "" + # profile = "" + # shared_credential_file = "" + + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + + ## Kinesis StreamName must exist prior to starting telegraf. + streamname = "StreamName" + + ## Shard iterator type (only 'TRIM_HORIZON' and 'LATEST' currently supported) + # shard_iterator_type = "TRIM_HORIZON" + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional + ## Configuration for a dynamodb checkpoint + [inputs.kinesis_consumer.checkpoint_dynamodb] + ## unique name for this consumer + app_name = "default" + table_name = "default" +` + +func (k *KinesisConsumer) SampleConfig() string { + return sampleConfig +} + +func (k *KinesisConsumer) Description() string { + return "Configuration for the AWS Kinesis input." 
+} + +func (k *KinesisConsumer) SetParser(parser parsers.Parser) { + k.parser = parser +} + +func (k *KinesisConsumer) connect(ac telegraf.Accumulator) error { + credentialConfig := &internalaws.CredentialConfig{ + Region: k.Region, + AccessKey: k.AccessKey, + SecretKey: k.SecretKey, + RoleARN: k.RoleARN, + Profile: k.Profile, + Filename: k.Filename, + Token: k.Token, + EndpointURL: k.EndpointURL, + } + configProvider := credentialConfig.Credentials() + client := kinesis.New(configProvider) + + k.checkpoint = &noopCheckpoint{} + if k.DynamoDB != nil { + var err error + k.checkpoint, err = ddb.New( + k.DynamoDB.AppName, + k.DynamoDB.TableName, + ddb.WithDynamoClient(dynamodb.New((&internalaws.CredentialConfig{ + Region: k.Region, + AccessKey: k.AccessKey, + SecretKey: k.SecretKey, + RoleARN: k.RoleARN, + Profile: k.Profile, + Filename: k.Filename, + Token: k.Token, + EndpointURL: k.EndpointURL, + }).Credentials())), + ddb.WithMaxInterval(time.Second*10), + ) + if err != nil { + return err + } + } + + cons, err := consumer.New( + k.StreamName, + consumer.WithClient(client), + consumer.WithShardIteratorType(k.ShardIteratorType), + consumer.WithCheckpoint(k), + ) + if err != nil { + return err + } + + k.cons = cons + + k.acc = ac.WithTracking(k.MaxUndeliveredMessages) + k.records = make(map[telegraf.TrackingID]string, k.MaxUndeliveredMessages) + k.checkpoints = make(map[string]checkpoint, k.MaxUndeliveredMessages) + k.sem = make(chan struct{}, k.MaxUndeliveredMessages) + + ctx := context.Background() + ctx, k.cancel = context.WithCancel(ctx) + + k.wg.Add(1) + go func() { + defer k.wg.Done() + k.onDelivery(ctx) + }() + + k.wg.Add(1) + go func() { + defer k.wg.Done() + err := k.cons.Scan(ctx, func(r *consumer.Record) consumer.ScanStatus { + select { + case <-ctx.Done(): + return consumer.ScanStatus{Error: ctx.Err()} + case k.sem <- struct{}{}: + break + } + err := k.onMessage(k.acc, r) + if err != nil { + k.sem <- struct{}{} + return consumer.ScanStatus{Error: err} + } + + return consumer.ScanStatus{} + }) + if err != nil { + k.cancel() + k.Log.Errorf("Scan encountered an error: %s", err.Error()) + k.cons = nil + } + }() + + return nil +} + +func (k *KinesisConsumer) Start(ac telegraf.Accumulator) error { + err := k.connect(ac) + if err != nil { + return err + } + + return nil +} + +func (k *KinesisConsumer) onMessage(acc telegraf.TrackingAccumulator, r *consumer.Record) error { + metrics, err := k.parser.Parse(r.Data) + if err != nil { + return err + } + + k.recordsTex.Lock() + id := acc.AddTrackingMetricGroup(metrics) + k.records[id] = *r.SequenceNumber + k.recordsTex.Unlock() + + return nil +} + +func (k *KinesisConsumer) onDelivery(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case info := <-k.acc.Delivered(): + k.recordsTex.Lock() + sequenceNum, ok := k.records[info.ID()] + if !ok { + k.recordsTex.Unlock() + continue + } + <-k.sem + delete(k.records, info.ID()) + k.recordsTex.Unlock() + + if info.Delivered() { + k.checkpointTex.Lock() + chk, ok := k.checkpoints[sequenceNum] + if !ok { + k.checkpointTex.Unlock() + continue + } + delete(k.checkpoints, sequenceNum) + k.checkpointTex.Unlock() + + // at least once + if strToBint(sequenceNum).Cmp(k.lastSeqNum) > 0 { + continue + } + + k.lastSeqNum = strToBint(sequenceNum) + k.checkpoint.Set(chk.streamName, chk.shardID, sequenceNum) + } else { + k.Log.Debug("Metric group failed to process") + } + } + } +} + +var negOne *big.Int + +func strToBint(s string) *big.Int { + n, ok := new(big.Int).SetString(s, 10) + if !ok { + 
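// SetString failed to parse the sequence number; fall back to negOne (set in init below) rather than returning nil. +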
return negOne + } + return n +} + +func (k *KinesisConsumer) Stop() { + k.cancel() + k.wg.Wait() +} + +func (k *KinesisConsumer) Gather(acc telegraf.Accumulator) error { + if k.cons == nil { + return k.connect(acc) + } + k.lastSeqNum = maxSeq + + return nil +} + +// Get wraps the checkpoint's Get function (called by consumer library) +func (k *KinesisConsumer) Get(streamName, shardID string) (string, error) { + return k.checkpoint.Get(streamName, shardID) +} + +// Set wraps the checkpoint's Set function (called by consumer library) +func (k *KinesisConsumer) Set(streamName, shardID, sequenceNumber string) error { + if sequenceNumber == "" { + return fmt.Errorf("sequence number should not be empty") + } + + k.checkpointTex.Lock() + k.checkpoints[sequenceNumber] = checkpoint{streamName: streamName, shardID: shardID} + k.checkpointTex.Unlock() + + return nil +} + +type noopCheckpoint struct{} + +func (n noopCheckpoint) Set(string, string, string) error { return nil } +func (n noopCheckpoint) Get(string, string) (string, error) { return "", nil } + +func init() { + negOne, _ = new(big.Int).SetString("-1", 10) + + inputs.Add("kinesis_consumer", func() telegraf.Input { + return &KinesisConsumer{ + ShardIteratorType: "TRIM_HORIZON", + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + lastSeqNum: maxSeq, + } + }) +} diff --git a/plugins/inputs/kube_inventory/README.md b/plugins/inputs/kube_inventory/README.md new file mode 100644 index 000000000..f017b18c6 --- /dev/null +++ b/plugins/inputs/kube_inventory/README.md @@ -0,0 +1,292 @@ +# Kube_Inventory Plugin + +This plugin generates metrics derived from the state of the following Kubernetes resources: + +- daemonsets +- deployments +- nodes +- persistentvolumes +- persistentvolumeclaims +- pods (containers) +- statefulsets + +Kubernetes is a fast moving project, with a new minor release every 3 months. As +such, we will aim to maintain support only for versions that are supported by +the major cloud providers; this is roughly 4 release / 2 years. + +**This plugin supports Kubernetes 1.11 and later.** + +#### Series Cardinality Warning + +This plugin may produce a high number of series which, when not controlled +for, will cause high load on your database. Use the following techniques to +avoid cardinality issues: + +- Use [metric filtering][] options to exclude unneeded measurements and tags. +- Write to a database with an appropriate [retention policy][]. +- Limit series cardinality in your database using the + [max-series-per-database][] and [max-values-per-tag][] settings. +- Consider using the [Time Series Index][tsi]. +- Monitor your databases [series cardinality][]. +- Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. + +### Configuration: + +```toml +[[inputs.kube_inventory]] + ## URL for the Kubernetes API + url = "https://127.0.0.1" + + ## Namespace to use. Set to "" to use all namespaces. + # namespace = "default" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional Resources to exclude from gathering + ## Leave them with blank with try to gather everything available. 
+ ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes", + ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" + # resource_exclude = [ "deployments", "nodes", "statefulsets" ] + + ## Optional Resources to include when gathering + ## Overrides resource_exclude if both set. + # resource_include = [ "deployments", "nodes", "statefulsets" ] + + ## Optional TLS Config + # tls_ca = "/path/to/cafile" + # tls_cert = "/path/to/certfile" + # tls_key = "/path/to/keyfile" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +#### Kubernetes Permissions + +If using [RBAC authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/), you will need to create a cluster role to list "persistentvolumes" and "nodes". You will then need to make an [aggregated ClusterRole](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles) that will eventually be bound to a user or group. + +```yaml +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: influx:cluster:viewer + labels: + rbac.authorization.k8s.io/aggregate-view-telegraf: "true" +rules: + - apiGroups: [""] + resources: ["persistentvolumes", "nodes"] + verbs: ["get", "list"] + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: influx:telegraf +aggregationRule: + clusterRoleSelectors: + - matchLabels: + rbac.authorization.k8s.io/aggregate-view-telegraf: "true" + - matchLabels: + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: [] # Rules are automatically filled in by the controller manager. +``` + +Bind the newly created aggregated ClusterRole with the following config file, updating the subjects as needed. 
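+For illustration only, here is a minimal sketch (not code from this plugin, and assuming the standard in-cluster token mount) of the fallback this binding enables: when neither `bearer_token` nor `bearer_token_string` is set, the plugin reads the ServiceAccount token that Kubernetes mounts into the pod. The ClusterRoleBinding config file referenced above follows right after the sketch.
+
+```go
+package main
+
+import (
+    "fmt"
+    "io/ioutil"
+    "strings"
+)
+
+// Path of the token mounted for the bound ServiceAccount; the plugin falls back
+// to this same default when bearer_token and bearer_token_string are both empty.
+const defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token"
+
+func main() {
+    token, err := ioutil.ReadFile(defaultServiceAccountPath)
+    if err != nil {
+        fmt.Println("token not mounted (not running in a pod?):", err)
+        return
+    }
+    // The token is trimmed of surrounding whitespace before being used as a bearer credential.
+    fmt.Println("bearer token loaded, length:", len(strings.TrimSpace(string(token))))
+}
+```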
+ +```yaml +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: influx:telegraf:viewer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: influx:telegraf +subjects: + - kind: ServiceAccount + name: telegraf + namespace: default +``` + +### Metrics: + +- kubernetes_daemonset + - tags: + - daemonset_name + - namespace + - fields: + - generation + - current_number_scheduled + - desired_number_scheduled + - number_available + - number_misscheduled + - number_ready + - number_unavailable + - updated_number_scheduled + +* kubernetes_deployment + - tags: + - deployment_name + - namespace + - fields: + - replicas_available + - replicas_unavailable + - created + +- kubernetes_endpoints + - tags: + - endpoint_name + - namespace + - hostname + - node_name + - port_name + - port_protocol + - kind (\*varies) + - fields: + - created + - generation + - ready + - port + +* kubernetes_ingress + - tags: + - ingress_name + - namespace + - hostname + - ip + - backend_service_name + - path + - host + - fields: + - created + - generation + - backend_service_port + - tls + +- kubernetes_node + - tags: + - node_name + - fields: + - capacity_cpu_cores + - capacity_memory_bytes + - capacity_pods + - allocatable_cpu_cores + - allocatable_memory_bytes + - allocatable_pods + +* kubernetes_persistentvolume + - tags: + - pv_name + - phase + - storageclass + - fields: + - phase_type (int, [see below](#pv-phase_type)) + +- kubernetes_persistentvolumeclaim + - tags: + - pvc_name + - namespace + - phase + - storageclass + - fields: + - phase_type (int, [see below](#pvc-phase_type)) + +* kubernetes_pod_container + - tags: + - container_name + - namespace + - node_name + - pod_name + - fields: + - restarts_total + - state + - terminated_reason + - resource_requests_cpu_units + - resource_requests_memory_bytes + - resource_limits_cpu_units + - resource_limits_memory_bytes + +- kubernetes_service + - tags: + - service_name + - namespace + - port_name + - port_protocol + - external_name + - cluster_ip + - fields + - created + - generation + - port + - target_port + +* kubernetes_statefulset + - tags: + - statefulset_name + - namespace + - fields: + - created + - generation + - replicas + - replicas_current + - replicas_ready + - replicas_updated + - spec_replicas + - observed_generation + +#### pv `phase_type` + +The persistentvolume "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. + +| Tag value | Corresponding field value | +| --------- | ------------------------- | +| bound | 0 | +| failed | 1 | +| pending | 2 | +| released | 3 | +| available | 4 | +| unknown | 5 | + +#### pvc `phase_type` + +The persistentvolumeclaim "phase" is saved in the `phase` tag with a correlated numeric field called `phase_type` corresponding with that tag value. 
+ +| Tag value | Corresponding field value | +| --------- | ------------------------- | +| bound | 0 | +| lost | 1 | +| pending | 2 | +| unknown | 3 | + + +### Example Output: + +``` +kubernetes_configmap,configmap_name=envoy-config,namespace=default,resource_version=56593031 created=1544103867000000000i 1547597616000000000 +kubernetes_daemonset,daemonset_name=telegraf,namespace=logging number_unavailable=0i,desired_number_scheduled=11i,number_available=11i,number_misscheduled=8i,number_ready=11i,updated_number_scheduled=11i,created=1527758699000000000i,generation=16i,current_number_scheduled=11i 1547597616000000000 +kubernetes_deployment,deployment_name=deployd,namespace=default replicas_unavailable=0i,created=1544103082000000000i,replicas_available=1i 1547597616000000000 +kubernetes_node,node_name=ip-172-17-0-2.internal allocatable_pods=110i,capacity_memory_bytes=128837533696,capacity_pods=110i,capacity_cpu_cores=16i,allocatable_cpu_cores=16i,allocatable_memory_bytes=128732676096 1547597616000000000 +kubernetes_persistentvolume,phase=Released,pv_name=pvc-aaaaaaaa-bbbb-cccc-1111-222222222222,storageclass=ebs-1-retain phase_type=3i 1547597616000000000 +kubernetes_persistentvolumeclaim,namespace=default,phase=Bound,pvc_name=data-etcd-0,storageclass=ebs-1-retain phase_type=0i 1547597615000000000 +kubernetes_pod,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1 last_transition_time=1547578322000000000i,ready="false" 1547597616000000000 +kubernetes_pod_container,container_name=telegraf,namespace=default,node_name=ip-172-17-0-2.internal,pod_name=tick1,state=running resource_requests_cpu_units=0.1,resource_limits_memory_bytes=524288000,resource_limits_cpu_units=0.5,restarts_total=0i,state_code=0i,terminated_reason="",resource_requests_memory_bytes=524288000 1547597616000000000 +kubernetes_statefulset,namespace=default,statefulset_name=etcd replicas_updated=3i,spec_replicas=3i,observed_generation=1i,created=1544101669000000000i,generation=1i,replicas=3i,replicas_current=3i,replicas_ready=3i 1547597616000000000 +``` + +[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering +[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ +[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 +[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 +[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ +[series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality +[influx-docs]: https://docs.influxdata.com/influxdb/latest/ +[k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/ diff --git a/plugins/inputs/kube_inventory/client.go b/plugins/inputs/kube_inventory/client.go new file mode 100644 index 000000000..d16428c40 --- /dev/null +++ b/plugins/inputs/kube_inventory/client.go @@ -0,0 +1,118 @@ +package kube_inventory + +import ( + "context" + "time" + + "github.com/ericchiang/k8s" + v1APPS "github.com/ericchiang/k8s/apis/apps/v1" + v1 "github.com/ericchiang/k8s/apis/core/v1" + v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + + "github.com/influxdata/telegraf/internal/tls" +) + +type client struct { + namespace string + timeout time.Duration + *k8s.Client +} + +func newClient(baseURL, namespace, bearerToken string, timeout time.Duration, tlsConfig 
tls.ClientConfig) (*client, error) { + c, err := k8s.NewClient(&k8s.Config{ + Clusters: []k8s.NamedCluster{{Name: "cluster", Cluster: k8s.Cluster{ + Server: baseURL, + InsecureSkipTLSVerify: tlsConfig.InsecureSkipVerify, + CertificateAuthority: tlsConfig.TLSCA, + }}}, + Contexts: []k8s.NamedContext{{Name: "context", Context: k8s.Context{ + Cluster: "cluster", + AuthInfo: "auth", + Namespace: namespace, + }}}, + AuthInfos: []k8s.NamedAuthInfo{{Name: "auth", AuthInfo: k8s.AuthInfo{ + Token: bearerToken, + ClientCertificate: tlsConfig.TLSCert, + ClientKey: tlsConfig.TLSKey, + }}}, + }) + if err != nil { + return nil, err + } + + return &client{ + Client: c, + timeout: timeout, + namespace: namespace, + }, nil +} + +func (c *client) getDaemonSets(ctx context.Context) (*v1APPS.DaemonSetList, error) { + list := new(v1APPS.DaemonSetList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getDeployments(ctx context.Context) (*v1APPS.DeploymentList, error) { + list := &v1APPS.DeploymentList{} + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getEndpoints(ctx context.Context) (*v1.EndpointsList, error) { + list := new(v1.EndpointsList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getIngress(ctx context.Context) (*v1beta1EXT.IngressList, error) { + list := new(v1beta1EXT.IngressList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getNodes(ctx context.Context) (*v1.NodeList, error) { + list := new(v1.NodeList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, "", list) +} + +func (c *client) getPersistentVolumes(ctx context.Context) (*v1.PersistentVolumeList, error) { + list := new(v1.PersistentVolumeList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, "", list) +} + +func (c *client) getPersistentVolumeClaims(ctx context.Context) (*v1.PersistentVolumeClaimList, error) { + list := new(v1.PersistentVolumeClaimList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getPods(ctx context.Context) (*v1.PodList, error) { + list := new(v1.PodList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getServices(ctx context.Context) (*v1.ServiceList, error) { + list := new(v1.ServiceList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} + +func (c *client) getStatefulSets(ctx context.Context) (*v1APPS.StatefulSetList, error) { + list := new(v1APPS.StatefulSetList) + ctx, cancel := context.WithTimeout(ctx, c.timeout) + defer cancel() + return list, c.List(ctx, c.namespace, list) +} diff --git a/plugins/inputs/kube_inventory/client_test.go b/plugins/inputs/kube_inventory/client_test.go new file mode 100644 index 000000000..3e4eaf752 --- /dev/null +++ b/plugins/inputs/kube_inventory/client_test.go @@ -0,0 +1,43 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/util/intstr" + "github.com/influxdata/telegraf/internal/tls" +) + +type mockHandler struct { + responseMap 
map[string]interface{} +} + +func toStrPtr(s string) *string { + return &s +} + +func toInt32Ptr(i int32) *int32 { + return &i +} + +func toInt64Ptr(i int64) *int64 { + return &i +} + +func toBoolPtr(b bool) *bool { + return &b +} + +func toIntStrPtrS(s string) *intstr.IntOrString { + return &intstr.IntOrString{StrVal: &s} +} + +func toIntStrPtrI(i int32) *intstr.IntOrString { + return &intstr.IntOrString{IntVal: &i} +} +func TestNewClient(t *testing.T) { + _, err := newClient("https://127.0.0.1:443/", "default", "abc123", time.Second, tls.ClientConfig{}) + if err != nil { + t.Errorf("Failed to create new client - %s", err.Error()) + } +} diff --git a/plugins/inputs/kube_inventory/daemonset.go b/plugins/inputs/kube_inventory/daemonset.go new file mode 100644 index 000000000..15df586d6 --- /dev/null +++ b/plugins/inputs/kube_inventory/daemonset.go @@ -0,0 +1,49 @@ +package kube_inventory + +import ( + "context" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1" + + "github.com/influxdata/telegraf" +) + +func collectDaemonSets(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getDaemonSets(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, d := range list.Items { + if err = ki.gatherDaemonSet(*d, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherDaemonSet(d v1.DaemonSet, acc telegraf.Accumulator) error { + fields := map[string]interface{}{ + "generation": d.Metadata.GetGeneration(), + "current_number_scheduled": d.Status.GetCurrentNumberScheduled(), + "desired_number_scheduled": d.Status.GetDesiredNumberScheduled(), + "number_available": d.Status.GetNumberAvailable(), + "number_misscheduled": d.Status.GetNumberMisscheduled(), + "number_ready": d.Status.GetNumberReady(), + "number_unavailable": d.Status.GetNumberUnavailable(), + "updated_number_scheduled": d.Status.GetUpdatedNumberScheduled(), + } + tags := map[string]string{ + "daemonset_name": d.Metadata.GetName(), + "namespace": d.Metadata.GetNamespace(), + } + + if d.Metadata.CreationTimestamp.GetSeconds() != 0 { + fields["created"] = time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano() + } + + acc.AddFields(daemonSetMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/daemonset_test.go b/plugins/inputs/kube_inventory/daemonset_test.go new file mode 100644 index 000000000..bf4e934d3 --- /dev/null +++ b/plugins/inputs/kube_inventory/daemonset_test.go @@ -0,0 +1,123 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + + "github.com/influxdata/telegraf/testutil" +) + +func TestDaemonSet(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no daemon set", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/daemonsets/": &v1.DaemonSetList{}, + }, + }, + hasError: false, + }, + { + name: "collect daemonsets", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/daemonsets/": &v1.DaemonSetList{ + Items: []*v1.DaemonSet{ + { + Status: &v1.DaemonSetStatus{ + CurrentNumberScheduled: toInt32Ptr(3), + DesiredNumberScheduled: toInt32Ptr(5), + NumberAvailable: toInt32Ptr(2), + NumberMisscheduled: 
toInt32Ptr(2), + NumberReady: toInt32Ptr(1), + NumberUnavailable: toInt32Ptr(1), + UpdatedNumberScheduled: toInt32Ptr(2), + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(11221), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("daemon1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "generation": int64(11221), + "current_number_scheduled": int32(3), + "desired_number_scheduled": int32(5), + "number_available": int32(2), + "number_misscheduled": int32(2), + "number_ready": int32(1), + "number_unavailable": int32(1), + "updated_number_scheduled": int32(2), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "daemonset_name": "daemon1", + "namespace": "ns1", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, dset := range ((v.handler.responseMap["/daemonsets/"]).(*v1.DaemonSetList)).Items { + err := ks.gatherDaemonSet(*dset, acc) + if err != nil { + t.Errorf("Failed to gather daemonset - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/deployment.go b/plugins/inputs/kube_inventory/deployment.go new file mode 100644 index 000000000..5a0eb0b19 --- /dev/null +++ b/plugins/inputs/kube_inventory/deployment.go @@ -0,0 +1,39 @@ +package kube_inventory + +import ( + "context" + "time" + + v1 "github.com/ericchiang/k8s/apis/apps/v1" + "github.com/influxdata/telegraf" +) + +func collectDeployments(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getDeployments(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, d := range list.Items { + if err = ki.gatherDeployment(*d, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherDeployment(d v1.Deployment, acc telegraf.Accumulator) error { + fields := map[string]interface{}{ + "replicas_available": d.Status.GetAvailableReplicas(), + "replicas_unavailable": d.Status.GetUnavailableReplicas(), + "created": time.Unix(d.Metadata.CreationTimestamp.GetSeconds(), int64(d.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + } + tags := map[string]string{ + "deployment_name": d.Metadata.GetName(), + "namespace": d.Metadata.GetNamespace(), + } + + acc.AddFields(deploymentMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/deployment_test.go b/plugins/inputs/kube_inventory/deployment_test.go new file mode 100644 index 
000000000..21b7bfd02 --- /dev/null +++ b/plugins/inputs/kube_inventory/deployment_test.go @@ -0,0 +1,131 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/ericchiang/k8s/util/intstr" + "github.com/influxdata/telegraf/testutil" +) + +func TestDeployment(t *testing.T) { + cli := &client{} + + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + outputMetric := &testutil.Metric{ + Fields: map[string]interface{}{ + "replicas_available": int32(1), + "replicas_unavailable": int32(4), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "namespace": "ns1", + "deployment_name": "deploy1", + }, + } + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no deployments", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/deployments/": &v1.DeploymentList{}, + }, + }, + hasError: false, + }, + { + name: "collect deployments", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/deployments/": &v1.DeploymentList{ + Items: []*v1.Deployment{ + { + Status: &v1.DeploymentStatus{ + Replicas: toInt32Ptr(3), + AvailableReplicas: toInt32Ptr(1), + UnavailableReplicas: toInt32Ptr(4), + UpdatedReplicas: toInt32Ptr(2), + ObservedGeneration: toInt64Ptr(9121), + }, + Spec: &v1.DeploymentSpec{ + Strategy: &v1.DeploymentStrategy{ + RollingUpdate: &v1.RollingUpdateDeployment{ + MaxUnavailable: &intstr.IntOrString{ + IntVal: toInt32Ptr(30), + }, + MaxSurge: &intstr.IntOrString{ + IntVal: toInt32Ptr(20), + }, + }, + }, + Replicas: toInt32Ptr(4), + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(11221), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("deploy1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + outputMetric, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, deployment := range ((v.handler.responseMap["/deployments/"]).(*v1.DeploymentList)).Items { + err := ks.gatherDeployment(*deployment, acc) + if err != nil { + t.Errorf("Failed to gather deployment - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/endpoint.go b/plugins/inputs/kube_inventory/endpoint.go new file mode 100644 index 000000000..7298789da --- /dev/null +++ b/plugins/inputs/kube_inventory/endpoint.go @@ -0,0 +1,82 @@ 
+package kube_inventory + +import ( + "context" + "strings" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectEndpoints(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getEndpoints(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, i := range list.Items { + if err = ki.gatherEndpoint(*i, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherEndpoint(e v1.Endpoints, acc telegraf.Accumulator) error { + if e.Metadata.CreationTimestamp.GetSeconds() == 0 && e.Metadata.CreationTimestamp.GetNanos() == 0 { + return nil + } + + fields := map[string]interface{}{ + "created": time.Unix(e.Metadata.CreationTimestamp.GetSeconds(), int64(e.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "generation": e.Metadata.GetGeneration(), + } + + tags := map[string]string{ + "endpoint_name": e.Metadata.GetName(), + "namespace": e.Metadata.GetNamespace(), + } + + for _, endpoint := range e.GetSubsets() { + for _, readyAddr := range endpoint.GetAddresses() { + fields["ready"] = true + + tags["hostname"] = readyAddr.GetHostname() + tags["node_name"] = readyAddr.GetNodeName() + if readyAddr.TargetRef != nil { + tags[strings.ToLower(readyAddr.GetTargetRef().GetKind())] = readyAddr.GetTargetRef().GetName() + } + + for _, port := range endpoint.GetPorts() { + fields["port"] = port.GetPort() + + tags["port_name"] = port.GetName() + tags["port_protocol"] = port.GetProtocol() + + acc.AddFields(endpointMeasurement, fields, tags) + } + } + for _, notReadyAddr := range endpoint.GetNotReadyAddresses() { + fields["ready"] = false + + tags["hostname"] = notReadyAddr.GetHostname() + tags["node_name"] = notReadyAddr.GetNodeName() + if notReadyAddr.TargetRef != nil { + tags[strings.ToLower(notReadyAddr.GetTargetRef().GetKind())] = notReadyAddr.GetTargetRef().GetName() + } + + for _, port := range endpoint.GetPorts() { + fields["port"] = port.GetPort() + + tags["port_name"] = port.GetName() + tags["port_protocol"] = port.GetProtocol() + + acc.AddFields(endpointMeasurement, fields, tags) + } + } + } + + return nil +} diff --git a/plugins/inputs/kube_inventory/endpoint_test.go b/plugins/inputs/kube_inventory/endpoint_test.go new file mode 100644 index 000000000..b88c38816 --- /dev/null +++ b/plugins/inputs/kube_inventory/endpoint_test.go @@ -0,0 +1,194 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/influxdata/telegraf/testutil" +) + +func TestEndpoint(t *testing.T) { + cli := &client{} + + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no endpoints", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/endpoints/": &v1.EndpointsList{}, + }, + }, + hasError: false, + }, + { + name: "collect ready endpoints", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/endpoints/": &v1.EndpointsList{ + Items: []*v1.Endpoints{ + { + Subsets: []*v1.EndpointSubset{ + { + Addresses: []*v1.EndpointAddress{ + { + Hostname: toStrPtr("storage-6"), + NodeName: toStrPtr("b.storage.internal"), + TargetRef: &v1.ObjectReference{ + Kind: toStrPtr("pod"), + Name: toStrPtr("storage-6"), + }, + }, + }, + Ports: []*v1.EndpointPort{ + { + Name: 
toStrPtr("server"), + Protocol: toStrPtr("TCP"), + Port: toInt32Ptr(8080), + }, + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(12), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("storage"), + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "ready": true, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", + }, + }, + }, + }, + hasError: false, + }, + { + name: "collect notready endpoints", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/endpoints/": &v1.EndpointsList{ + Items: []*v1.Endpoints{ + { + Subsets: []*v1.EndpointSubset{ + { + NotReadyAddresses: []*v1.EndpointAddress{ + { + Hostname: toStrPtr("storage-6"), + NodeName: toStrPtr("b.storage.internal"), + TargetRef: &v1.ObjectReference{ + Kind: toStrPtr("pod"), + Name: toStrPtr("storage-6"), + }, + }, + }, + Ports: []*v1.EndpointPort{ + { + Name: toStrPtr("server"), + Protocol: toStrPtr("TCP"), + Port: toInt32Ptr(8080), + }, + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(12), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("storage"), + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "ready": false, + "port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "endpoint_name": "storage", + "namespace": "ns1", + "hostname": "storage-6", + "node_name": "b.storage.internal", + "port_name": "server", + "port_protocol": "TCP", + "pod": "storage-6", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, endpoint := range ((v.handler.responseMap["/endpoints/"]).(*v1.EndpointsList)).Items { + err := ks.gatherEndpoint(*endpoint, acc) + if err != nil { + t.Errorf("Failed to gather endpoint - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/ingress.go b/plugins/inputs/kube_inventory/ingress.go new file mode 100644 index 000000000..6d5c80199 --- /dev/null +++ b/plugins/inputs/kube_inventory/ingress.go @@ -0,0 +1,60 @@ +package kube_inventory + +import ( + "context" + "time" + + v1beta1EXT 
"github.com/ericchiang/k8s/apis/extensions/v1beta1" + + "github.com/influxdata/telegraf" +) + +func collectIngress(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getIngress(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, i := range list.Items { + if err = ki.gatherIngress(*i, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherIngress(i v1beta1EXT.Ingress, acc telegraf.Accumulator) error { + if i.Metadata.CreationTimestamp.GetSeconds() == 0 && i.Metadata.CreationTimestamp.GetNanos() == 0 { + return nil + } + + fields := map[string]interface{}{ + "created": time.Unix(i.Metadata.CreationTimestamp.GetSeconds(), int64(i.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "generation": i.Metadata.GetGeneration(), + } + + tags := map[string]string{ + "ingress_name": i.Metadata.GetName(), + "namespace": i.Metadata.GetNamespace(), + } + + for _, ingress := range i.GetStatus().GetLoadBalancer().GetIngress() { + tags["hostname"] = ingress.GetHostname() + tags["ip"] = ingress.GetIp() + + for _, rule := range i.GetSpec().GetRules() { + for _, path := range rule.GetIngressRuleValue().GetHttp().GetPaths() { + fields["backend_service_port"] = path.GetBackend().GetServicePort().GetIntVal() + fields["tls"] = i.GetSpec().GetTls() != nil + + tags["backend_service_name"] = path.GetBackend().GetServiceName() + tags["path"] = path.GetPath() + tags["host"] = rule.GetHost() + + acc.AddFields(ingressMeasurement, fields, tags) + } + } + } + + return nil +} diff --git a/plugins/inputs/kube_inventory/ingress_test.go b/plugins/inputs/kube_inventory/ingress_test.go new file mode 100644 index 000000000..2d111801a --- /dev/null +++ b/plugins/inputs/kube_inventory/ingress_test.go @@ -0,0 +1,142 @@ +package kube_inventory + +import ( + "testing" + "time" + + v1 "github.com/ericchiang/k8s/apis/core/v1" + v1beta1EXT "github.com/ericchiang/k8s/apis/extensions/v1beta1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/influxdata/telegraf/testutil" +) + +func TestIngress(t *testing.T) { + cli := &client{} + + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no ingress", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": &v1beta1EXT.IngressList{}, + }, + }, + hasError: false, + }, + { + name: "collect ingress", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/ingress/": &v1beta1EXT.IngressList{ + Items: []*v1beta1EXT.Ingress{ + { + Status: &v1beta1EXT.IngressStatus{ + LoadBalancer: &v1.LoadBalancerStatus{ + Ingress: []*v1.LoadBalancerIngress{ + { + Hostname: toStrPtr("chron-1"), + Ip: toStrPtr("1.0.0.127"), + }, + }, + }, + }, + Spec: &v1beta1EXT.IngressSpec{ + Rules: []*v1beta1EXT.IngressRule{ + { + Host: toStrPtr("ui.internal"), + IngressRuleValue: &v1beta1EXT.IngressRuleValue{ + Http: &v1beta1EXT.HTTPIngressRuleValue{ + Paths: []*v1beta1EXT.HTTPIngressPath{ + { + Path: toStrPtr("/"), + Backend: &v1beta1EXT.IngressBackend{ + ServiceName: toStrPtr("chronografd"), + ServicePort: toIntStrPtrI(8080), + }, + }, + }, + }, + }, + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(12), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("ui-lb"), + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: 
&testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "tls": false, + "backend_service_port": int32(8080), + "generation": int64(12), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "ingress_name": "ui-lb", + "namespace": "ns1", + "ip": "1.0.0.127", + "hostname": "chron-1", + "backend_service_name": "chronografd", + "host": "ui.internal", + "path": "/", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, ingress := range ((v.handler.responseMap["/ingress/"]).(*v1beta1EXT.IngressList)).Items { + err := ks.gatherIngress(*ingress, acc) + if err != nil { + t.Errorf("Failed to gather ingress - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/kube_state.go b/plugins/inputs/kube_inventory/kube_state.go new file mode 100644 index 000000000..5aa51b6c5 --- /dev/null +++ b/plugins/inputs/kube_inventory/kube_state.go @@ -0,0 +1,193 @@ +package kube_inventory + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "strconv" + "strings" + "sync" + "time" + + "github.com/kubernetes/apimachinery/pkg/api/resource" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const ( + defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token" +) + +// KubernetesInventory represents the config object for the plugin. +type KubernetesInventory struct { + URL string `toml:"url"` + BearerToken string `toml:"bearer_token"` + BearerTokenString string `toml:"bearer_token_string"` + Namespace string `toml:"namespace"` + ResponseTimeout internal.Duration `toml:"response_timeout"` // Timeout specified as a string - 3s, 1m, 1h + ResourceExclude []string `toml:"resource_exclude"` + ResourceInclude []string `toml:"resource_include"` + MaxConfigMapAge internal.Duration `toml:"max_config_map_age"` + + tls.ClientConfig + client *client +} + +var sampleConfig = ` + ## URL for the Kubernetes API + url = "https://127.0.0.1" + + ## Namespace to use. Set to "" to use all namespaces. + # namespace = "default" + + ## Use bearer token for authorization. 
('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional Resources to exclude from gathering + ## Leave them with blank with try to gather everything available. + ## Values can be - "daemonsets", deployments", "endpoints", "ingress", "nodes", + ## "persistentvolumes", "persistentvolumeclaims", "pods", "services", "statefulsets" + # resource_exclude = [ "deployments", "nodes", "statefulsets" ] + + ## Optional Resources to include when gathering + ## Overrides resource_exclude if both set. + # resource_include = [ "deployments", "nodes", "statefulsets" ] + + ## Optional TLS Config + # tls_ca = "/path/to/cafile" + # tls_cert = "/path/to/certfile" + # tls_key = "/path/to/keyfile" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +// SampleConfig returns a sample config +func (ki *KubernetesInventory) SampleConfig() string { + return sampleConfig +} + +// Description returns the description of this plugin +func (ki *KubernetesInventory) Description() string { + return "Read metrics from the Kubernetes api" +} + +func (ki *KubernetesInventory) Init() error { + // If neither are provided, use the default service account. + if ki.BearerToken == "" && ki.BearerTokenString == "" { + ki.BearerToken = defaultServiceAccountPath + } + + if ki.BearerToken != "" { + token, err := ioutil.ReadFile(ki.BearerToken) + if err != nil { + return err + } + ki.BearerTokenString = strings.TrimSpace(string(token)) + } + + var err error + ki.client, err = newClient(ki.URL, ki.Namespace, ki.BearerTokenString, ki.ResponseTimeout.Duration, ki.ClientConfig) + + if err != nil { + return err + } + + return nil +} + +// Gather collects kubernetes metrics from a given URL. +func (ki *KubernetesInventory) Gather(acc telegraf.Accumulator) (err error) { + resourceFilter, err := filter.NewIncludeExcludeFilter(ki.ResourceInclude, ki.ResourceExclude) + if err != nil { + return err + } + + wg := sync.WaitGroup{} + ctx := context.Background() + + for collector, f := range availableCollectors { + if resourceFilter.Match(collector) { + wg.Add(1) + go func(f func(ctx context.Context, acc telegraf.Accumulator, k *KubernetesInventory)) { + defer wg.Done() + f(ctx, acc, ki) + }(f) + } + } + + wg.Wait() + + return nil +} + +var availableCollectors = map[string]func(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory){ + "daemonsets": collectDaemonSets, + "deployments": collectDeployments, + "endpoints": collectEndpoints, + "ingress": collectIngress, + "nodes": collectNodes, + "pods": collectPods, + "services": collectServices, + "statefulsets": collectStatefulSets, + "persistentvolumes": collectPersistentVolumes, + "persistentvolumeclaims": collectPersistentVolumeClaims, +} + +func atoi(s string) int64 { + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0 + } + return int64(i) +} + +func convertQuantity(s string, m float64) int64 { + q, err := resource.ParseQuantity(s) + if err != nil { + log.Printf("D! [inputs.kube_inventory] failed to parse quantity: %s", err.Error()) + return 0 + } + f, err := strconv.ParseFloat(fmt.Sprint(q.AsDec()), 64) + if err != nil { + log.Printf("D! 
[inputs.kube_inventory] failed to parse float: %s", err.Error()) + return 0 + } + if m < 1 { + m = 1 + } + return int64(f * m) +} + +var ( + daemonSetMeasurement = "kubernetes_daemonset" + deploymentMeasurement = "kubernetes_deployment" + endpointMeasurement = "kubernetes_endpoint" + ingressMeasurement = "kubernetes_ingress" + nodeMeasurement = "kubernetes_node" + persistentVolumeMeasurement = "kubernetes_persistentvolume" + persistentVolumeClaimMeasurement = "kubernetes_persistentvolumeclaim" + podContainerMeasurement = "kubernetes_pod_container" + serviceMeasurement = "kubernetes_service" + statefulSetMeasurement = "kubernetes_statefulset" +) + +func init() { + inputs.Add("kube_inventory", func() telegraf.Input { + return &KubernetesInventory{ + ResponseTimeout: internal.Duration{Duration: time.Second * 5}, + Namespace: "default", + } + }) +} diff --git a/plugins/inputs/kube_inventory/node.go b/plugins/inputs/kube_inventory/node.go new file mode 100644 index 000000000..cccf6897f --- /dev/null +++ b/plugins/inputs/kube_inventory/node.go @@ -0,0 +1,56 @@ +package kube_inventory + +import ( + "context" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectNodes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getNodes(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, n := range list.Items { + if err = ki.gatherNode(*n, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherNode(n v1.Node, acc telegraf.Accumulator) error { + fields := map[string]interface{}{} + tags := map[string]string{ + "node_name": *n.Metadata.Name, + } + + for resourceName, val := range n.Status.Capacity { + switch resourceName { + case "cpu": + fields["capacity_cpu_cores"] = atoi(val.GetString_()) + case "memory": + fields["capacity_memory_bytes"] = convertQuantity(val.GetString_(), 1) + case "pods": + fields["capacity_pods"] = atoi(val.GetString_()) + } + } + + for resourceName, val := range n.Status.Allocatable { + switch resourceName { + case "cpu": + fields["allocatable_cpu_cores"] = atoi(val.GetString_()) + case "memory": + fields["allocatable_memory_bytes"] = convertQuantity(val.GetString_(), 1) + case "pods": + fields["allocatable_pods"] = atoi(val.GetString_()) + } + } + + acc.AddFields(nodeMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/node_test.go b/plugins/inputs/kube_inventory/node_test.go new file mode 100644 index 000000000..7573dd2c0 --- /dev/null +++ b/plugins/inputs/kube_inventory/node_test.go @@ -0,0 +1,172 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/ericchiang/k8s/apis/resource" + + "github.com/influxdata/telegraf/testutil" +) + +func TestNode(t *testing.T) { + cli := &client{} + now := time.Now() + created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no nodes", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/nodes/": &v1.NodeList{}, + }, + }, + hasError: false, + }, + { + name: "collect nodes", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/nodes/": &v1.NodeList{ + Items: []*v1.Node{ + { + Status: &v1.NodeStatus{ + NodeInfo: &v1.NodeSystemInfo{ + KernelVersion: 
toStrPtr("4.14.48-coreos-r2"), + OsImage: toStrPtr("Container Linux by CoreOS 1745.7.0 (Rhyolite)"), + ContainerRuntimeVersion: toStrPtr("docker://18.3.1"), + KubeletVersion: toStrPtr("v1.10.3"), + KubeProxyVersion: toStrPtr("v1.10.3"), + }, + Phase: toStrPtr("Running"), + Capacity: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("16")}, + "ephemeral_storage_bytes": {String_: toStrPtr("49536401408")}, + "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, + "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, + "memory": {String_: toStrPtr("125817904Ki")}, + "pods": {String_: toStrPtr("110")}, + }, + Allocatable: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("16")}, + "ephemeral_storage_bytes": {String_: toStrPtr("44582761194")}, + "hugepages_1Gi_bytes": {String_: toStrPtr("0")}, + "hugepages_2Mi_bytes": {String_: toStrPtr("0")}, + "memory": {String_: toStrPtr("125715504Ki")}, + "pods": {String_: toStrPtr("110")}, + }, + Conditions: []*v1.NodeCondition{ + {Type: toStrPtr("Ready"), Status: toStrPtr("true"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}}, + {Type: toStrPtr("OutOfDisk"), Status: toStrPtr("false"), LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}}, + }, + }, + Spec: &v1.NodeSpec{ + ProviderID: toStrPtr("aws:///us-east-1c/i-0c00"), + Taints: []*v1.Taint{ + { + Key: toStrPtr("k1"), + Value: toStrPtr("v1"), + Effect: toStrPtr("NoExecute"), + }, + { + Key: toStrPtr("k2"), + Value: toStrPtr("v2"), + Effect: toStrPtr("NoSchedule"), + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(int64(11232)), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("node1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Measurement: nodeMeasurement, + Fields: map[string]interface{}{ + "capacity_cpu_cores": int64(16), + "capacity_memory_bytes": int64(1.28837533696e+11), + "capacity_pods": int64(110), + "allocatable_cpu_cores": int64(16), + "allocatable_memory_bytes": int64(1.28732676096e+11), + "allocatable_pods": int64(110), + }, + Tags: map[string]string{ + "node_name": "node1", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, node := range ((v.handler.responseMap["/nodes/"]).(*v1.NodeList)).Items { + err := ks.gatherNode(*node, acc) + if err != nil { + t.Errorf("Failed to gather node - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + measurement := v.output.Metrics[i].Measurement + var keyTag string + switch measurement { + case nodeMeasurement: + keyTag = "node" + } + var j int + for j = range acc.Metrics { + if acc.Metrics[j].Measurement == measurement && + acc.Metrics[j].Tags[keyTag] == v.output.Metrics[i].Tags[keyTag] { + break + } + } + + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[j].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, measurement %s, j %d\n", v.name, k, m, acc.Metrics[j].Tags[k], measurement, 
j) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[j].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), measurement %s, j %d\n", v.name, k, m, m, acc.Metrics[j].Fields[k], acc.Metrics[i].Fields[k], measurement, j) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/persistentvolume.go b/plugins/inputs/kube_inventory/persistentvolume.go new file mode 100644 index 000000000..05600522b --- /dev/null +++ b/plugins/inputs/kube_inventory/persistentvolume.go @@ -0,0 +1,52 @@ +package kube_inventory + +import ( + "context" + "strings" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectPersistentVolumes(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getPersistentVolumes(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, pv := range list.Items { + if err = ki.gatherPersistentVolume(*pv, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherPersistentVolume(pv v1.PersistentVolume, acc telegraf.Accumulator) error { + phaseType := 5 + switch strings.ToLower(pv.Status.GetPhase()) { + case "bound": + phaseType = 0 + case "failed": + phaseType = 1 + case "pending": + phaseType = 2 + case "released": + phaseType = 3 + case "available": + phaseType = 4 + } + fields := map[string]interface{}{ + "phase_type": phaseType, + } + tags := map[string]string{ + "pv_name": pv.Metadata.GetName(), + "phase": pv.Status.GetPhase(), + "storageclass": pv.Spec.GetStorageClassName(), + } + + acc.AddFields(persistentVolumeMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/persistentvolume_test.go b/plugins/inputs/kube_inventory/persistentvolume_test.go new file mode 100644 index 000000000..a5d20d047 --- /dev/null +++ b/plugins/inputs/kube_inventory/persistentvolume_test.go @@ -0,0 +1,112 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + + "github.com/influxdata/telegraf/testutil" +) + +func TestPersistentVolume(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no pv", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumes/": &v1.PersistentVolumeList{}, + }, + }, + hasError: false, + }, + { + name: "collect pvs", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumes/": &v1.PersistentVolumeList{ + Items: []*v1.PersistentVolume{ + { + Status: &v1.PersistentVolumeStatus{ + Phase: toStrPtr("pending"), + }, + Spec: &v1.PersistentVolumeSpec{ + StorageClassName: toStrPtr("ebs-1"), + }, + Metadata: &metav1.ObjectMeta{ + Name: toStrPtr("pv1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "phase_type": 2, + }, + Tags: map[string]string{ + "pv_name": "pv1", + "storageclass": "ebs-1", + "phase": "pending", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) 
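+		// Feed each PersistentVolume from the mocked /persistentvolumes/ response
+		// through gatherPersistentVolume, then compare the accumulated tags and
+		// fields against the expected v.output below.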
+ for _, pv := range ((v.handler.responseMap["/persistentvolumes/"]).(*v1.PersistentVolumeList)).Items { + err := ks.gatherPersistentVolume(*pv, acc) + if err != nil { + t.Errorf("Failed to gather pv - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim.go b/plugins/inputs/kube_inventory/persistentvolumeclaim.go new file mode 100644 index 000000000..0663462ae --- /dev/null +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim.go @@ -0,0 +1,49 @@ +package kube_inventory + +import ( + "context" + "strings" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectPersistentVolumeClaims(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getPersistentVolumeClaims(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, pvc := range list.Items { + if err = ki.gatherPersistentVolumeClaim(*pvc, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherPersistentVolumeClaim(pvc v1.PersistentVolumeClaim, acc telegraf.Accumulator) error { + phaseType := 3 + switch strings.ToLower(pvc.Status.GetPhase()) { + case "bound": + phaseType = 0 + case "lost": + phaseType = 1 + case "pending": + phaseType = 2 + } + fields := map[string]interface{}{ + "phase_type": phaseType, + } + tags := map[string]string{ + "pvc_name": pvc.Metadata.GetName(), + "namespace": pvc.Metadata.GetNamespace(), + "phase": pvc.Status.GetPhase(), + "storageclass": pvc.Spec.GetStorageClassName(), + } + + acc.AddFields(persistentVolumeClaimMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go new file mode 100644 index 000000000..8a50c0f2e --- /dev/null +++ b/plugins/inputs/kube_inventory/persistentvolumeclaim_test.go @@ -0,0 +1,115 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + + "github.com/influxdata/telegraf/testutil" +) + +func TestPersistentVolumeClaim(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no pv claims", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{}, + }, + }, + hasError: false, + }, + { + name: "collect pv claims", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + 
"/persistentvolumeclaims/": &v1.PersistentVolumeClaimList{ + Items: []*v1.PersistentVolumeClaim{ + { + Status: &v1.PersistentVolumeClaimStatus{ + Phase: toStrPtr("bound"), + }, + Spec: &v1.PersistentVolumeClaimSpec{ + VolumeName: toStrPtr("pvc-dc870fd6-1e08-11e8-b226-02aa4bc06eb8"), + StorageClassName: toStrPtr("ebs-1"), + }, + Metadata: &metav1.ObjectMeta{ + Namespace: toStrPtr("ns1"), + Name: toStrPtr("pc1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "phase_type": 0, + }, + Tags: map[string]string{ + "pvc_name": "pc1", + "namespace": "ns1", + "storageclass": "ebs-1", + "phase": "bound", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, pvc := range ((v.handler.responseMap["/persistentvolumeclaims/"]).(*v1.PersistentVolumeClaimList)).Items { + err := ks.gatherPersistentVolumeClaim(*pvc, acc) + if err != nil { + t.Errorf("Failed to gather pvc - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/pod.go b/plugins/inputs/kube_inventory/pod.go new file mode 100644 index 000000000..7b5207616 --- /dev/null +++ b/plugins/inputs/kube_inventory/pod.go @@ -0,0 +1,87 @@ +package kube_inventory + +import ( + "context" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectPods(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getPods(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, p := range list.Items { + if err = ki.gatherPod(*p, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherPod(p v1.Pod, acc telegraf.Accumulator) error { + if p.Metadata.CreationTimestamp.GetSeconds() == 0 && p.Metadata.CreationTimestamp.GetNanos() == 0 { + return nil + } + + for i, cs := range p.Status.ContainerStatuses { + c := p.Spec.Containers[i] + gatherPodContainer(*p.Spec.NodeName, p, *cs, *c, acc) + } + + return nil +} + +func gatherPodContainer(nodeName string, p v1.Pod, cs v1.ContainerStatus, c v1.Container, acc telegraf.Accumulator) { + stateCode := 3 + state := "unknown" + switch { + case cs.State.Running != nil: + stateCode = 0 + state = "running" + case cs.State.Terminated != nil: + stateCode = 1 + state = "terminated" + case cs.State.Waiting != nil: + stateCode = 2 + state = "waiting" + } + + fields := map[string]interface{}{ + "restarts_total": cs.GetRestartCount(), + "state_code": 
stateCode, + "terminated_reason": cs.State.Terminated.GetReason(), + } + tags := map[string]string{ + "container_name": *c.Name, + "namespace": *p.Metadata.Namespace, + "node_name": *p.Spec.NodeName, + "pod_name": *p.Metadata.Name, + "state": state, + } + + req := c.Resources.Requests + lim := c.Resources.Limits + + for resourceName, val := range req { + switch resourceName { + case "cpu": + fields["resource_requests_millicpu_units"] = convertQuantity(val.GetString_(), 1000) + case "memory": + fields["resource_requests_memory_bytes"] = convertQuantity(val.GetString_(), 1) + } + } + for resourceName, val := range lim { + switch resourceName { + case "cpu": + fields["resource_limits_millicpu_units"] = convertQuantity(val.GetString_(), 1000) + case "memory": + fields["resource_limits_memory_bytes"] = convertQuantity(val.GetString_(), 1) + } + } + + acc.AddFields(podContainerMeasurement, fields, tags) +} diff --git a/plugins/inputs/kube_inventory/pod_test.go b/plugins/inputs/kube_inventory/pod_test.go new file mode 100644 index 000000000..50b093880 --- /dev/null +++ b/plugins/inputs/kube_inventory/pod_test.go @@ -0,0 +1,199 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/ericchiang/k8s/apis/resource" + "github.com/influxdata/telegraf/testutil" +) + +func TestPod(t *testing.T) { + cli := &client{} + now := time.Now() + started := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-1, 1, 36, 0, now.Location()) + created := time.Date(now.Year(), now.Month(), now.Day(), now.Hour()-2, 1, 36, 0, now.Location()) + cond1 := time.Date(now.Year(), 7, 5, 7, 53, 29, 0, now.Location()) + cond2 := time.Date(now.Year(), 7, 5, 7, 53, 31, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no pods", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/pods/": &v1.PodList{}, + }, + }, + hasError: false, + }, + { + name: "collect pods", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/pods/": &v1.PodList{ + Items: []*v1.Pod{ + { + Spec: &v1.PodSpec{ + NodeName: toStrPtr("node1"), + Containers: []*v1.Container{ + { + Name: toStrPtr("forwarder"), + Image: toStrPtr("image1"), + Ports: []*v1.ContainerPort{ + { + ContainerPort: toInt32Ptr(8080), + Protocol: toStrPtr("TCP"), + }, + }, + Resources: &v1.ResourceRequirements{ + Limits: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + Requests: map[string]*resource.Quantity{ + "cpu": {String_: toStrPtr("100m")}, + }, + }, + }, + }, + Volumes: []*v1.Volume{ + { + Name: toStrPtr("vol1"), + VolumeSource: &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: toStrPtr("pc1"), + ReadOnly: toBoolPtr(true), + }, + }, + }, + { + Name: toStrPtr("vol2"), + }, + }, + }, + Status: &v1.PodStatus{ + Phase: toStrPtr("Running"), + HostIP: toStrPtr("180.12.10.18"), + PodIP: toStrPtr("10.244.2.15"), + StartTime: &metav1.Time{Seconds: toInt64Ptr(started.Unix())}, + Conditions: []*v1.PodCondition{ + { + Type: toStrPtr("Initialized"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond1.Unix())}, + }, + { + Type: toStrPtr("Ready"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + }, + { + Type: toStrPtr("Scheduled"), + Status: toStrPtr("True"), + LastTransitionTime: &metav1.Time{Seconds: 
toInt64Ptr(cond1.Unix())}, + }, + }, + ContainerStatuses: []*v1.ContainerStatus{ + { + Name: toStrPtr("forwarder"), + State: &v1.ContainerState{ + Running: &v1.ContainerStateRunning{ + StartedAt: &metav1.Time{Seconds: toInt64Ptr(cond2.Unix())}, + }, + }, + Ready: toBoolPtr(true), + RestartCount: toInt32Ptr(3), + Image: toStrPtr("image1"), + ImageID: toStrPtr("image_id1"), + ContainerID: toStrPtr("docker://54abe32d0094479d3d"), + }, + }, + }, + Metadata: &metav1.ObjectMeta{ + OwnerReferences: []*metav1.OwnerReference{ + { + ApiVersion: toStrPtr("apps/v1"), + Kind: toStrPtr("DaemonSet"), + Name: toStrPtr("forwarder"), + Controller: toBoolPtr(true), + }, + }, + Generation: toInt64Ptr(11232), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("pod1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(created.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Measurement: podContainerMeasurement, + Fields: map[string]interface{}{ + "restarts_total": int32(3), + "state_code": 0, + "resource_requests_millicpu_units": int64(100), + "resource_limits_millicpu_units": int64(100), + }, + Tags: map[string]string{ + "namespace": "ns1", + "container_name": "forwarder", + "node_name": "node1", + "pod_name": "pod1", + "state": "running", + }, + }, + }, + }, + hasError: false, + }, + } + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, pod := range ((v.handler.responseMap["/pods/"]).(*v1.PodList)).Items { + err := ks.gatherPod(*pod, acc) + if err != nil { + t.Errorf("Failed to gather pod - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s, i %d\n", v.name, k, m, acc.Metrics[i].Tags[k], i) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T), i %d\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k], i) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/service.go b/plugins/inputs/kube_inventory/service.go new file mode 100644 index 000000000..4b0cc0845 --- /dev/null +++ b/plugins/inputs/kube_inventory/service.go @@ -0,0 +1,70 @@ +package kube_inventory + +import ( + "context" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + + "github.com/influxdata/telegraf" +) + +func collectServices(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getServices(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, i := range list.Items { + if err = ki.gatherService(*i, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherService(s v1.Service, acc telegraf.Accumulator) error { + if s.Metadata.CreationTimestamp.GetSeconds() == 0 && s.Metadata.CreationTimestamp.GetNanos() == 0 { + return nil + } + + fields := map[string]interface{}{ + "created": 
time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "generation": s.Metadata.GetGeneration(), + } + + tags := map[string]string{ + "service_name": s.Metadata.GetName(), + "namespace": s.Metadata.GetNamespace(), + } + + var getPorts = func() { + for _, port := range s.GetSpec().GetPorts() { + fields["port"] = port.GetPort() + fields["target_port"] = port.GetTargetPort().GetIntVal() + + tags["port_name"] = port.GetName() + tags["port_protocol"] = port.GetProtocol() + + if s.GetSpec().GetType() == "ExternalName" { + tags["external_name"] = s.GetSpec().GetExternalName() + } else { + tags["cluster_ip"] = s.GetSpec().GetClusterIP() + } + + acc.AddFields(serviceMeasurement, fields, tags) + } + } + + if externIPs := s.GetSpec().GetExternalIPs(); externIPs != nil { + for _, ip := range externIPs { + tags["ip"] = ip + + getPorts() + } + } else { + getPorts() + } + + return nil +} diff --git a/plugins/inputs/kube_inventory/service_test.go b/plugins/inputs/kube_inventory/service_test.go new file mode 100644 index 000000000..6c0c8787a --- /dev/null +++ b/plugins/inputs/kube_inventory/service_test.go @@ -0,0 +1,123 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + "github.com/influxdata/telegraf/testutil" +) + +func TestService(t *testing.T) { + cli := &client{} + + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no service", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/service/": &v1.ServiceList{}, + }, + }, + hasError: false, + }, + { + name: "collect service", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/service/": &v1.ServiceList{ + Items: []*v1.Service{ + { + Spec: &v1.ServiceSpec{ + Ports: []*v1.ServicePort{ + { + Port: toInt32Ptr(8080), + TargetPort: toIntStrPtrI(1234), + Name: toStrPtr("diagnostic"), + Protocol: toStrPtr("TCP"), + }, + }, + ExternalIPs: []string{"1.0.0.127"}, + ClusterIP: toStrPtr("127.0.0.1"), + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(12), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("checker"), + CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "port": int32(8080), + "target_port": int32(1234), + "generation": int64(12), + "created": now.UnixNano(), + }, + Tags: map[string]string{ + "service_name": "checker", + "namespace": "ns1", + "port_name": "diagnostic", + "port_protocol": "TCP", + "cluster_ip": "127.0.0.1", + "ip": "1.0.0.127", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, service := range ((v.handler.responseMap["/service/"]).(*v1.ServiceList)).Items { + err := ks.gatherService(*service, acc) + if err != nil { + t.Errorf("Failed to gather service - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if 
v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got '%v'\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kube_inventory/statefulset.go b/plugins/inputs/kube_inventory/statefulset.go new file mode 100644 index 000000000..c95e566c2 --- /dev/null +++ b/plugins/inputs/kube_inventory/statefulset.go @@ -0,0 +1,46 @@ +package kube_inventory + +import ( + "context" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1" + + "github.com/influxdata/telegraf" +) + +func collectStatefulSets(ctx context.Context, acc telegraf.Accumulator, ki *KubernetesInventory) { + list, err := ki.client.getStatefulSets(ctx) + if err != nil { + acc.AddError(err) + return + } + for _, s := range list.Items { + if err = ki.gatherStatefulSet(*s, acc); err != nil { + acc.AddError(err) + return + } + } +} + +func (ki *KubernetesInventory) gatherStatefulSet(s v1.StatefulSet, acc telegraf.Accumulator) error { + status := s.Status + fields := map[string]interface{}{ + "created": time.Unix(s.Metadata.CreationTimestamp.GetSeconds(), int64(s.Metadata.CreationTimestamp.GetNanos())).UnixNano(), + "generation": *s.Metadata.Generation, + "replicas": *status.Replicas, + "replicas_current": *status.CurrentReplicas, + "replicas_ready": *status.ReadyReplicas, + "replicas_updated": *status.UpdatedReplicas, + "spec_replicas": *s.Spec.Replicas, + "observed_generation": *s.Status.ObservedGeneration, + } + tags := map[string]string{ + "statefulset_name": *s.Metadata.Name, + "namespace": *s.Metadata.Namespace, + } + + acc.AddFields(statefulSetMeasurement, fields, tags) + + return nil +} diff --git a/plugins/inputs/kube_inventory/statefulset_test.go b/plugins/inputs/kube_inventory/statefulset_test.go new file mode 100644 index 000000000..1a971b7b6 --- /dev/null +++ b/plugins/inputs/kube_inventory/statefulset_test.go @@ -0,0 +1,123 @@ +package kube_inventory + +import ( + "testing" + "time" + + "github.com/ericchiang/k8s/apis/apps/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" + + "github.com/influxdata/telegraf/testutil" +) + +func TestStatefulSet(t *testing.T) { + cli := &client{} + now := time.Now() + now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 1, 36, 0, now.Location()) + tests := []struct { + name string + handler *mockHandler + output *testutil.Accumulator + hasError bool + }{ + { + name: "no statefulsets", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/statefulsets/": &v1.StatefulSetList{}, + }, + }, + hasError: false, + }, + { + name: "collect statefulsets", + handler: &mockHandler{ + responseMap: map[string]interface{}{ + "/statefulsets/": &v1.StatefulSetList{ + Items: []*v1.StatefulSet{ + { + Status: &v1.StatefulSetStatus{ + Replicas: toInt32Ptr(2), + CurrentReplicas: toInt32Ptr(4), + ReadyReplicas: toInt32Ptr(1), + UpdatedReplicas: toInt32Ptr(3), + ObservedGeneration: toInt64Ptr(119), + }, + Spec: &v1.StatefulSetSpec{ + Replicas: toInt32Ptr(3), + }, + Metadata: &metav1.ObjectMeta{ + Generation: toInt64Ptr(332), + Namespace: toStrPtr("ns1"), + Name: toStrPtr("sts1"), + Labels: map[string]string{ + "lab1": "v1", + "lab2": "v2", + }, + 
CreationTimestamp: &metav1.Time{Seconds: toInt64Ptr(now.Unix())}, + }, + }, + }, + }, + }, + }, + output: &testutil.Accumulator{ + Metrics: []*testutil.Metric{ + { + Fields: map[string]interface{}{ + "generation": int64(332), + "observed_generation": int64(119), + "created": now.UnixNano(), + "spec_replicas": int32(3), + "replicas": int32(2), + "replicas_current": int32(4), + "replicas_ready": int32(1), + "replicas_updated": int32(3), + }, + Tags: map[string]string{ + "namespace": "ns1", + "statefulset_name": "sts1", + }, + }, + }, + }, + hasError: false, + }, + } + + for _, v := range tests { + ks := &KubernetesInventory{ + client: cli, + } + acc := new(testutil.Accumulator) + for _, ss := range ((v.handler.responseMap["/statefulsets/"]).(*v1.StatefulSetList)).Items { + err := ks.gatherStatefulSet(*ss, acc) + if err != nil { + t.Errorf("Failed to gather ss - %s", err.Error()) + } + } + + err := acc.FirstError() + if err == nil && v.hasError { + t.Fatalf("%s failed, should have error", v.name) + } else if err != nil && !v.hasError { + t.Fatalf("%s failed, err: %v", v.name, err) + } + if v.output == nil && len(acc.Metrics) > 0 { + t.Fatalf("%s: collected extra data", v.name) + } else if v.output != nil && len(v.output.Metrics) > 0 { + for i := range v.output.Metrics { + for k, m := range v.output.Metrics[i].Tags { + if acc.Metrics[i].Tags[k] != m { + t.Fatalf("%s: tag %s metrics unmatch Expected %s, got %s\n", v.name, k, m, acc.Metrics[i].Tags[k]) + } + } + for k, m := range v.output.Metrics[i].Fields { + if acc.Metrics[i].Fields[k] != m { + t.Fatalf("%s: field %s metrics unmatch Expected %v(%T), got %v(%T)\n", v.name, k, m, m, acc.Metrics[i].Fields[k], acc.Metrics[i].Fields[k]) + } + } + } + } + } +} diff --git a/plugins/inputs/kubernetes/README.md b/plugins/inputs/kubernetes/README.md index 099cf1526..a574bed06 100644 --- a/plugins/inputs/kubernetes/README.md +++ b/plugins/inputs/kubernetes/README.md @@ -1,265 +1,166 @@ # Kubernetes Input Plugin -**This plugin is experimental and may cause high cardinality issues with moderate to large Kubernetes deployments** - -This input plugin talks to the kubelet api using the `/stats/summary` endpoint to gather metrics about the running pods and containers for a single host. It is assumed that this plugin is running as part of a `daemonset` within a kubernetes installation. This means that telegraf is running on every node within the cluster. Therefore, you should configure this plugin to talk to its locally running kubelet. +The Kubernetes plugin talks to the Kubelet API and gathers metrics about the +running pods and containers for a single host. It is assumed that this plugin +is running as part of a `daemonset` within a kubernetes installation. This +means that telegraf is running on every node within the cluster. Therefore, you +should configure this plugin to talk to its locally running kubelet. To find the ip address of the host you are running on you can issue a command like the following: + ``` $ curl -s $API_URL/api/v1/namespaces/$POD_NAMESPACE/pods/$HOSTNAME --header "Authorization: Bearer $TOKEN" --insecure | jq -r '.status.hostIP' ``` + In this case we used the downward API to pass in the `$POD_NAMESPACE` and `$HOSTNAME` is the hostname of the pod which is set by the kubernetes API. -## Summary Data +Kubernetes is a fast moving project, with a new minor release every 3 months. As +such, we will aim to maintain support only for versions that are supported by +the major cloud providers; this is roughly 4 release / 2 years. 
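Editor's note: to make the kubelet interaction described above concrete, here is a minimal Go sketch (an illustration, not part of the plugin) of the request the plugin issues: an authenticated GET against the node-local kubelet's `/stats/summary` endpoint. The port and the service-account token path are taken from the sample configuration shown later in this README; error handling is reduced to panics for brevity.

```go
// Minimal sketch (not part of the plugin): fetch the kubelet summary that the
// kubernetes input parses. Assumes the read-only kubelet port 10255 and the
// in-cluster service-account token path from the sample config.
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"time"
)

func main() {
	token, err := ioutil.ReadFile("/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest("GET", "http://127.0.0.1:10255/stats/summary", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+strings.TrimSpace(string(token)))
	req.Header.Add("Accept", "application/json")

	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The plugin decodes this JSON into SummaryMetrics and emits the
	// kubernetes_node, kubernetes_pod_* and kubernetes_system_container metrics
	// listed in the Metrics section below.
	fmt.Println(string(body))
}
```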
-```json -{ - "node": { - "nodeName": "node1", - "systemContainers": [ - { - "name": "kubelet", - "startTime": "2016-08-25T18:46:52Z", - "cpu": { - "time": "2016-09-27T16:57:31Z", - "usageNanoCores": 56652446, - "usageCoreNanoSeconds": 101437561712262 - }, - "memory": { - "time": "2016-09-27T16:57:31Z", - "usageBytes": 62529536, - "workingSetBytes": 62349312, - "rssBytes": 47509504, - "pageFaults": 4769397409, - "majorPageFaults": 13 - }, - "rootfs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800 - }, - "logs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800 - }, - "userDefinedMetrics": null - }, - { - "name": "bar", - "startTime": "2016-08-25T18:46:52Z", - "cpu": { - "time": "2016-09-27T16:57:31Z", - "usageNanoCores": 56652446, - "usageCoreNanoSeconds": 101437561712262 - }, - "memory": { - "time": "2016-09-27T16:57:31Z", - "usageBytes": 62529536, - "workingSetBytes": 62349312, - "rssBytes": 47509504, - "pageFaults": 4769397409, - "majorPageFaults": 13 - }, - "rootfs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800 - }, - "logs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800 - }, - "userDefinedMetrics": null - } - ], - "startTime": "2016-08-25T18:46:52Z", - "cpu": { - "time": "2016-09-27T16:57:41Z", - "usageNanoCores": 576996212, - "usageCoreNanoSeconds": 774129887054161 - }, - "memory": { - "time": "2016-09-27T16:57:41Z", - "availableBytes": 10726387712, - "usageBytes": 12313182208, - "workingSetBytes": 5081538560, - "rssBytes": 35586048, - "pageFaults": 351742, - "majorPageFaults": 1236 - }, - "network": { - "time": "2016-09-27T16:57:41Z", - "rxBytes": 213281337459, - "rxErrors": 0, - "txBytes": 292869995684, - "txErrors": 0 - }, - "fs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800, - "usedBytes": 16754286592 - }, - "runtime": { - "imageFs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800, - "usedBytes": 5809371475 - } - } - }, - "pods": [ - { - "podRef": { - "name": "foopod", - "namespace": "foons", - "uid": "6d305b06-8419-11e6-825c-42010af000ae" - }, - "startTime": "2016-09-26T18:45:42Z", - "containers": [ - { - "name": "foocontainer", - "startTime": "2016-09-26T18:46:43Z", - "cpu": { - "time": "2016-09-27T16:57:32Z", - "usageNanoCores": 846503, - "usageCoreNanoSeconds": 56507553554 - }, - "memory": { - "time": "2016-09-27T16:57:32Z", - "usageBytes": 30789632, - "workingSetBytes": 30789632, - "rssBytes": 30695424, - "pageFaults": 10761, - "majorPageFaults": 0 - }, - "rootfs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800, - "usedBytes": 57344 - }, - "logs": { - "availableBytes": 84379979776, - "capacityBytes": 105553100800, - "usedBytes": 24576 - }, - "userDefinedMetrics": null - } - ], - "network": { - "time": "2016-09-27T16:57:34Z", - "rxBytes": 70749124, - "rxErrors": 0, - "txBytes": 47813506, - "txErrors": 0 - }, - "volume": [ - { - "availableBytes": 7903948800, - "capacityBytes": 7903961088, - "usedBytes": 12288, - "name": "volume1" - }, - { - "availableBytes": 7903956992, - "capacityBytes": 7903961088, - "usedBytes": 4096, - "name": "volume2" - }, - { - "availableBytes": 7903948800, - "capacityBytes": 7903961088, - "usedBytes": 12288, - "name": "volume3" - }, - { - "availableBytes": 7903952896, - "capacityBytes": 7903961088, - "usedBytes": 8192, - "name": "volume4" - } - ] - } - ] - } - ``` +**This plugin supports Kubernetes 1.11 and later.** - ### Daemonset YAML +#### Series Cardinality Warning -```yaml -apiVersion: extensions/v1beta1 
-kind: DaemonSet -metadata: - name: telegraf - namespace: telegraf -spec: - template: - metadata: - labels: - app: telegraf - spec: - serviceAccount: telegraf - containers: - - name: telegraf - image: quay.io/org/image:latest - imagePullPolicy: IfNotPresent - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: "HOST_PROC" - value: "/rootfs/proc" - - name: "HOST_SYS" - value: "/rootfs/sys" - volumeMounts: - - name: sysro - mountPath: /rootfs/sys - readOnly: true - - name: procro - mountPath: /rootfs/proc - readOnly: true - - name: varrunutmpro - mountPath: /var/run/utmp - readOnly: true - - name: logger-redis-creds - mountPath: /var/run/secrets/deis/redis/creds - volumes: - - name: sysro - hostPath: - path: /sys - - name: procro - hostPath: - path: /proc - - name: varrunutmpro - hostPath: - path: /var/run/utmp +This plugin may produce a high number of series which, when not controlled +for, will cause high load on your database. Use the following techniques to +avoid cardinality issues: + +- Use [metric filtering][] options to exclude unneeded measurements and tags. +- Write to a database with an appropriate [retention policy][]. +- Limit series cardinality in your database using the + [max-series-per-database][] and [max-values-per-tag][] settings. +- Consider using the [Time Series Index][tsi]. +- Monitor your databases [series cardinality][]. +- Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. + +### Configuration + +```toml +[[inputs.kubernetes]] + ## URL for the kubelet + url = "http://127.0.0.1:10255" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## Pod labels to be added as tags. An empty array for both include and + ## exclude will include all labels. 
+ # label_include = [] + # label_exclude = ["*"] + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` -### Line Protocol +### DaemonSet -#### kubernetes_pod_container -``` -kubernetes_pod_container,host=ip-10-0-0-0.ec2.internal, -container_name=deis-controller,namespace=deis, -node_name=ip-10-0-0-0.ec2.internal, pod_name=deis-controller-3058870187-xazsr, cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i, -logsfs_avaialble_bytes=121128271872i,logsfs_capacity_bytes=153567944704i, -logsfs_used_bytes=20787200i,memory_major_page_faults=0i, -memory_page_faults=175i,memory_rss_bytes=0i, -memory_usage_bytes=0i,memory_working_set_bytes=0i, -rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i, -rootfs_used_bytes=1110016i 1476477530000000000 - ``` +For recommendations on running Telegraf as a DaemonSet see [Monitoring Kubernetes +Architecture][k8s-telegraf] or view the Helm charts: + +- [Telegraf][] +- [InfluxDB][] +- [Chronograf][] +- [Kapacitor][] + +### Metrics + +- kubernetes_node + - tags: + - node_name + - fields: + - cpu_usage_nanocores + - cpu_usage_core_nanoseconds + - memory_available_bytes + - memory_usage_bytes + - memory_working_set_bytes + - memory_rss_bytes + - memory_page_faults + - memory_major_page_faults + - network_rx_bytes + - network_rx_errors + - network_tx_bytes + - network_tx_errors + - fs_available_bytes + - fs_capacity_bytes + - fs_used_bytes + - runtime_image_fs_available_bytes + - runtime_image_fs_capacity_bytes + - runtime_image_fs_used_bytes + +* kubernetes_pod_container + - tags: + - container_name + - namespace + - node_name + - pod_name + - fields: + - cpu_usage_nanocores + - cpu_usage_core_nanoseconds + - memory_usage_bytes + - memory_working_set_bytes + - memory_rss_bytes + - memory_page_faults + - memory_major_page_faults + - rootfs_available_bytes + - rootfs_capacity_bytes + - rootfs_used_bytes + - logsfs_available_bytes + - logsfs_capacity_bytes + - logsfs_used_bytes + +- kubernetes_pod_volume + - tags: + - volume_name + - namespace + - node_name + - pod_name + - fields: + - available_bytes + - capacity_bytes + - used_bytes + +* kubernetes_pod_network + - tags: + - namespace + - node_name + - pod_name + - fields: + - rx_bytes + - rx_errors + - tx_bytes + - tx_errors + +### Example Output -#### kubernetes_pod_volume ``` -kubernetes_pod_volume,host=ip-10-0-0-0.ec2.internal,name=default-token-f7wts, -namespace=kube-system,node_name=ip-10-0-0-0.ec2.internal, -pod_name=kubernetes-dashboard-v1.1.1-t4x4t, available_bytes=8415240192i, -capacity_bytes=8415252480i,used_bytes=12288i 1476477530000000000 +kubernetes_node +kubernetes_pod_container,container_name=deis-controller,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr cpu_usage_core_nanoseconds=2432835i,cpu_usage_nanocores=0i,logsfs_available_bytes=121128271872i,logsfs_capacity_bytes=153567944704i,logsfs_used_bytes=20787200i,memory_major_page_faults=0i,memory_page_faults=175i,memory_rss_bytes=0i,memory_usage_bytes=0i,memory_working_set_bytes=0i,rootfs_available_bytes=121128271872i,rootfs_capacity_bytes=153567944704i,rootfs_used_bytes=1110016i 1476477530000000000 +kubernetes_pod_network,namespace=deis,node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr 
rx_bytes=120671099i,rx_errors=0i,tx_bytes=102451983i,tx_errors=0i 1476477530000000000 +kubernetes_pod_volume,volume_name=default-token-f7wts,namespace=default,node_name=ip-172-17-0-1.internal,pod_name=storage-7 available_bytes=8415240192i,capacity_bytes=8415252480i,used_bytes=12288i 1546910783000000000 +kubernetes_system_container ``` -#### kubernetes_pod_network -``` -kubernetes_pod_network,host=ip-10-0-0-0.ec2.internal,namespace=deis, -node_name=ip-10-0-0-0.ec2.internal,pod_name=deis-controller-3058870187-xazsr, -rx_bytes=120671099i,rx_errors=0i, -tx_bytes=102451983i,tx_errors=0i 1476477530000000000 -``` +[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering +[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/ +[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000 +[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000 +[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/ +[series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality +[influx-docs]: https://docs.influxdata.com/influxdb/latest/ +[k8s-telegraf]: https://www.influxdata.com/blog/monitoring-kubernetes-architecture/ +[telegraf]: https://github.com/helm/charts/tree/master/stable/telegraf +[influxdb]: https://github.com/helm/charts/tree/master/stable/influxdb +[chronograf]: https://github.com/helm/charts/tree/master/stable/chronograf +[kapacitor]: https://github.com/helm/charts/tree/master/stable/kapacitor diff --git a/plugins/inputs/kubernetes/kubernetes.go b/plugins/inputs/kubernetes/kubernetes.go index 870524a80..412db1dc3 100644 --- a/plugins/inputs/kubernetes/kubernetes.go +++ b/plugins/inputs/kubernetes/kubernetes.go @@ -6,10 +6,11 @@ import ( "io/ioutil" "net/http" "net/url" - "sync" + "strings" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" @@ -20,7 +21,13 @@ type Kubernetes struct { URL string // Bearer Token authorization file path - BearerToken string `toml:"bearer_token"` + BearerToken string `toml:"bearer_token"` + BearerTokenString string `toml:"bearer_token_string"` + + LabelInclude []string `toml:"label_include"` + LabelExclude []string `toml:"label_exclude"` + + labelFilter filter.Filter // HTTP Timeout specified as a string - 3s, 1m, 1h ResponseTimeout internal.Duration @@ -32,10 +39,19 @@ type Kubernetes struct { var sampleConfig = ` ## URL for the kubelet - url = "http://1.1.1.1:10255" + url = "http://127.0.0.1:10255" - ## Use bearer token for authorization - # bearer_token = /path/to/bearer/token + ## Use bearer token for authorization. ('bearer_token' takes priority) + ## If both of these are empty, we'll use the default serviceaccount: + ## at: /run/secrets/kubernetes.io/serviceaccount/token + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## Pod labels to be added as tags. An empty array for both include and + ## exclude will include all labels. 
+ # label_include = [] + # label_exclude = ["*"] ## Set response_timeout (default 5 seconds) # response_timeout = "5s" @@ -49,12 +65,16 @@ var sampleConfig = ` ` const ( - summaryEndpoint = `%s/stats/summary` + summaryEndpoint = `%s/stats/summary` + defaultServiceAccountPath = "/run/secrets/kubernetes.io/serviceaccount/token" ) func init() { inputs.Add("kubernetes", func() telegraf.Input { - return &Kubernetes{} + return &Kubernetes{ + LabelInclude: []string{}, + LabelExclude: []string{"*"}, + } }) } @@ -68,15 +88,33 @@ func (k *Kubernetes) Description() string { return "Read metrics from the kubernetes kubelet api" } +func (k *Kubernetes) Init() error { + + // If neither are provided, use the default service account. + if k.BearerToken == "" && k.BearerTokenString == "" { + k.BearerToken = defaultServiceAccountPath + } + + if k.BearerToken != "" { + token, err := ioutil.ReadFile(k.BearerToken) + if err != nil { + return err + } + k.BearerTokenString = strings.TrimSpace(string(token)) + } + + labelFilter, err := filter.NewIncludeExcludeFilter(k.LabelInclude, k.LabelExclude) + if err != nil { + return err + } + k.labelFilter = labelFilter + + return nil +} + //Gather collects kubernetes metrics from a given URL func (k *Kubernetes) Gather(acc telegraf.Accumulator) error { - var wg sync.WaitGroup - wg.Add(1) - go func(k *Kubernetes) { - defer wg.Done() - acc.AddError(k.gatherSummary(k.URL, acc)) - }(k) - wg.Wait() + acc.AddError(k.gatherSummary(k.URL, acc)) return nil } @@ -90,54 +128,19 @@ func buildURL(endpoint string, base string) (*url.URL, error) { } func (k *Kubernetes) gatherSummary(baseURL string, acc telegraf.Accumulator) error { - url := fmt.Sprintf("%s/stats/summary", baseURL) - var req, err = http.NewRequest("GET", url, nil) - var token []byte - var resp *http.Response - - tlsCfg, err := k.ClientConfig.TLSConfig() + summaryMetrics := &SummaryMetrics{} + err := k.LoadJson(fmt.Sprintf("%s/stats/summary", baseURL), summaryMetrics) if err != nil { return err } - if k.RoundTripper == nil { - // Set default values - if k.ResponseTimeout.Duration < time.Second { - k.ResponseTimeout.Duration = time.Second * 5 - } - k.RoundTripper = &http.Transport{ - TLSHandshakeTimeout: 5 * time.Second, - TLSClientConfig: tlsCfg, - ResponseHeaderTimeout: k.ResponseTimeout.Duration, - } - } - - if k.BearerToken != "" { - token, err = ioutil.ReadFile(k.BearerToken) - if err != nil { - return err - } - req.Header.Set("Authorization", "Bearer "+string(token)) - } - - resp, err = k.RoundTripper.RoundTrip(req) + podInfos, err := k.gatherPodInfo(baseURL) if err != nil { - return fmt.Errorf("error making HTTP request to %s: %s", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status %s", url, resp.Status) - } - - summaryMetrics := &SummaryMetrics{} - err = json.NewDecoder(resp.Body).Decode(summaryMetrics) - if err != nil { - return fmt.Errorf(`Error parsing response: %s`, err) + return err } buildSystemContainerMetrics(summaryMetrics, acc) buildNodeMetrics(summaryMetrics, acc) - buildPodMetrics(summaryMetrics, acc) + buildPodMetrics(baseURL, summaryMetrics, podInfos, k.labelFilter, acc) return nil } @@ -157,7 +160,7 @@ func buildSystemContainerMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Ac fields["memory_major_page_faults"] = container.Memory.MajorPageFaults fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes - fields["logsfs_avaialble_bytes"] = 
container.LogsFS.AvailableBytes + fields["logsfs_available_bytes"] = container.LogsFS.AvailableBytes fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes acc.AddFields("kubernetes_system_container", fields, tags) } @@ -189,7 +192,56 @@ func buildNodeMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) acc.AddFields("kubernetes_node", fields, tags) } -func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) { +func (k *Kubernetes) gatherPodInfo(baseURL string) ([]Metadata, error) { + var podApi Pods + err := k.LoadJson(fmt.Sprintf("%s/pods", baseURL), &podApi) + if err != nil { + return nil, err + } + var podInfos []Metadata + for _, podMetadata := range podApi.Items { + podInfos = append(podInfos, podMetadata.Metadata) + } + return podInfos, nil +} + +func (k *Kubernetes) LoadJson(url string, v interface{}) error { + var req, err = http.NewRequest("GET", url, nil) + var resp *http.Response + tlsCfg, err := k.ClientConfig.TLSConfig() + if err != nil { + return err + } + if k.RoundTripper == nil { + if k.ResponseTimeout.Duration < time.Second { + k.ResponseTimeout.Duration = time.Second * 5 + } + k.RoundTripper = &http.Transport{ + TLSHandshakeTimeout: 5 * time.Second, + TLSClientConfig: tlsCfg, + ResponseHeaderTimeout: k.ResponseTimeout.Duration, + } + } + req.Header.Set("Authorization", "Bearer "+k.BearerTokenString) + req.Header.Add("Accept", "application/json") + resp, err = k.RoundTripper.RoundTrip(req) + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", url, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + } + + err = json.NewDecoder(resp.Body).Decode(v) + if err != nil { + return fmt.Errorf(`Error parsing response: %s`, err) + } + + return nil +} + +func buildPodMetrics(baseURL string, summaryMetrics *SummaryMetrics, podInfo []Metadata, labelFilter filter.Filter, acc telegraf.Accumulator) { for _, pod := range summaryMetrics.Pods { for _, container := range pod.Containers { tags := map[string]string{ @@ -198,6 +250,16 @@ func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) { "container_name": container.Name, "pod_name": pod.PodRef.Name, } + for _, info := range podInfo { + if info.Name == pod.PodRef.Name && info.Namespace == pod.PodRef.Namespace { + for k, v := range info.Labels { + if labelFilter.Match(k) { + tags[k] = v + } + } + } + } + fields := make(map[string]interface{}) fields["cpu_usage_nanocores"] = container.CPU.UsageNanoCores fields["cpu_usage_core_nanoseconds"] = container.CPU.UsageCoreNanoSeconds @@ -209,7 +271,7 @@ func buildPodMetrics(summaryMetrics *SummaryMetrics, acc telegraf.Accumulator) { fields["rootfs_available_bytes"] = container.RootFS.AvailableBytes fields["rootfs_capacity_bytes"] = container.RootFS.CapacityBytes fields["rootfs_used_bytes"] = container.RootFS.UsedBytes - fields["logsfs_avaialble_bytes"] = container.LogsFS.AvailableBytes + fields["logsfs_available_bytes"] = container.LogsFS.AvailableBytes fields["logsfs_capacity_bytes"] = container.LogsFS.CapacityBytes fields["logsfs_used_bytes"] = container.LogsFS.UsedBytes acc.AddFields("kubernetes_pod_container", fields, tags) diff --git a/plugins/inputs/kubernetes/kubernetes_metrics.go b/plugins/inputs/kubernetes/kubernetes_metrics.go index 96814bcbe..d45d4b5f1 100644 --- a/plugins/inputs/kubernetes/kubernetes_metrics.go +++ b/plugins/inputs/kubernetes/kubernetes_metrics.go @@ -2,7 +2,7 @@ package kubernetes import 
"time" -// SummaryMetrics represents all the summary data about a paritcular node retrieved from a kubelet +// SummaryMetrics represents all the summary data about a particular node retrieved from a kubelet type SummaryMetrics struct { Node NodeMetrics `json:"node"` Pods []PodMetrics `json:"pods"` diff --git a/plugins/inputs/kubernetes/kubernetes_pods.go b/plugins/inputs/kubernetes/kubernetes_pods.go new file mode 100644 index 000000000..672608e54 --- /dev/null +++ b/plugins/inputs/kubernetes/kubernetes_pods.go @@ -0,0 +1,17 @@ +package kubernetes + +type Pods struct { + Kind string `json:"kind"` + ApiVersion string `json:"apiVersion"` + Items []Item `json:"items"` +} + +type Item struct { + Metadata Metadata `json:"metadata"` +} + +type Metadata struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + Labels map[string]string `json:"labels"` +} diff --git a/plugins/inputs/kubernetes/kubernetes_test.go b/plugins/inputs/kubernetes/kubernetes_test.go index 289e36ae4..faf40be3e 100644 --- a/plugins/inputs/kubernetes/kubernetes_test.go +++ b/plugins/inputs/kubernetes/kubernetes_test.go @@ -2,6 +2,7 @@ package kubernetes import ( "fmt" + "github.com/influxdata/telegraf/filter" "net/http" "net/http/httptest" "testing" @@ -12,13 +13,23 @@ import ( func TestKubernetesStats(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, response) + if r.RequestURI == "/stats/summary" { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, responseStatsSummery) + } + if r.RequestURI == "/pods" { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, responsePods) + } + })) defer ts.Close() + labelFilter, _ := filter.NewIncludeExcludeFilter([]string{"app", "superkey"}, nil) + k := &Kubernetes{ - URL: ts.URL, + URL: ts.URL, + labelFilter: labelFilter, } var acc testutil.Accumulator @@ -35,7 +46,7 @@ func TestKubernetesStats(t *testing.T) { "memory_major_page_faults": int64(13), "rootfs_available_bytes": int64(84379979776), "rootfs_capacity_bytes": int64(105553100800), - "logsfs_avaialble_bytes": int64(84379979776), + "logsfs_available_bytes": int64(84379979776), "logsfs_capacity_bytes": int64(105553100800), } tags := map[string]string{ @@ -80,7 +91,7 @@ func TestKubernetesStats(t *testing.T) { "rootfs_available_bytes": int64(84379979776), "rootfs_capacity_bytes": int64(105553100800), "rootfs_used_bytes": int64(57344), - "logsfs_avaialble_bytes": int64(84379979776), + "logsfs_available_bytes": int64(84379979776), "logsfs_capacity_bytes": int64(105553100800), "logsfs_used_bytes": int64(24576), } @@ -89,6 +100,8 @@ func TestKubernetesStats(t *testing.T) { "container_name": "foocontainer", "namespace": "foons", "pod_name": "foopod", + "app": "foo", + "superkey": "foobar", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags) @@ -103,7 +116,7 @@ func TestKubernetesStats(t *testing.T) { "rootfs_available_bytes": int64(0), "rootfs_capacity_bytes": int64(0), "rootfs_used_bytes": int64(0), - "logsfs_avaialble_bytes": int64(0), + "logsfs_available_bytes": int64(0), "logsfs_capacity_bytes": int64(0), "logsfs_used_bytes": int64(0), } @@ -112,6 +125,8 @@ func TestKubernetesStats(t *testing.T) { "container_name": "stopped-container", "namespace": "foons", "pod_name": "stopped-pod", + "app": "foo-stop", + "superkey": "superfoo", } acc.AssertContainsTaggedFields(t, "kubernetes_pod_container", fields, tags) @@ -143,7 +158,39 @@ func TestKubernetesStats(t *testing.T) { } -var response = ` +var 
responsePods = `
+{
+  "kind": "PodList",
+  "apiVersion": "v1",
+  "metadata": {},
+  "items": [
+    {
+      "metadata": {
+        "name": "foopod",
+        "namespace": "foons",
+        "labels": {
+          "superkey": "foobar",
+          "app": "foo",
+          "exclude": "exclude0"
+        }
+      }
+    },
+    {
+      "metadata": {
+        "name": "stopped-pod",
+        "namespace": "foons",
+        "labels": {
+          "superkey": "superfoo",
+          "app": "foo-stop",
+          "exclude": "exclude1"
+        }
+      }
+    }
+  ]
+}
+`
+
+var responseStatsSummery = `
 {
   "node": {
     "nodeName": "node1",
diff --git a/plugins/inputs/lanz/README.md b/plugins/inputs/lanz/README.md
new file mode 100644
index 000000000..32033d6ab
--- /dev/null
+++ b/plugins/inputs/lanz/README.md
@@ -0,0 +1,87 @@
+# Arista LANZ Consumer Input Plugin
+
+This plugin provides a consumer for use with Arista Networks’ Latency Analyzer (LANZ).
+
+Metrics are read from a stream of data via TCP through port 50001 on the
+switch's management IP. The data is in Protocol Buffers format. For more information on Arista LANZ, see:
+
+- https://www.arista.com/en/um-eos/eos-latency-analyzer-lanz
+
+This plugin uses Arista's goarista SDK:
+
+- https://github.com/aristanetworks/goarista
+
+### Configuration
+
+You will need to configure LANZ and enable streaming of LANZ data.
+
+- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz
+- https://www.arista.com/en/um-eos/eos-section-44-3-configuring-lanz#ww1149292
+
+```toml
+[[inputs.lanz]]
+  servers = [
+    "tcp://switch1.int.example.com:50001",
+    "tcp://switch2.int.example.com:50001",
+  ]
+```
+
+### Metrics
+
+For more details on the metrics, see https://github.com/aristanetworks/goarista/blob/master/lanz/proto/lanz.proto
+
+- lanz_congestion_record:
+  - tags:
+    - intf_name
+    - switch_id
+    - port_id
+    - entry_type
+    - traffic_class
+    - fabric_peer_intf_name
+    - source
+    - port
+  - fields:
+    - timestamp (integer)
+    - queue_size (integer)
+    - time_of_max_qlen (integer)
+    - tx_latency (integer)
+    - q_drop_count (integer)
+
++ lanz_global_buffer_usage_record
+  - tags:
+    - entry_type
+    - source
+    - port
+  - fields:
+    - timestamp (integer)
+    - buffer_size (integer)
+    - duration (integer)
+
+
+### Sample Queries
+
+Get the max tx_latency for the last hour for all interfaces on all switches.
+```
+SELECT max("tx_latency") AS "max_tx_latency" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name"
+```
+
+Get the max queue_size for the last hour for all interfaces on all switches.
+```
+SELECT max("queue_size") AS "max_queue_size" FROM "congestion_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname", "intf_name"
+```
+
+Get the max buffer_size over the last hour for all switches.
+``` +SELECT max("buffer_size") AS "max_buffer_size" FROM "global_buffer_usage_record" WHERE time > now() - 1h GROUP BY time(10s), "hostname" +``` + +### Example output +``` +lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=505i,duration=0i 1583341058300643815 +lanz_congestion_record,entry_type=2,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 time_of_max_qlen=0i,tx_latency=564480i,q_drop_count=0i,timestamp=158334105824919i,queue_size=225i 1583341058300636045 +lanz_global_buffer_usage_record,entry_type=2,host=telegraf.int.example.com,port=50001,source=switch01.int.example.com timestamp=158334105824919i,buffer_size=589i,duration=0i 1583341058300457464 +lanz_congestion_record,entry_type=1,host=telegraf.int.example.com,intf_name=Ethernet36,port=50001,port_id=61,source=switch01.int.example.com,switch_id=0,traffic_class=1 q_drop_count=0i,timestamp=158334105824919i,queue_size=232i,time_of_max_qlen=0i,tx_latency=584640i 1583341058300450302 +``` + + diff --git a/plugins/inputs/lanz/lanz.go b/plugins/inputs/lanz/lanz.go new file mode 100644 index 000000000..7553c33c7 --- /dev/null +++ b/plugins/inputs/lanz/lanz.go @@ -0,0 +1,137 @@ +package lanz + +import ( + "net/url" + "strconv" + "sync" + "time" + + "github.com/aristanetworks/goarista/lanz" + pb "github.com/aristanetworks/goarista/lanz/proto" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +var sampleConfig = ` + ## URL to Arista LANZ endpoint + servers = [ + "tcp://127.0.0.1:50001" + ] +` + +func init() { + inputs.Add("lanz", func() telegraf.Input { + return NewLanz() + }) +} + +type Lanz struct { + Servers []string `toml:"servers"` + clients []lanz.Client + wg sync.WaitGroup +} + +func NewLanz() *Lanz { + return &Lanz{} +} + +func (l *Lanz) SampleConfig() string { + return sampleConfig +} + +func (l *Lanz) Description() string { + return "Read metrics off Arista LANZ, via socket" +} + +func (l *Lanz) Gather(acc telegraf.Accumulator) error { + return nil +} + +func (l *Lanz) Start(acc telegraf.Accumulator) error { + + if len(l.Servers) == 0 { + l.Servers = append(l.Servers, "tcp://127.0.0.1:50001") + } + + for _, server := range l.Servers { + deviceUrl, err := url.Parse(server) + if err != nil { + return err + } + client := lanz.New( + lanz.WithAddr(deviceUrl.Host), + lanz.WithBackoff(1*time.Second), + lanz.WithTimeout(10*time.Second), + ) + l.clients = append(l.clients, client) + + in := make(chan *pb.LanzRecord) + go func() { + client.Run(in) + }() + l.wg.Add(1) + go func() { + l.wg.Done() + receive(acc, in, deviceUrl) + }() + } + return nil +} + +func (l *Lanz) Stop() { + for _, client := range l.clients { + client.Stop() + } + l.wg.Wait() +} + +func receive(acc telegraf.Accumulator, in <-chan *pb.LanzRecord, deviceUrl *url.URL) { + for { + select { + case msg, ok := <-in: + if !ok { + return + } + msgToAccumulator(acc, msg, deviceUrl) + } + } +} + +func msgToAccumulator(acc telegraf.Accumulator, msg *pb.LanzRecord, deviceUrl *url.URL) { + cr := msg.GetCongestionRecord() + if cr != nil { + vals := map[string]interface{}{ + "timestamp": int64(cr.GetTimestamp()), + "queue_size": int64(cr.GetQueueSize()), + "time_of_max_qlen": int64(cr.GetTimeOfMaxQLen()), + "tx_latency": int64(cr.GetTxLatency()), + "q_drop_count": int64(cr.GetQDropCount()), + } + tags := map[string]string{ + "intf_name": cr.GetIntfName(), + 
"switch_id": strconv.FormatInt(int64(cr.GetSwitchId()), 10), + "port_id": strconv.FormatInt(int64(cr.GetPortId()), 10), + "entry_type": strconv.FormatInt(int64(cr.GetEntryType()), 10), + "traffic_class": strconv.FormatInt(int64(cr.GetTrafficClass()), 10), + "fabric_peer_intf_name": cr.GetFabricPeerIntfName(), + "source": deviceUrl.Hostname(), + "port": deviceUrl.Port(), + } + acc.AddFields("lanz_congestion_record", vals, tags) + } + + gbur := msg.GetGlobalBufferUsageRecord() + if gbur != nil { + vals := map[string]interface{}{ + "timestamp": int64(gbur.GetTimestamp()), + "buffer_size": int64(gbur.GetBufferSize()), + "duration": int64(gbur.GetDuration()), + } + tags := map[string]string{ + "entry_type": strconv.FormatInt(int64(gbur.GetEntryType()), 10), + "source": deviceUrl.Hostname(), + "port": deviceUrl.Port(), + } + acc.AddFields("lanz_global_buffer_usage_record", vals, tags) + } +} diff --git a/plugins/inputs/lanz/lanz_test.go b/plugins/inputs/lanz/lanz_test.go new file mode 100644 index 000000000..5f9c7ab24 --- /dev/null +++ b/plugins/inputs/lanz/lanz_test.go @@ -0,0 +1,137 @@ +package lanz + +import ( + "net/url" + "strconv" + "testing" + + pb "github.com/aristanetworks/goarista/lanz/proto" + "github.com/golang/protobuf/proto" + "github.com/influxdata/telegraf/testutil" +) + +var testProtoBufCongestionRecord1 = &pb.LanzRecord{ + CongestionRecord: &pb.CongestionRecord{ + Timestamp: proto.Uint64(100000000000000), + IntfName: proto.String("eth1"), + SwitchId: proto.Uint32(1), + PortId: proto.Uint32(1), + QueueSize: proto.Uint32(1), + EntryType: pb.CongestionRecord_EntryType.Enum(1), + TrafficClass: proto.Uint32(1), + TimeOfMaxQLen: proto.Uint64(100000000000000), + TxLatency: proto.Uint32(100), + QDropCount: proto.Uint32(1), + FabricPeerIntfName: proto.String("FabricPeerIntfName1"), + }, +} +var testProtoBufCongestionRecord2 = &pb.LanzRecord{ + CongestionRecord: &pb.CongestionRecord{ + Timestamp: proto.Uint64(200000000000000), + IntfName: proto.String("eth2"), + SwitchId: proto.Uint32(2), + PortId: proto.Uint32(2), + QueueSize: proto.Uint32(2), + EntryType: pb.CongestionRecord_EntryType.Enum(2), + TrafficClass: proto.Uint32(2), + TimeOfMaxQLen: proto.Uint64(200000000000000), + TxLatency: proto.Uint32(200), + QDropCount: proto.Uint32(2), + FabricPeerIntfName: proto.String("FabricPeerIntfName2"), + }, +} + +var testProtoBufGlobalBufferUsageRecord = &pb.LanzRecord{ + GlobalBufferUsageRecord: &pb.GlobalBufferUsageRecord{ + EntryType: pb.GlobalBufferUsageRecord_EntryType.Enum(1), + Timestamp: proto.Uint64(100000000000000), + BufferSize: proto.Uint32(1), + Duration: proto.Uint32(10), + }, +} + +func TestLanzGeneratesMetrics(t *testing.T) { + + var acc testutil.Accumulator + + l := NewLanz() + + l.Servers = append(l.Servers, "tcp://switch01.int.example.com:50001") + l.Servers = append(l.Servers, "tcp://switch02.int.example.com:50001") + deviceUrl1, err := url.Parse(l.Servers[0]) + if err != nil { + t.Fail() + } + deviceUrl2, err := url.Parse(l.Servers[1]) + if err != nil { + t.Fail() + } + + msgToAccumulator(&acc, testProtoBufCongestionRecord1, deviceUrl1) + acc.Wait(1) + + vals1 := map[string]interface{}{ + "timestamp": int64(100000000000000), + "queue_size": int64(1), + "time_of_max_qlen": int64(100000000000000), + "tx_latency": int64(100), + "q_drop_count": int64(1), + } + tags1 := map[string]string{ + "intf_name": "eth1", + "switch_id": strconv.FormatInt(int64(1), 10), + "port_id": strconv.FormatInt(int64(1), 10), + "entry_type": strconv.FormatInt(int64(1), 10), + "traffic_class": 
strconv.FormatInt(int64(1), 10), + "fabric_peer_intf_name": "FabricPeerIntfName1", + "source": "switch01.int.example.com", + "port": "50001", + } + + acc.AssertContainsFields(t, "lanz_congestion_record", vals1) + acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals1, tags1) + + acc.ClearMetrics() + msgToAccumulator(&acc, testProtoBufCongestionRecord2, deviceUrl2) + acc.Wait(1) + + vals2 := map[string]interface{}{ + "timestamp": int64(200000000000000), + "queue_size": int64(2), + "time_of_max_qlen": int64(200000000000000), + "tx_latency": int64(200), + "q_drop_count": int64(2), + } + tags2 := map[string]string{ + "intf_name": "eth2", + "switch_id": strconv.FormatInt(int64(2), 10), + "port_id": strconv.FormatInt(int64(2), 10), + "entry_type": strconv.FormatInt(int64(2), 10), + "traffic_class": strconv.FormatInt(int64(2), 10), + "fabric_peer_intf_name": "FabricPeerIntfName2", + "source": "switch02.int.example.com", + "port": "50001", + } + + acc.AssertContainsFields(t, "lanz_congestion_record", vals2) + acc.AssertContainsTaggedFields(t, "lanz_congestion_record", vals2, tags2) + + acc.ClearMetrics() + msgToAccumulator(&acc, testProtoBufGlobalBufferUsageRecord, deviceUrl1) + acc.Wait(1) + + gburVals1 := map[string]interface{}{ + "timestamp": int64(100000000000000), + "buffer_size": int64(1), + "duration": int64(10), + } + gburTags1 := map[string]string{ + "entry_type": strconv.FormatInt(int64(1), 10), + "source": "switch01.int.example.com", + "port": "50001", + } + + acc.AssertContainsFields(t, "lanz_global_buffer_usage_record", gburVals1) + acc.AssertContainsTaggedFields(t, "lanz_global_buffer_usage_record", gburVals1, gburTags1) + +} diff --git a/plugins/inputs/system/LINUX_SYSCTL_FS_README.md b/plugins/inputs/linux_sysctl_fs/README.md similarity index 100% rename from plugins/inputs/system/LINUX_SYSCTL_FS_README.md rename to plugins/inputs/linux_sysctl_fs/README.md diff --git a/plugins/inputs/system/linux_sysctl_fs.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go similarity index 98% rename from plugins/inputs/system/linux_sysctl_fs.go rename to plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go index 55ebcb668..ed2496340 100644 --- a/plugins/inputs/system/linux_sysctl_fs.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs.go @@ -1,4 +1,4 @@ -package system +package linux_sysctl_fs import ( "bytes" diff --git a/plugins/inputs/system/linux_sysctl_fs_test.go b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go similarity index 98% rename from plugins/inputs/system/linux_sysctl_fs_test.go rename to plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go index 6561465cb..78011e288 100644 --- a/plugins/inputs/system/linux_sysctl_fs_test.go +++ b/plugins/inputs/linux_sysctl_fs/linux_sysctl_fs_test.go @@ -1,4 +1,4 @@ -package system +package linux_sysctl_fs import ( "io/ioutil" diff --git a/plugins/inputs/logparser/README.md b/plugins/inputs/logparser/README.md index 1caa3830c..0abdba2c9 100644 --- a/plugins/inputs/logparser/README.md +++ b/plugins/inputs/logparser/README.md @@ -4,7 +4,41 @@ The `logparser` plugin streams and parses the given logfiles. Currently it has the capability of parsing "grok" patterns from logfiles, which also supports regex patterns. -### Configuration: +**Deprecated in Telegraf 1.15**: Please use the [tail][] plugin along with the [`grok` data format][grok parser]. + +The `tail` plugin now provides all the functionality of the `logparser` plugin. 
+Most options can be translated directly to the `tail` plugin: +- For options in the `[inputs.logparser.grok]` section, the equivalent option + will have add the `grok_` prefix when using them in the `tail` input. +- The grok `measurement` option can be replaced using the standard plugin + `name_override` option. + +Migration Example: +```diff +- [[inputs.logparser]] +- files = ["/var/log/apache/access.log"] +- from_beginning = false +- [inputs.logparser.grok] +- patterns = ["%{COMBINED_LOG_FORMAT}"] +- measurement = "apache_access_log" +- custom_pattern_files = [] +- custom_patterns = ''' +- ''' +- timezone = "Canada/Eastern" + ++ [[inputs.tail]] ++ files = ["/var/log/apache/access.log"] ++ from_beginning = false ++ grok_patterns = ["%{COMBINED_LOG_FORMAT}"] ++ name_override = "apache_access_log" ++ grok_custom_pattern_files = [] ++ grok_custom_patterns = ''' ++ ''' ++ grok_timezone = "Canada/Eastern" ++ data_format = "grok" +``` + +### Configuration ```toml [[inputs.logparser]] @@ -25,7 +59,6 @@ regex patterns. # watch_method = "inotify" ## Parse logstash-style "grok" patterns: - ## Telegraf built-in parsing patterns: https://goo.gl/dkay10 [inputs.logparser.grok] ## This is a list of patterns to check the given log file(s) for. ## Note that adding patterns here increases processing time. The most @@ -54,188 +87,18 @@ regex patterns. ## 1. Local -- interpret based on machine localtime ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones ## 3. UTC -- or blank/unspecified, will return timestamp in UTC - timezone = "Canada/Eastern" + # timezone = "Canada/Eastern" ``` ### Grok Parser -The best way to get acquainted with grok patterns is to read the logstash docs, -which are available here: - https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html +Reference the [grok parser][] documentation to setup the grok section of the +configuration. -The Telegraf grok parser uses a slightly modified version of logstash "grok" -patterns, with the format - -``` -%{[:][:]} -``` - -The `capture_syntax` defines the grok pattern that's used to parse the input -line and the `semantic_name` is used to name the field or tag. The extension -`modifier` controls the data type that the parsed item is converted to or -other special handling. - -By default all named captures are converted into string fields. -Timestamp modifiers can be used to convert captures to the timestamp of the -parsed metric. If no timestamp is parsed the metric will be created using the -current time. - -You must capture at least one field per line. 
- -- Available modifiers: - - string (default if nothing is specified) - - int - - float - - duration (ie, 5.23ms gets converted to int nanoseconds) - - tag (converts the field into a tag) - - drop (drops the field completely) -- Timestamp modifiers: - - ts (This will auto-learn the timestamp format) - - ts-ansic ("Mon Jan _2 15:04:05 2006") - - ts-unix ("Mon Jan _2 15:04:05 MST 2006") - - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") - - ts-rfc822 ("02 Jan 06 15:04 MST") - - ts-rfc822z ("02 Jan 06 15:04 -0700") - - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") - - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") - - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") - - ts-rfc3339 ("2006-01-02T15:04:05Z07:00") - - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") - - ts-httpd ("02/Jan/2006:15:04:05 -0700") - - ts-epoch (seconds since unix epoch, may contain decimal) - - ts-epochnano (nanoseconds since unix epoch) - - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year) - - ts-"CUSTOM" - -CUSTOM time layouts must be within quotes and be the representation of the -"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`. -To match a comma decimal point you can use a period. For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"` -To match a comma decimal point you can use a period in the pattern string. -See https://golang.org/pkg/time/#Parse for more details. - -Telegraf has many of its own [built-in patterns](./grok/patterns/influx-patterns), -as well as support for most of -[logstash's builtin patterns](https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns). -_Golang regular expressions do not support lookahead or lookbehind. -logstash patterns that depend on these are not supported._ - -If you need help building patterns to match your logs, -you will find the https://grokdebug.herokuapp.com application quite useful! - -#### Timestamp Examples - -This example input and config parses a file using a custom timestamp conversion: - -``` -2017-02-21 13:10:34 value=42 -``` - -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] -``` - -This example input and config parses a file using a timestamp in unix time: - -``` -1466004605 value=42 -1466004605.123456789 value=42 -``` - -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] -``` - -This example parses a file using a built-in conversion and a custom pattern: - -``` -Wed Apr 12 13:10:34 PST 2017 value=42 -``` - -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] - custom_patterns = ''' - TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} - ''' -``` - -For cases where the timestamp itself is without offset, the `timezone` config var is available -to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times -are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp -will be processed based on the current machine timezone configuration. Lastly, if using a -timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), the logparser grok will attempt to offset -the timestamp accordingly. See test cases for more detailed examples. 
- -#### TOML Escaping - -When saving patterns to the configuration file, keep in mind the different TOML -[string](https://github.com/toml-lang/toml#string) types and the escaping -rules for each. These escaping rules must be applied in addition to the -escaping required by the grok syntax. Using the Multi-line line literal -syntax with `'''` may be useful. - -The following config examples will parse this input file: - -``` -|42|\uD83D\uDC2F|'telegraf'| -``` - -Since `|` is a special character in the grok language, we must escape it to -get a literal `|`. With a basic TOML string, special characters such as -backslash must be escaped, requiring us to escape the backslash a second time. - -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] - custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" -``` - -We cannot use a literal TOML string for the pattern, because we cannot match a -`'` within it. However, it works well for the custom pattern. -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] - custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' -``` - -A multi-line literal string allows us to encode the pattern: -```toml -[[inputs.logparser]] - [inputs.logparser.grok] - patterns = [''' - \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\| - '''] - custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' -``` - -### Tips for creating patterns - -Writing complex patterns can be difficult, here is some advice for writing a -new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com). - -Create a file output that writes to stdout, and disable other outputs while -testing. This will allow you to see the captured metrics. Keep in mind that -the file output will only print once per `flush_interval`. - -```toml -[[outputs.file]] - files = ["stdout"] -``` - -- Start with a file containing only a single line of your input. -- Remove all but the first token or piece of the line. -- Add the section of your pattern to match this piece to your configuration file. -- Verify that the metric is parsed successfully by running Telegraf. -- If successful, add the next token, update the pattern and retest. -- Continue one token at a time until the entire line is successfully parsed. ### Additional Resources - https://www.influxdata.com/telegraf-correlate-log-metrics-data-performance-bottlenecks/ + +[tail]: /plugins/inputs/tail/README.md +[grok parser]: /plugins/parsers/grok/README.md diff --git a/plugins/inputs/logparser/grok/patterns/influx-patterns b/plugins/inputs/logparser/grok/patterns/influx-patterns deleted file mode 100644 index 931b61bc8..000000000 --- a/plugins/inputs/logparser/grok/patterns/influx-patterns +++ /dev/null @@ -1,73 +0,0 @@ -# Captures are a slightly modified version of logstash "grok" patterns, with -# the format %{[:][:]} -# By default all named captures are converted into string fields. -# Modifiers can be used to convert captures to other types or tags. -# Timestamp modifiers can be used to convert captures to the timestamp of the -# parsed metric. 
- -# View logstash grok pattern docs here: -# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html -# All default logstash patterns are supported, these can be viewed here: -# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns - -# Available modifiers: -# string (default if nothing is specified) -# int -# float -# duration (ie, 5.23ms gets converted to int nanoseconds) -# tag (converts the field into a tag) -# drop (drops the field completely) -# Timestamp modifiers: -# ts-ansic ("Mon Jan _2 15:04:05 2006") -# ts-unix ("Mon Jan _2 15:04:05 MST 2006") -# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") -# ts-rfc822 ("02 Jan 06 15:04 MST") -# ts-rfc822z ("02 Jan 06 15:04 -0700") -# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") -# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") -# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") -# ts-rfc3339 ("2006-01-02T15:04:05Z07:00") -# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") -# ts-httpd ("02/Jan/2006:15:04:05 -0700") -# ts-epoch (seconds since unix epoch) -# ts-epochnano (nanoseconds since unix epoch) -# ts-"CUSTOM" -# CUSTOM time layouts must be within quotes and be the representation of the -# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006 -# See https://golang.org/pkg/time/#Parse for more details. - -# Example log file pattern, example log looks like this: -# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs -# Breakdown of the DURATION pattern below: -# NUMBER is a builtin logstash grok pattern matching float & int numbers. -# [nuµm]? is a regex specifying 0 or 1 of the characters within brackets. -# s is also regex, this pattern must end in "s". -# so DURATION will match something like '5.324ms' or '6.1µs' or '10s' -DURATION %{NUMBER}[nuµm]?s -RESPONSE_CODE %{NUMBER:response_code:tag} -RESPONSE_TIME %{DURATION:response_time_ns:duration} -EXAMPLE_LOG \[%{HTTPDATE:ts:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} - -# Wider-ranging username matching vs. logstash built-in %{USER} -NGUSERNAME [a-zA-Z0-9\.\@\-\+_%]+ -NGUSER %{NGUSERNAME} -# Wider-ranging client IP matching -CLIENT (?:%{IPORHOST}|%{HOSTPORT}|::1) - -## -## COMMON LOG PATTERNS -## - -# apache & nginx logs, this is also known as the "common log format" -# see https://en.wikipedia.org/wiki/Common_Log_Format -COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTTPDATE:ts:ts-httpd}\] "(?:%{WORD:verb:tag} %{NOTSPACE:request}(?: HTTP/%{NUMBER:http_version:float})?|%{DATA})" %{NUMBER:resp_code:tag} (?:%{NUMBER:resp_bytes:int}|-) - -# Combined log format is the same as the common log format but with the addition -# of two quoted strings at the end for "referrer" and "agent" -# See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html -COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent} - -# HTTPD log formats -HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg} -HTTPD24_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{WORD:module}:%{LOGLEVEL:loglevel:tag}\] \[pid %{POSINT:pid:int}:tid %{NUMBER:tid:int}\]( \(%{POSINT:proxy_errorcode:int}\)%{DATA:proxy_errormessage}:)?( \[client %{IPORHOST:client}:%{POSINT:clientport}\])? 
%{DATA:errorcode}: %{GREEDYDATA:message} -HTTPD_ERRORLOG %{HTTPD20_ERRORLOG}|%{HTTPD24_ERRORLOG} diff --git a/plugins/inputs/logparser/logparser.go b/plugins/inputs/logparser/logparser.go index 8eb866084..4fbd2e90d 100644 --- a/plugins/inputs/logparser/logparser.go +++ b/plugins/inputs/logparser/logparser.go @@ -4,29 +4,34 @@ package logparser import ( "fmt" - "log" - "reflect" "strings" "sync" "github.com/influxdata/tail" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" - - // Parsers - "github.com/influxdata/telegraf/plugins/inputs/logparser/grok" + "github.com/influxdata/telegraf/plugins/parsers" ) const ( defaultWatchMethod = "inotify" ) +var ( + offsets = make(map[string]int64) + offsetsMutex = new(sync.Mutex) +) + // LogParser in the primary interface for the plugin -type LogParser interface { - ParseLine(line string) (telegraf.Metric, error) - Compile() error +type GrokConfig struct { + MeasurementName string `toml:"measurement"` + Patterns []string + NamedPatterns []string + CustomPatterns string + CustomPatternFiles []string + Timezone string + UniqueTimestamp string } type logEntry struct { @@ -40,16 +45,33 @@ type LogParserPlugin struct { FromBeginning bool WatchMethod string + Log telegraf.Logger + tailers map[string]*tail.Tail + offsets map[string]int64 lines chan logEntry done chan struct{} wg sync.WaitGroup acc telegraf.Accumulator - parsers []LogParser sync.Mutex - GrokParser *grok.Parser `toml:"grok"` + GrokParser parsers.Parser + GrokConfig GrokConfig `toml:"grok"` +} + +func NewLogParser() *LogParserPlugin { + offsetsMutex.Lock() + offsetsCopy := make(map[string]int64, len(offsets)) + for k, v := range offsets { + offsetsCopy[k] = v + } + offsetsMutex.Unlock() + + return &LogParserPlugin{ + WatchMethod: defaultWatchMethod, + offsets: offsetsCopy, + } } const sampleConfig = ` @@ -87,6 +109,7 @@ const sampleConfig = ` ## Custom patterns can also be defined here. Put one pattern per line. custom_patterns = ''' + ''' ## Timezone allows you to provide an override for timestamps that ## don't already include an offset @@ -97,8 +120,11 @@ const sampleConfig = ` ## 1. Local -- interpret based on machine localtime ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones ## 3. UTC -- or blank/unspecified, will return timestamp in UTC - timezone = "Canada/Eastern" - ''' + # timezone = "Canada/Eastern" + + ## When set to "disable", timestamp will not incremented if there is a + ## duplicate. + # unique_timestamp = "auto" ` // SampleConfig returns the sample configuration for the plugin @@ -111,6 +137,11 @@ func (l *LogParserPlugin) Description() string { return "Stream and parse log file(s)." 
} +func (l *LogParserPlugin) Init() error { + l.Log.Warnf(`The logparser plugin is deprecated; please use the 'tail' input with the 'grok' data_format`) + return nil +} + // Gather is the primary function to collect the metrics for the plugin func (l *LogParserPlugin) Gather(acc telegraf.Accumulator) error { l.Lock() @@ -130,50 +161,47 @@ func (l *LogParserPlugin) Start(acc telegraf.Accumulator) error { l.done = make(chan struct{}) l.tailers = make(map[string]*tail.Tail) + mName := "logparser" + if l.GrokConfig.MeasurementName != "" { + mName = l.GrokConfig.MeasurementName + } + // Looks for fields which implement LogParser interface - l.parsers = []LogParser{} - s := reflect.ValueOf(l).Elem() - for i := 0; i < s.NumField(); i++ { - f := s.Field(i) - - if !f.CanInterface() { - continue - } - - if lpPlugin, ok := f.Interface().(LogParser); ok { - if reflect.ValueOf(lpPlugin).IsNil() { - continue - } - l.parsers = append(l.parsers, lpPlugin) - } + config := &parsers.Config{ + MetricName: mName, + GrokPatterns: l.GrokConfig.Patterns, + GrokNamedPatterns: l.GrokConfig.NamedPatterns, + GrokCustomPatterns: l.GrokConfig.CustomPatterns, + GrokCustomPatternFiles: l.GrokConfig.CustomPatternFiles, + GrokTimezone: l.GrokConfig.Timezone, + GrokUniqueTimestamp: l.GrokConfig.UniqueTimestamp, + DataFormat: "grok", } - if len(l.parsers) == 0 { - return fmt.Errorf("logparser input plugin: no parser defined") - } - - // compile log parser patterns: - for _, parser := range l.parsers { - if err := parser.Compile(); err != nil { - return err - } + var err error + l.GrokParser, err = parsers.NewParser(config) + if err != nil { + return err } l.wg.Add(1) go l.parser() - return l.tailNewfiles(l.FromBeginning) + err = l.tailNewfiles(l.FromBeginning) + + // clear offsets + l.offsets = make(map[string]int64) + // assumption that once Start is called, all parallel plugins have already been initialized + offsetsMutex.Lock() + offsets = make(map[string]int64) + offsetsMutex.Unlock() + + return err } // check the globs against files on disk, and start tailing any new files. // Assumes l's lock is held! func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { - var seek tail.SeekInfo - if !fromBeginning { - seek.Whence = 2 - seek.Offset = 0 - } - var poll bool if l.WatchMethod == "poll" { poll = true @@ -183,35 +211,49 @@ func (l *LogParserPlugin) tailNewfiles(fromBeginning bool) error { for _, filepath := range l.Files { g, err := globpath.Compile(filepath) if err != nil { - log.Printf("E! Error Glob %s failed to compile, %s", filepath, err) + l.Log.Errorf("Glob %q failed to compile: %s", filepath, err) continue } files := g.Match() - for file := range files { + for _, file := range files { if _, ok := l.tailers[file]; ok { // we're already tailing this file continue } + var seek *tail.SeekInfo + if !fromBeginning { + if offset, ok := l.offsets[file]; ok { + l.Log.Debugf("Using offset %d for file: %v", offset, file) + seek = &tail.SeekInfo{ + Whence: 0, + Offset: offset, + } + } else { + seek = &tail.SeekInfo{ + Whence: 2, + Offset: 0, + } + } + } + tailer, err := tail.TailFile(file, tail.Config{ ReOpen: true, Follow: true, - Location: &seek, + Location: seek, MustExist: true, Poll: poll, Logger: tail.DiscardingLogger, }) - - //add message saying a new tailer was added for the file - log.Printf("D! 
tail added for file: %v", file) - if err != nil { l.acc.AddError(err) continue } + l.Log.Debugf("Tail added for file: %v", file) + // create a goroutine for each "tailer" l.wg.Add(1) go l.receiver(tailer) @@ -231,7 +273,7 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { for line = range tailer.Lines { if line.Err != nil { - log.Printf("E! Error tailing file %s, Error: %s\n", + l.Log.Errorf("Error tailing file %s, Error: %s", tailer.Filename, line.Err) continue } @@ -251,8 +293,8 @@ func (l *LogParserPlugin) receiver(tailer *tail.Tail) { } } -// parser is launched as a goroutine to watch the l.lines channel. -// when a line is available, parser parses it and adds the metric(s) to the +// parse is launched as a goroutine to watch the l.lines channel. +// when a line is available, parse parses it and adds the metric(s) to the // accumulator. func (l *LogParserPlugin) parser() { defer l.wg.Done() @@ -269,18 +311,17 @@ func (l *LogParserPlugin) parser() { continue } } - for _, parser := range l.parsers { - m, err = parser.ParseLine(entry.line) - if err == nil { - if m != nil { - tags := m.Tags() - tags["path"] = entry.path - l.acc.AddFields(m.Name(), m.Fields(), tags, m.Time()) - } - } else { - log.Println("E! Error parsing log line: " + err.Error()) + m, err = l.GrokParser.ParseLine(entry.line) + if err == nil { + if m != nil { + tags := m.Tags() + tags["path"] = entry.path + l.acc.AddFields(m.Name(), m.Fields(), tags, m.Time()) } + } else { + l.Log.Errorf("Error parsing log line: %s", err.Error()) } + } } @@ -290,24 +331,38 @@ func (l *LogParserPlugin) Stop() { defer l.Unlock() for _, t := range l.tailers { + if !l.FromBeginning { + // store offset for resume + offset, err := t.Tell() + if err == nil { + l.offsets[t.Filename] = offset + l.Log.Debugf("Recording offset %d for file: %v", offset, t.Filename) + } else { + l.acc.AddError(fmt.Errorf("error recording offset for file %s", t.Filename)) + } + } err := t.Stop() //message for a stopped tailer - log.Printf("D! tail dropped for file: %v", t.Filename) + l.Log.Debugf("Tail dropped for file: %v", t.Filename) if err != nil { - log.Printf("E! 
Error stopping tail on file %s\n", t.Filename) + l.Log.Errorf("Error stopping tail on file %s", t.Filename) } - t.Cleanup() } close(l.done) l.wg.Wait() + + // persist offsets + offsetsMutex.Lock() + for k, v := range l.offsets { + offsets[k] = v + } + offsetsMutex.Unlock() } func init() { inputs.Add("logparser", func() telegraf.Input { - return &LogParserPlugin{ - WatchMethod: defaultWatchMethod, - } + return NewLogParser() }) } diff --git a/plugins/inputs/logparser/logparser_test.go b/plugins/inputs/logparser/logparser_test.go index 98567b4c2..142f78d46 100644 --- a/plugins/inputs/logparser/logparser_test.go +++ b/plugins/inputs/logparser/logparser_test.go @@ -6,18 +6,19 @@ import ( "runtime" "strings" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - - "github.com/influxdata/telegraf/plugins/inputs/logparser/grok" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestStartNoParsers(t *testing.T) { logparser := &LogParserPlugin{ + Log: testutil.Logger{}, FromBeginning: true, - Files: []string{"grok/testdata/*.log"}, + Files: []string{"testdata/*.log"}, } acc := testutil.Accumulator{} @@ -26,15 +27,15 @@ func TestStartNoParsers(t *testing.T) { func TestGrokParseLogFilesNonExistPattern(t *testing.T) { thisdir := getCurrentDir() - p := &grok.Parser{ - Patterns: []string{"%{FOOBAR}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, - } logparser := &LogParserPlugin{ + Log: testutil.Logger{}, FromBeginning: true, - Files: []string{thisdir + "grok/testdata/*.log"}, - GrokParser: p, + Files: []string{thisdir + "testdata/*.log"}, + GrokConfig: GrokConfig{ + Patterns: []string{"%{FOOBAR}"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + }, } acc := testutil.Accumulator{} @@ -44,45 +45,69 @@ func TestGrokParseLogFilesNonExistPattern(t *testing.T) { func TestGrokParseLogFiles(t *testing.T) { thisdir := getCurrentDir() - p := &grok.Parser{ - Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, - } logparser := &LogParserPlugin{ + Log: testutil.Logger{}, + GrokConfig: GrokConfig{ + MeasurementName: "logparser_grok", + Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}", "%{TEST_LOG_C}"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + }, FromBeginning: true, - Files: []string{thisdir + "grok/testdata/*.log"}, - GrokParser: p, + Files: []string{thisdir + "testdata/*.log"}, } acc := testutil.Accumulator{} - assert.NoError(t, logparser.Start(&acc)) - - acc.Wait(2) + require.NoError(t, logparser.Start(&acc)) + acc.Wait(3) logparser.Stop() - acc.AssertContainsTaggedFields(t, "logparser_grok", - map[string]interface{}{ - "clientip": "192.168.1.1", - "myfloat": float64(1.25), - "response_time": int64(5432), - "myint": int64(101), - }, - map[string]string{ - "response_code": "200", - "path": thisdir + "grok/testdata/test_a.log", - }) + expected := []telegraf.Metric{ + testutil.MustMetric( + "logparser_grok", + map[string]string{ + "response_code": "200", + "path": thisdir + "testdata/test_a.log", + }, + map[string]interface{}{ + "clientip": "192.168.1.1", + "myfloat": float64(1.25), + "response_time": int64(5432), + "myint": int64(101), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "logparser_grok", + map[string]string{ + "path": thisdir + "testdata/test_b.log", + }, + map[string]interface{}{ + "myfloat": 1.25, + "mystring": "mystring", + "nomodifier": "nomodifier", + }, + 
time.Unix(0, 0), + ), + testutil.MustMetric( + "logparser_grok", + map[string]string{ + "path": thisdir + "testdata/test_c.log", + "response_code": "200", + }, + map[string]interface{}{ + "clientip": "192.168.1.1", + "myfloat": 1.25, + "myint": 101, + "response_time": 5432, + }, + time.Unix(0, 0), + ), + } - acc.AssertContainsTaggedFields(t, "logparser_grok", - map[string]interface{}{ - "myfloat": 1.25, - "mystring": "mystring", - "nomodifier": "nomodifier", - }, - map[string]string{ - "path": thisdir + "grok/testdata/test_b.log", - }) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), testutil.SortMetrics()) } func TestGrokParseLogFilesAppearLater(t *testing.T) { @@ -91,15 +116,16 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { assert.NoError(t, err) thisdir := getCurrentDir() - p := &grok.Parser{ - Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, - } logparser := &LogParserPlugin{ + Log: testutil.Logger{}, FromBeginning: true, Files: []string{emptydir + "/*.log"}, - GrokParser: p, + GrokConfig: GrokConfig{ + MeasurementName: "logparser_grok", + Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + }, } acc := testutil.Accumulator{} @@ -107,7 +133,7 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { assert.Equal(t, acc.NFields(), 0) - _ = os.Symlink(thisdir+"grok/testdata/test_a.log", emptydir+"/test_a.log") + _ = os.Symlink(thisdir+"testdata/test_a.log", emptydir+"/test_a.log") assert.NoError(t, acc.GatherError(logparser.Gather)) acc.Wait(1) @@ -130,16 +156,16 @@ func TestGrokParseLogFilesAppearLater(t *testing.T) { // pattern available for test_b.log func TestGrokParseLogFilesOneBad(t *testing.T) { thisdir := getCurrentDir() - p := &grok.Parser{ - Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"}, - CustomPatternFiles: []string{thisdir + "grok/testdata/test-patterns"}, - } - assert.NoError(t, p.Compile()) logparser := &LogParserPlugin{ + Log: testutil.Logger{}, FromBeginning: true, - Files: []string{thisdir + "grok/testdata/test_a.log"}, - GrokParser: p, + Files: []string{thisdir + "testdata/test_a.log"}, + GrokConfig: GrokConfig{ + MeasurementName: "logparser_grok", + Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_BAD}"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + }, } acc := testutil.Accumulator{} @@ -158,7 +184,41 @@ func TestGrokParseLogFilesOneBad(t *testing.T) { }, map[string]string{ "response_code": "200", - "path": thisdir + "grok/testdata/test_a.log", + "path": thisdir + "testdata/test_a.log", + }) +} + +func TestGrokParseLogFiles_TimestampInEpochMilli(t *testing.T) { + thisdir := getCurrentDir() + + logparser := &LogParserPlugin{ + Log: testutil.Logger{}, + GrokConfig: GrokConfig{ + MeasurementName: "logparser_grok", + Patterns: []string{"%{TEST_LOG_C}"}, + CustomPatternFiles: []string{thisdir + "testdata/test-patterns"}, + }, + FromBeginning: true, + Files: []string{thisdir + "testdata/test_c.log"}, + } + + acc := testutil.Accumulator{} + acc.SetDebug(true) + assert.NoError(t, logparser.Start(&acc)) + acc.Wait(1) + + logparser.Stop() + + acc.AssertContainsTaggedFields(t, "logparser_grok", + map[string]interface{}{ + "clientip": "192.168.1.1", + "myfloat": float64(1.25), + "response_time": int64(5432), + "myint": int64(101), + }, + map[string]string{ + "response_code": "200", + "path": thisdir + "testdata/test_c.log", }) } diff --git 
a/plugins/inputs/logparser/testdata/test-patterns b/plugins/inputs/logparser/testdata/test-patterns new file mode 100644 index 000000000..45970a9c8 --- /dev/null +++ b/plugins/inputs/logparser/testdata/test-patterns @@ -0,0 +1,18 @@ +# Test A log line: +# [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101 +DURATION %{NUMBER}[nuµm]?s +RESPONSE_CODE %{NUMBER:response_code:tag} +RESPONSE_TIME %{DURATION:response_time:duration} +TEST_LOG_A \[%{HTTPDATE:timestamp:ts-httpd}\] %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} %{NUMBER:myint:int} + +# Test B log line: +# [04/06/2016--12:41:45] 1.25 mystring dropme nomodifier +TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME} +TEST_LOG_B \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:string} %{WORD:dropme:drop} %{WORD:nomodifier} + +TEST_TIMESTAMP %{MONTHDAY}/%{MONTHNUM}/%{YEAR}--%{TIME} +TEST_LOG_BAD \[%{TEST_TIMESTAMP:timestamp:ts-"02/01/2006--15:04:05"}\] %{NUMBER:myfloat:float} %{WORD:mystring:int} %{WORD:dropme:drop} %{WORD:nomodifier} + +# Test C log line: +# 1568723594631 1.25 200 192.168.1.1 5.432µs 101 +TEST_LOG_C %{POSINT:timestamp:ts-epochmilli} %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} %{NUMBER:myint:int} diff --git a/plugins/inputs/logparser/grok/testdata/test_a.log b/plugins/inputs/logparser/testdata/test_a.log similarity index 100% rename from plugins/inputs/logparser/grok/testdata/test_a.log rename to plugins/inputs/logparser/testdata/test_a.log diff --git a/plugins/inputs/logparser/grok/testdata/test_b.log b/plugins/inputs/logparser/testdata/test_b.log similarity index 100% rename from plugins/inputs/logparser/grok/testdata/test_b.log rename to plugins/inputs/logparser/testdata/test_b.log diff --git a/plugins/inputs/logparser/testdata/test_c.log b/plugins/inputs/logparser/testdata/test_c.log new file mode 100644 index 000000000..f814c0c30 --- /dev/null +++ b/plugins/inputs/logparser/testdata/test_c.log @@ -0,0 +1 @@ +1568723594631 1.25 200 192.168.1.1 5.432µs 101 diff --git a/plugins/inputs/logstash/README.md b/plugins/inputs/logstash/README.md new file mode 100644 index 000000000..9571de5fd --- /dev/null +++ b/plugins/inputs/logstash/README.md @@ -0,0 +1,154 @@ +# Logstash Input Plugin + +This plugin reads metrics exposed by +[Logstash Monitoring API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html). + +Logstash 5 and later is supported. + +### Configuration + +```toml +[[inputs.logstash]] + ## The URL of the exposed Logstash API endpoint. + url = "http://127.0.0.1:9600" + + ## Use Logstash 5 single pipeline API, set to true when monitoring + ## Logstash 5. + # single_pipeline = false + + ## Enable optional collection components. Can contain + ## "pipelines", "process", and "jvm". + # collect = ["pipelines", "process", "jvm"] + + ## Timeout for HTTP requests. + # timeout = "5s" + + ## Optional HTTP Basic Auth credentials. + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Use TLS but skip chain & host verification. + # insecure_skip_verify = false + + ## Optional HTTP headers. 
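+  ## Note (behavior per this plugin's HTTP client): every header listed here is
+  ## sent with each request, and a "Host" header, if set, overrides the
+  ## request's Host field. The entries below are placeholder examples.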
+ # [inputs.logstash.headers] + # "X-Special-Header" = "Special-Value" +``` + +### Metrics + +- logstash_jvm + - tags: + - node_id + - node_name + - node_host + - node_version + - fields: + - threads_peak_count + - mem_pools_survivor_peak_max_in_bytes + - mem_pools_survivor_max_in_bytes + - mem_pools_old_peak_used_in_bytes + - mem_pools_young_used_in_bytes + - mem_non_heap_committed_in_bytes + - threads_count + - mem_pools_old_committed_in_bytes + - mem_pools_young_peak_max_in_bytes + - mem_heap_used_percent + - gc_collectors_young_collection_time_in_millis + - mem_pools_survivor_peak_used_in_bytes + - mem_pools_young_committed_in_bytes + - gc_collectors_old_collection_time_in_millis + - gc_collectors_old_collection_count + - mem_pools_survivor_used_in_bytes + - mem_pools_old_used_in_bytes + - mem_pools_young_max_in_bytes + - mem_heap_max_in_bytes + - mem_non_heap_used_in_bytes + - mem_pools_survivor_committed_in_bytes + - mem_pools_old_max_in_bytes + - mem_heap_committed_in_bytes + - mem_pools_old_peak_max_in_bytes + - mem_pools_young_peak_used_in_bytes + - mem_heap_used_in_bytes + - gc_collectors_young_collection_count + - uptime_in_millis + ++ logstash_process + - tags: + - node_id + - node_name + - source + - node_version + - fields: + - open_file_descriptors + - cpu_load_average_1m + - cpu_load_average_5m + - cpu_load_average_15m + - cpu_total_in_millis + - cpu_percent + - peak_open_file_descriptors + - max_file_descriptors + - mem_total_virtual_in_bytes + - mem_total_virtual_in_bytes + +- logstash_events + - tags: + - node_id + - node_name + - source + - node_version + - pipeline (for Logstash 6+) + - fields: + - queue_push_duration_in_millis + - duration_in_millis + - in + - filtered + - out + ++ logstash_plugins + - tags: + - node_id + - node_name + - source + - node_version + - pipeline (for Logstash 6+) + - plugin_id + - plugin_name + - plugin_type + - fields: + - queue_push_duration_in_millis (for input plugins only) + - duration_in_millis + - in + - out + +- logstash_queue + - tags: + - node_id + - node_name + - source + - node_version + - pipeline (for Logstash 6+) + - queue_type + - fields: + - events + - free_space_in_bytes + - max_queue_size_in_bytes + - max_unread_events + - page_capacity_in_bytes + - queue_size_in_bytes + +### Example Output + +``` +logstash_jvm,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt gc_collectors_old_collection_count=2,gc_collectors_old_collection_time_in_millis=100,gc_collectors_young_collection_count=26,gc_collectors_young_collection_time_in_millis=1028,mem_heap_committed_in_bytes=1056309248,mem_heap_max_in_bytes=1056309248,mem_heap_used_in_bytes=207216328,mem_heap_used_percent=19,mem_non_heap_committed_in_bytes=160878592,mem_non_heap_used_in_bytes=140838184,mem_pools_old_committed_in_bytes=899284992,mem_pools_old_max_in_bytes=899284992,mem_pools_old_peak_max_in_bytes=899284992,mem_pools_old_peak_used_in_bytes=189468088,mem_pools_old_used_in_bytes=189468088,mem_pools_survivor_committed_in_bytes=17432576,mem_pools_survivor_max_in_bytes=17432576,mem_pools_survivor_peak_max_in_bytes=17432576,mem_pools_survivor_peak_used_in_bytes=17432576,mem_pools_survivor_used_in_bytes=12572640,mem_pools_young_committed_in_bytes=139591680,mem_pools_young_max_in_bytes=139591680,mem_pools_young_peak_max_in_bytes=139591680,mem_pools_young_peak_used_in_bytes=139591680,mem_pools_young_used_in_bytes=5175600,threads_count=20,threads_peak_count=24,uptime_in_millis=739089 
1566425244000000000 +logstash_process,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,source=debian-stretch-logstash6.virt cpu_load_average_15m=0.03,cpu_load_average_1m=0.01,cpu_load_average_5m=0.04,cpu_percent=0,cpu_total_in_millis=83230,max_file_descriptors=16384,mem_total_virtual_in_bytes=3689132032,open_file_descriptors=118,peak_open_file_descriptors=118 1566425244000000000 +logstash_events,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,source=debian-stretch-logstash6.virt duration_in_millis=0,filtered=0,in=0,out=0,queue_push_duration_in_millis=0 1566425244000000000 +logstash_plugins,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,plugin_id=2807cb8610ba7854efa9159814fcf44c3dda762b43bd088403b30d42c88e69ab,plugin_name=beats,plugin_type=input,source=debian-stretch-logstash6.virt out=0,queue_push_duration_in_millis=0 1566425244000000000 +logstash_plugins,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,plugin_id=7a6c973366186a695727c73935634a00bccd52fceedf30d0746983fce572d50c,plugin_name=file,plugin_type=output,source=debian-stretch-logstash6.virt duration_in_millis=0,in=0,out=0 1566425244000000000 +logstash_queue,node_id=3da53ed0-a946-4a33-9cdb-33013f2273f6,node_name=debian-stretch-logstash6.virt,node_version=6.8.1,pipeline=main,queue_type=memory,source=debian-stretch-logstash6.virt events=0 1566425244000000000 +``` diff --git a/plugins/inputs/logstash/logstash.go b/plugins/inputs/logstash/logstash.go new file mode 100644 index 000000000..1abcfa3a3 --- /dev/null +++ b/plugins/inputs/logstash/logstash.go @@ -0,0 +1,482 @@ +package logstash + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/choice" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + jsonParser "github.com/influxdata/telegraf/plugins/parsers/json" +) + +const sampleConfig = ` + ## The URL of the exposed Logstash API endpoint. + url = "http://127.0.0.1:9600" + + ## Use Logstash 5 single pipeline API, set to true when monitoring + ## Logstash 5. + # single_pipeline = false + + ## Enable optional collection components. Can contain + ## "pipelines", "process", and "jvm". + # collect = ["pipelines", "process", "jvm"] + + ## Timeout for HTTP requests. + # timeout = "5s" + + ## Optional HTTP Basic Auth credentials. + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## Use TLS but skip chain & host verification. + # insecure_skip_verify = false + + ## Optional HTTP headers. 
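+  ## Note (behavior per this plugin's HTTP client): every header listed here is
+  ## sent with each request, and a "Host" header, if set, overrides the
+  ## request's Host field. The entries below are placeholder examples.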
+ # [inputs.logstash.headers] + # "X-Special-Header" = "Special-Value" +` + +type Logstash struct { + URL string `toml:"url"` + + SinglePipeline bool `toml:"single_pipeline"` + Collect []string `toml:"collect"` + + Username string `toml:"username"` + Password string `toml:"password"` + Headers map[string]string `toml:"headers"` + Timeout internal.Duration `toml:"timeout"` + tls.ClientConfig + + client *http.Client +} + +// NewLogstash create an instance of the plugin with default settings +func NewLogstash() *Logstash { + return &Logstash{ + URL: "http://127.0.0.1:9600", + SinglePipeline: false, + Collect: []string{"pipelines", "process", "jvm"}, + Headers: make(map[string]string), + Timeout: internal.Duration{Duration: time.Second * 5}, + } +} + +// Description returns short info about plugin +func (logstash *Logstash) Description() string { + return "Read metrics exposed by Logstash" +} + +// SampleConfig returns details how to configure plugin +func (logstash *Logstash) SampleConfig() string { + return sampleConfig +} + +type ProcessStats struct { + ID string `json:"id"` + Process interface{} `json:"process"` + Name string `json:"name"` + Host string `json:"host"` + Version string `json:"version"` +} + +type JVMStats struct { + ID string `json:"id"` + JVM interface{} `json:"jvm"` + Name string `json:"name"` + Host string `json:"host"` + Version string `json:"version"` +} + +type PipelinesStats struct { + ID string `json:"id"` + Pipelines map[string]Pipeline `json:"pipelines"` + Name string `json:"name"` + Host string `json:"host"` + Version string `json:"version"` +} + +type PipelineStats struct { + ID string `json:"id"` + Pipeline Pipeline `json:"pipeline"` + Name string `json:"name"` + Host string `json:"host"` + Version string `json:"version"` +} + +type Pipeline struct { + Events interface{} `json:"events"` + Plugins PipelinePlugins `json:"plugins"` + Reloads interface{} `json:"reloads"` + Queue PipelineQueue `json:"queue"` +} + +type Plugin struct { + ID string `json:"id"` + Events interface{} `json:"events"` + Name string `json:"name"` +} + +type PipelinePlugins struct { + Inputs []Plugin `json:"inputs"` + Filters []Plugin `json:"filters"` + Outputs []Plugin `json:"outputs"` +} + +type PipelineQueue struct { + Events float64 `json:"events"` + Type string `json:"type"` + Capacity interface{} `json:"capacity"` + Data interface{} `json:"data"` +} + +const jvmStats = "/_node/stats/jvm" +const processStats = "/_node/stats/process" +const pipelinesStats = "/_node/stats/pipelines" +const pipelineStats = "/_node/stats/pipeline" + +func (i *Logstash) Init() error { + err := choice.CheckSlice(i.Collect, []string{"pipelines", "process", "jvm"}) + if err != nil { + return fmt.Errorf(`cannot verify "collect" setting: %v`, err) + } + return nil +} + +// createHttpClient create a clients to access API +func (logstash *Logstash) createHttpClient() (*http.Client, error) { + tlsConfig, err := logstash.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: logstash.Timeout.Duration, + } + + return client, nil +} + +// gatherJsonData query the data source and parse the response JSON +func (logstash *Logstash) gatherJsonData(url string, value interface{}) error { + request, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + if (logstash.Username != "") || (logstash.Password != "") { + request.SetBasicAuth(logstash.Username, logstash.Password) + } + + for header, 
value := range logstash.Headers { + if strings.ToLower(header) == "host" { + request.Host = value + } else { + request.Header.Add(header, value) + } + } + + response, err := logstash.client.Do(request) + if err != nil { + return err + } + + defer response.Body.Close() + if response.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. + body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + } + + err = json.NewDecoder(response.Body).Decode(value) + if err != nil { + return err + } + + return nil +} + +// gatherJVMStats gather the JVM metrics and add results to the accumulator +func (logstash *Logstash) gatherJVMStats(url string, accumulator telegraf.Accumulator) error { + jvmStats := &JVMStats{} + + err := logstash.gatherJsonData(url, jvmStats) + if err != nil { + return err + } + + tags := map[string]string{ + "node_id": jvmStats.ID, + "node_name": jvmStats.Name, + "node_version": jvmStats.Version, + "source": jvmStats.Host, + } + + flattener := jsonParser.JSONFlattener{} + err = flattener.FlattenJSON("", jvmStats.JVM) + if err != nil { + return err + } + accumulator.AddFields("logstash_jvm", flattener.Fields, tags) + + return nil +} + +// gatherJVMStats gather the Process metrics and add results to the accumulator +func (logstash *Logstash) gatherProcessStats(url string, accumulator telegraf.Accumulator) error { + processStats := &ProcessStats{} + + err := logstash.gatherJsonData(url, processStats) + if err != nil { + return err + } + + tags := map[string]string{ + "node_id": processStats.ID, + "node_name": processStats.Name, + "node_version": processStats.Version, + "source": processStats.Host, + } + + flattener := jsonParser.JSONFlattener{} + err = flattener.FlattenJSON("", processStats.Process) + if err != nil { + return err + } + accumulator.AddFields("logstash_process", flattener.Fields, tags) + + return nil +} + +// gatherPluginsStats go through a list of plugins and add their metrics to the accumulator +func (logstash *Logstash) gatherPluginsStats( + plugins []Plugin, + pluginType string, + tags map[string]string, + accumulator telegraf.Accumulator) error { + + for _, plugin := range plugins { + pluginTags := map[string]string{ + "plugin_name": plugin.Name, + "plugin_id": plugin.ID, + "plugin_type": pluginType, + } + for tag, value := range tags { + pluginTags[tag] = value + } + flattener := jsonParser.JSONFlattener{} + err := flattener.FlattenJSON("", plugin.Events) + if err != nil { + return err + } + accumulator.AddFields("logstash_plugins", flattener.Fields, pluginTags) + } + + return nil +} + +func (logstash *Logstash) gatherQueueStats( + queue *PipelineQueue, + tags map[string]string, + accumulator telegraf.Accumulator) error { + + var err error + queueTags := map[string]string{ + "queue_type": queue.Type, + } + for tag, value := range tags { + queueTags[tag] = value + } + + queueFields := map[string]interface{}{ + "events": queue.Events, + } + + if queue.Type != "memory" { + flattener := jsonParser.JSONFlattener{} + err = flattener.FlattenJSON("", queue.Capacity) + if err != nil { + return err + } + err = flattener.FlattenJSON("", queue.Data) + if err != nil { + return err + } + for field, value := range flattener.Fields { + queueFields[field] = value + } + } + + accumulator.AddFields("logstash_queue", queueFields, queueTags) + + return nil +} + +// gatherJVMStats gather the Pipeline metrics and add results to the 
accumulator (for Logstash < 6) +func (logstash *Logstash) gatherPipelineStats(url string, accumulator telegraf.Accumulator) error { + pipelineStats := &PipelineStats{} + + err := logstash.gatherJsonData(url, pipelineStats) + if err != nil { + return err + } + + tags := map[string]string{ + "node_id": pipelineStats.ID, + "node_name": pipelineStats.Name, + "node_version": pipelineStats.Version, + "source": pipelineStats.Host, + } + + flattener := jsonParser.JSONFlattener{} + err = flattener.FlattenJSON("", pipelineStats.Pipeline.Events) + if err != nil { + return err + } + accumulator.AddFields("logstash_events", flattener.Fields, tags) + + err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Inputs, "input", tags, accumulator) + if err != nil { + return err + } + err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Filters, "filter", tags, accumulator) + if err != nil { + return err + } + err = logstash.gatherPluginsStats(pipelineStats.Pipeline.Plugins.Outputs, "output", tags, accumulator) + if err != nil { + return err + } + + err = logstash.gatherQueueStats(&pipelineStats.Pipeline.Queue, tags, accumulator) + if err != nil { + return err + } + + return nil +} + +// gatherJVMStats gather the Pipelines metrics and add results to the accumulator (for Logstash >= 6) +func (logstash *Logstash) gatherPipelinesStats(url string, accumulator telegraf.Accumulator) error { + pipelinesStats := &PipelinesStats{} + + err := logstash.gatherJsonData(url, pipelinesStats) + if err != nil { + return err + } + + for pipelineName, pipeline := range pipelinesStats.Pipelines { + tags := map[string]string{ + "node_id": pipelinesStats.ID, + "node_name": pipelinesStats.Name, + "node_version": pipelinesStats.Version, + "pipeline": pipelineName, + "source": pipelinesStats.Host, + } + + flattener := jsonParser.JSONFlattener{} + err := flattener.FlattenJSON("", pipeline.Events) + if err != nil { + return err + } + accumulator.AddFields("logstash_events", flattener.Fields, tags) + + err = logstash.gatherPluginsStats(pipeline.Plugins.Inputs, "input", tags, accumulator) + if err != nil { + return err + } + err = logstash.gatherPluginsStats(pipeline.Plugins.Filters, "filter", tags, accumulator) + if err != nil { + return err + } + err = logstash.gatherPluginsStats(pipeline.Plugins.Outputs, "output", tags, accumulator) + if err != nil { + return err + } + + err = logstash.gatherQueueStats(&pipeline.Queue, tags, accumulator) + if err != nil { + return err + } + } + + return nil +} + +// Gather ask this plugin to start gathering metrics +func (logstash *Logstash) Gather(accumulator telegraf.Accumulator) error { + if logstash.client == nil { + client, err := logstash.createHttpClient() + + if err != nil { + return err + } + logstash.client = client + } + + if choice.Contains("jvm", logstash.Collect) { + jvmUrl, err := url.Parse(logstash.URL + jvmStats) + if err != nil { + return err + } + if err := logstash.gatherJVMStats(jvmUrl.String(), accumulator); err != nil { + return err + } + } + + if choice.Contains("process", logstash.Collect) { + processUrl, err := url.Parse(logstash.URL + processStats) + if err != nil { + return err + } + if err := logstash.gatherProcessStats(processUrl.String(), accumulator); err != nil { + return err + } + } + + if choice.Contains("pipelines", logstash.Collect) { + if logstash.SinglePipeline { + pipelineUrl, err := url.Parse(logstash.URL + pipelineStats) + if err != nil { + return err + } + if err := logstash.gatherPipelineStats(pipelineUrl.String(), accumulator); err != 
nil { + return err + } + } else { + pipelinesUrl, err := url.Parse(logstash.URL + pipelinesStats) + if err != nil { + return err + } + if err := logstash.gatherPipelinesStats(pipelinesUrl.String(), accumulator); err != nil { + return err + } + } + } + + return nil +} + +// init registers this plugin instance +func init() { + inputs.Add("logstash", func() telegraf.Input { + return NewLogstash() + }) +} diff --git a/plugins/inputs/logstash/logstash_test.go b/plugins/inputs/logstash/logstash_test.go new file mode 100644 index 000000000..aeb4e46f8 --- /dev/null +++ b/plugins/inputs/logstash/logstash_test.go @@ -0,0 +1,691 @@ +package logstash + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" +) + +var logstashTest = NewLogstash() + +var ( + logstash5accPipelineStats testutil.Accumulator + logstash6accPipelinesStats testutil.Accumulator + logstash5accProcessStats testutil.Accumulator + logstash6accProcessStats testutil.Accumulator + logstash5accJVMStats testutil.Accumulator + logstash6accJVMStats testutil.Accumulator +) + +func Test_Logstash5GatherProcessStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash5ProcessJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash5accProcessStats); err != nil { + test.Logf("Can't gather Process stats") + } + + logstash5accProcessStats.AssertContainsTaggedFields( + test, + "logstash_process", + map[string]interface{}{ + "open_file_descriptors": float64(89.0), + "max_file_descriptors": float64(1.048576e+06), + "cpu_percent": float64(3.0), + "cpu_load_average_5m": float64(0.61), + "cpu_load_average_15m": float64(0.54), + "mem_total_virtual_in_bytes": float64(4.809506816e+09), + "cpu_total_in_millis": float64(1.5526e+11), + "cpu_load_average_1m": float64(0.49), + "peak_open_file_descriptors": float64(100.0), + }, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "source": string("node-5"), + "node_version": string("5.3.0"), + }, + ) +} + +func Test_Logstash6GatherProcessStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash6ProcessJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := 
logstashTest.gatherProcessStats(logstashTest.URL+processStats, &logstash6accProcessStats); err != nil { + test.Logf("Can't gather Process stats") + } + + logstash6accProcessStats.AssertContainsTaggedFields( + test, + "logstash_process", + map[string]interface{}{ + "open_file_descriptors": float64(133.0), + "max_file_descriptors": float64(262144.0), + "cpu_percent": float64(0.0), + "cpu_load_average_5m": float64(42.4), + "cpu_load_average_15m": float64(38.95), + "mem_total_virtual_in_bytes": float64(17923452928.0), + "cpu_total_in_millis": float64(5841460), + "cpu_load_average_1m": float64(48.2), + "peak_open_file_descriptors": float64(145.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + }, + ) +} + +func Test_Logstash5GatherPipelineStats(test *testing.T) { + //logstash5accPipelineStats.SetDebug(true) + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash5PipelineJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherPipelineStats(logstashTest.URL+pipelineStats, &logstash5accPipelineStats); err != nil { + test.Logf("Can't gather Pipeline stats") + } + + logstash5accPipelineStats.AssertContainsTaggedFields( + test, + "logstash_events", + map[string]interface{}{ + "duration_in_millis": float64(1151.0), + "in": float64(1269.0), + "filtered": float64(1269.0), + "out": float64(1269.0), + }, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "source": string("node-5"), + "node_version": string("5.3.0"), + }, + ) + + fields := make(map[string]interface{}) + fields["queue_push_duration_in_millis"] = float64(32.0) + fields["out"] = float64(2.0) + + logstash5accPipelineStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + fields, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "source": string("node-5"), + "node_version": string("5.3.0"), + "plugin_name": string("beats"), + "plugin_id": string("a35197a509596954e905e38521bae12b1498b17d-1"), + "plugin_type": string("input"), + }, + ) + + logstash5accPipelineStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(360.0), + "in": float64(1269.0), + "out": float64(1269.0), + }, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "source": string("node-5"), + "node_version": string("5.3.0"), + "plugin_name": string("stdout"), + "plugin_id": string("582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-2"), + "plugin_type": string("output"), + }, + ) + + logstash5accPipelineStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(228.0), + "in": float64(1269.0), + 
"out": float64(1269.0), + }, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "source": string("node-5"), + "node_version": string("5.3.0"), + "plugin_name": string("s3"), + "plugin_id": string("582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-3"), + "plugin_type": string("output"), + }, + ) +} + +func Test_Logstash6GatherPipelinesStats(test *testing.T) { + //logstash6accPipelinesStats.SetDebug(true) + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash6PipelinesJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherPipelinesStats(logstashTest.URL+pipelineStats, &logstash6accPipelinesStats); err != nil { + test.Logf("Can't gather Pipeline stats") + } + + fields := make(map[string]interface{}) + fields["duration_in_millis"] = float64(8540751.0) + fields["queue_push_duration_in_millis"] = float64(366.0) + fields["in"] = float64(180659.0) + fields["filtered"] = float64(180659.0) + fields["out"] = float64(180659.0) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_events", + fields, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + }, + ) + + fields = make(map[string]interface{}) + fields["queue_push_duration_in_millis"] = float64(366.0) + fields["out"] = float64(180659.0) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + fields, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("kafka"), + "plugin_id": string("input-kafka"), + "plugin_type": string("input"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2117.0), + "in": float64(27641.0), + "out": float64(27641.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("155b0ad18abbf3df1e0cb7bddef0d77c5ba699efe5a0f8a28502d140549baf54"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2117.0), + "in": float64(27641.0), + "out": float64(27641.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": 
string("mutate"), + "plugin_id": string("155b0ad18abbf3df1e0cb7bddef0d77c5ba699efe5a0f8a28502d140549baf54"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(13149.0), + "in": float64(180659.0), + "out": float64(177549.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("date"), + "plugin_id": string("d079424bb6b7b8c7c61d9c5e0ddae445e92fa9ffa2e8690b0a669f7c690542f0"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(2814.0), + "in": float64(76602.0), + "out": float64(76602.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("25afa60ab6dc30512fe80efa3493e4928b5b1b109765b7dc46a3e4bbf293d2d4"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(9.0), + "in": float64(934.0), + "out": float64(934.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("2d9fa8f74eeb137bfa703b8050bad7d76636fface729e4585b789b5fc9bed668"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(173.0), + "in": float64(3110.0), + "out": float64(0.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("drop"), + "plugin_id": string("4ed14c9ef0198afe16c31200041e98d321cb5c2e6027e30b077636b8c4842110"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(5605.0), + "in": float64(75482.0), + "out": float64(75482.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("358ce1eb387de7cd5711c2fb4de64cd3b12e5ca9a4c45f529516bcb053a31df4"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(313992.0), + "in": float64(180659.0), + "out": float64(180659.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": 
string("main"), + "plugin_name": string("csv"), + "plugin_id": string("82a9bbb02fff37a63c257c1f146b0a36273c7cbbebe83c0a51f086e5280bf7bb"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(0.0), + "in": float64(0.0), + "out": float64(0.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("mutate"), + "plugin_id": string("8fb13a8cdd4257b52724d326aa1549603ffdd4e4fde6d20720c96b16238c18c3"), + "plugin_type": string("filter"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(651386.0), + "in": float64(177549.0), + "out": float64(177549.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("elasticsearch"), + "plugin_id": string("output-elk"), + "plugin_type": string("output"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(186751.0), + "in": float64(177549.0), + "out": float64(177549.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("kafka"), + "plugin_id": string("output-kafka1"), + "plugin_type": string("output"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_plugins", + map[string]interface{}{ + "duration_in_millis": float64(7335196.0), + "in": float64(177549.0), + "out": float64(177549.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "plugin_name": string("kafka"), + "plugin_id": string("output-kafka2"), + "plugin_type": string("output"), + }, + ) + + logstash6accPipelinesStats.AssertContainsTaggedFields( + test, + "logstash_queue", + map[string]interface{}{ + "events": float64(103), + "free_space_in_bytes": float64(36307369984), + "max_queue_size_in_bytes": float64(1073741824), + "max_unread_events": float64(0), + "page_capacity_in_bytes": float64(67108864), + "queue_size_in_bytes": float64(1872391), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + "pipeline": string("main"), + "queue_type": string("persisted"), + }, + ) + +} + +func Test_Logstash5GatherJVMStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash5JvmJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", 
requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash5accJVMStats); err != nil { + test.Logf("Can't gather JVM stats") + } + + logstash5accJVMStats.AssertContainsTaggedFields( + test, + "logstash_jvm", + map[string]interface{}{ + "mem_pools_young_max_in_bytes": float64(5.5836672e+08), + "mem_pools_young_committed_in_bytes": float64(1.43261696e+08), + "mem_heap_committed_in_bytes": float64(5.1904512e+08), + "threads_count": float64(29.0), + "mem_pools_old_peak_used_in_bytes": float64(1.27900864e+08), + "mem_pools_old_peak_max_in_bytes": float64(7.2482816e+08), + "mem_heap_used_percent": float64(16.0), + "gc_collectors_young_collection_time_in_millis": float64(3235.0), + "mem_pools_survivor_committed_in_bytes": float64(1.7825792e+07), + "mem_pools_young_used_in_bytes": float64(7.6049384e+07), + "mem_non_heap_committed_in_bytes": float64(2.91487744e+08), + "mem_pools_survivor_peak_max_in_bytes": float64(3.4865152e+07), + "mem_pools_young_peak_max_in_bytes": float64(2.7918336e+08), + "uptime_in_millis": float64(4.803461e+06), + "mem_pools_survivor_peak_used_in_bytes": float64(8.912896e+06), + "mem_pools_survivor_max_in_bytes": float64(6.9730304e+07), + "gc_collectors_old_collection_count": float64(2.0), + "mem_pools_survivor_used_in_bytes": float64(9.419672e+06), + "mem_pools_old_used_in_bytes": float64(2.55801728e+08), + "mem_pools_old_max_in_bytes": float64(1.44965632e+09), + "mem_pools_young_peak_used_in_bytes": float64(7.1630848e+07), + "mem_heap_used_in_bytes": float64(3.41270784e+08), + "mem_heap_max_in_bytes": float64(2.077753344e+09), + "gc_collectors_young_collection_count": float64(616.0), + "threads_peak_count": float64(31.0), + "mem_pools_old_committed_in_bytes": float64(3.57957632e+08), + "gc_collectors_old_collection_time_in_millis": float64(114.0), + "mem_non_heap_used_in_bytes": float64(2.68905936e+08), + }, + map[string]string{ + "node_id": string("a360d8cf-6289-429d-8419-6145e324b574"), + "node_name": string("node-5-test"), + "source": string("node-5"), + "node_version": string("5.3.0"), + }, + ) + +} + +func Test_Logstash6GatherJVMStats(test *testing.T) { + fakeServer := httptest.NewUnstartedServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + writer.Header().Set("Content-Type", "application/json") + fmt.Fprintf(writer, "%s", string(logstash6JvmJSON)) + })) + requestURL, err := url.Parse(logstashTest.URL) + if err != nil { + test.Logf("Can't connect to: %s", logstashTest.URL) + } + fakeServer.Listener, _ = net.Listen("tcp", fmt.Sprintf("%s:%s", requestURL.Hostname(), requestURL.Port())) + fakeServer.Start() + defer fakeServer.Close() + + if logstashTest.client == nil { + client, err := logstashTest.createHttpClient() + + if err != nil { + test.Logf("Can't createHttpClient") + } + logstashTest.client = client + } + + if err := logstashTest.gatherJVMStats(logstashTest.URL+jvmStats, &logstash6accJVMStats); err != nil { + test.Logf("Can't gather JVM stats") + } + + logstash6accJVMStats.AssertContainsTaggedFields( + test, + "logstash_jvm", + map[string]interface{}{ + "mem_pools_young_max_in_bytes": float64(1605304320.0), + "mem_pools_young_committed_in_bytes": float64(71630848.0), + "mem_heap_committed_in_bytes": float64(824963072.0), + "threads_count": 
float64(60.0), + "mem_pools_old_peak_used_in_bytes": float64(696572600.0), + "mem_pools_old_peak_max_in_bytes": float64(6583418880.0), + "mem_heap_used_percent": float64(2.0), + "gc_collectors_young_collection_time_in_millis": float64(107321.0), + "mem_pools_survivor_committed_in_bytes": float64(8912896.0), + "mem_pools_young_used_in_bytes": float64(11775120.0), + "mem_non_heap_committed_in_bytes": float64(222986240.0), + "mem_pools_survivor_peak_max_in_bytes": float64(200605696), + "mem_pools_young_peak_max_in_bytes": float64(1605304320.0), + "uptime_in_millis": float64(281850926.0), + "mem_pools_survivor_peak_used_in_bytes": float64(8912896.0), + "mem_pools_survivor_max_in_bytes": float64(200605696.0), + "gc_collectors_old_collection_count": float64(37.0), + "mem_pools_survivor_used_in_bytes": float64(835008.0), + "mem_pools_old_used_in_bytes": float64(189750576.0), + "mem_pools_old_max_in_bytes": float64(6583418880.0), + "mem_pools_young_peak_used_in_bytes": float64(71630848.0), + "mem_heap_used_in_bytes": float64(202360704.0), + "mem_heap_max_in_bytes": float64(8389328896.0), + "gc_collectors_young_collection_count": float64(2094.0), + "threads_peak_count": float64(62.0), + "mem_pools_old_committed_in_bytes": float64(744419328.0), + "gc_collectors_old_collection_time_in_millis": float64(7492.0), + "mem_non_heap_used_in_bytes": float64(197878896.0), + }, + map[string]string{ + "node_id": string("3044f675-21ce-4335-898a-8408aa678245"), + "node_name": string("node-6-test"), + "source": string("node-6"), + "node_version": string("6.4.2"), + }, + ) + +} diff --git a/plugins/inputs/logstash/samples_logstash5.go b/plugins/inputs/logstash/samples_logstash5.go new file mode 100644 index 000000000..598f6dab5 --- /dev/null +++ b/plugins/inputs/logstash/samples_logstash5.go @@ -0,0 +1,156 @@ +package logstash + +const logstash5ProcessJSON = ` +{ + "host" : "node-5", + "version" : "5.3.0", + "http_address" : "0.0.0.0:9600", + "id" : "a360d8cf-6289-429d-8419-6145e324b574", + "name" : "node-5-test", + "process" : { + "open_file_descriptors" : 89, + "peak_open_file_descriptors" : 100, + "max_file_descriptors" : 1048576, + "mem" : { + "total_virtual_in_bytes" : 4809506816 + }, + "cpu" : { + "total_in_millis" : 155260000000, + "percent" : 3, + "load_average" : { + "1m" : 0.49, + "5m" : 0.61, + "15m" : 0.54 + } + } + } +} +` + +const logstash5JvmJSON = ` +{ + "host" : "node-5", + "version" : "5.3.0", + "http_address" : "0.0.0.0:9600", + "id" : "a360d8cf-6289-429d-8419-6145e324b574", + "name" : "node-5-test", + "jvm" : { + "threads" : { + "count" : 29, + "peak_count" : 31 + }, + "mem" : { + "heap_used_in_bytes" : 341270784, + "heap_used_percent" : 16, + "heap_committed_in_bytes" : 519045120, + "heap_max_in_bytes" : 2077753344, + "non_heap_used_in_bytes" : 268905936, + "non_heap_committed_in_bytes" : 291487744, + "pools" : { + "survivor" : { + "peak_used_in_bytes" : 8912896, + "used_in_bytes" : 9419672, + "peak_max_in_bytes" : 34865152, + "max_in_bytes" : 69730304, + "committed_in_bytes" : 17825792 + }, + "old" : { + "peak_used_in_bytes" : 127900864, + "used_in_bytes" : 255801728, + "peak_max_in_bytes" : 724828160, + "max_in_bytes" : 1449656320, + "committed_in_bytes" : 357957632 + }, + "young" : { + "peak_used_in_bytes" : 71630848, + "used_in_bytes" : 76049384, + "peak_max_in_bytes" : 279183360, + "max_in_bytes" : 558366720, + "committed_in_bytes" : 143261696 + } + } + }, + "gc" : { + "collectors" : { + "old" : { + "collection_time_in_millis" : 114, + "collection_count" : 2 + }, + "young" : { + 
"collection_time_in_millis" : 3235, + "collection_count" : 616 + } + } + }, + "uptime_in_millis" : 4803461 + } +} +` + +const logstash5PipelineJSON = ` +{ + "host" : "node-5", + "version" : "5.3.0", + "http_address" : "0.0.0.0:9600", + "id" : "a360d8cf-6289-429d-8419-6145e324b574", + "name" : "node-5-test", + "pipeline" : { + "events" : { + "duration_in_millis" : 1151, + "in" : 1269, + "filtered" : 1269, + "out" : 1269 + }, + "plugins" : { + "inputs" : [ { + "id" : "a35197a509596954e905e38521bae12b1498b17d-1", + "events" : { + "out" : 2, + "queue_push_duration_in_millis" : 32 + }, + "name" : "beats" + } ], + "filters" : [ ], + "outputs" : [ { + "id" : "582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-3", + "events" : { + "duration_in_millis" : 228, + "in" : 1269, + "out" : 1269 + }, + "name" : "s3" + }, { + "id" : "582d5c2becb582a053e1e9a6bcc11d49b69a6dfd-2", + "events" : { + "duration_in_millis" : 360, + "in" : 1269, + "out" : 1269 + }, + "name" : "stdout" + } ] + }, + "reloads" : { + "last_error" : null, + "successes" : 0, + "last_success_timestamp" : null, + "last_failure_timestamp" : null, + "failures" : 0 + }, + "queue" : { + "events" : 208, + "type" : "persisted", + "capacity" : { + "page_capacity_in_bytes" : 262144000, + "max_queue_size_in_bytes" : 8589934592, + "max_unread_events" : 0 + }, + "data" : { + "path" : "/path/to/data/queue", + "free_space_in_bytes" : 89280552960, + "storage_type" : "hfs" + } + }, + "id" : "main" + } +} +` diff --git a/plugins/inputs/logstash/samples_logstash6.go b/plugins/inputs/logstash/samples_logstash6.go new file mode 100644 index 000000000..16df2b0fd --- /dev/null +++ b/plugins/inputs/logstash/samples_logstash6.go @@ -0,0 +1,256 @@ +package logstash + +const logstash6ProcessJSON = ` +{ + "host" : "node-6", + "version" : "6.4.2", + "http_address" : "127.0.0.1:9600", + "id" : "3044f675-21ce-4335-898a-8408aa678245", + "name" : "node-6-test", + "process" : { + "open_file_descriptors" : 133, + "peak_open_file_descriptors" : 145, + "max_file_descriptors" : 262144, + "mem" : { + "total_virtual_in_bytes" : 17923452928 + }, + "cpu" : { + "total_in_millis" : 5841460, + "percent" : 0, + "load_average" : { + "1m" : 48.2, + "5m" : 42.4, + "15m" : 38.95 + } + } + } +} +` +const logstash6JvmJSON = ` +{ + "host" : "node-6", + "version" : "6.4.2", + "http_address" : "127.0.0.1:9600", + "id" : "3044f675-21ce-4335-898a-8408aa678245", + "name" : "node-6-test", + "jvm" : { + "threads" : { + "count" : 60, + "peak_count" : 62 + }, + "mem" : { + "heap_used_percent" : 2, + "heap_committed_in_bytes" : 824963072, + "heap_max_in_bytes" : 8389328896, + "heap_used_in_bytes" : 202360704, + "non_heap_used_in_bytes" : 197878896, + "non_heap_committed_in_bytes" : 222986240, + "pools" : { + "survivor" : { + "peak_used_in_bytes" : 8912896, + "used_in_bytes" : 835008, + "peak_max_in_bytes" : 200605696, + "max_in_bytes" : 200605696, + "committed_in_bytes" : 8912896 + }, + "old" : { + "peak_used_in_bytes" : 696572600, + "used_in_bytes" : 189750576, + "peak_max_in_bytes" : 6583418880, + "max_in_bytes" : 6583418880, + "committed_in_bytes" : 744419328 + }, + "young" : { + "peak_used_in_bytes" : 71630848, + "used_in_bytes" : 11775120, + "peak_max_in_bytes" : 1605304320, + "max_in_bytes" : 1605304320, + "committed_in_bytes" : 71630848 + } + } + }, + "gc" : { + "collectors" : { + "old" : { + "collection_time_in_millis" : 7492, + "collection_count" : 37 + }, + "young" : { + "collection_time_in_millis" : 107321, + "collection_count" : 2094 + } + } + }, + "uptime_in_millis" : 281850926 + } +} +` + +const 
logstash6PipelinesJSON = ` +{ + "host" : "node-6", + "version" : "6.4.2", + "http_address" : "127.0.0.1:9600", + "id" : "3044f675-21ce-4335-898a-8408aa678245", + "name" : "node-6-test", + "pipelines" : { + "main" : { + "events" : { + "duration_in_millis" : 8540751, + "in" : 180659, + "out" : 180659, + "filtered" : 180659, + "queue_push_duration_in_millis" : 366 + }, + "plugins" : { + "inputs" : [ + { + "id" : "input-kafka", + "events" : { + "out" : 180659, + "queue_push_duration_in_millis" : 366 + }, + "name" : "kafka" + } + ], + "filters" : [ + { + "id" : "155b0ad18abbf3df1e0cb7bddef0d77c5ba699efe5a0f8a28502d140549baf54", + "events" : { + "duration_in_millis" : 2117, + "in" : 27641, + "out" : 27641 + }, + "name" : "mutate" + }, + { + "id" : "d079424bb6b7b8c7c61d9c5e0ddae445e92fa9ffa2e8690b0a669f7c690542f0", + "events" : { + "duration_in_millis" : 13149, + "in" : 180659, + "out" : 177549 + }, + "matches" : 177546, + "failures" : 2, + "name" : "date" + }, + { + "id" : "25afa60ab6dc30512fe80efa3493e4928b5b1b109765b7dc46a3e4bbf293d2d4", + "events" : { + "duration_in_millis" : 2814, + "in" : 76602, + "out" : 76602 + }, + "name" : "mutate" + }, + { + "id" : "2d9fa8f74eeb137bfa703b8050bad7d76636fface729e4585b789b5fc9bed668", + "events" : { + "duration_in_millis" : 9, + "in" : 934, + "out" : 934 + }, + "name" : "mutate" + }, + { + "id" : "4ed14c9ef0198afe16c31200041e98d321cb5c2e6027e30b077636b8c4842110", + "events" : { + "duration_in_millis" : 173, + "in" : 3110, + "out" : 0 + }, + "name" : "drop" + }, + { + "id" : "358ce1eb387de7cd5711c2fb4de64cd3b12e5ca9a4c45f529516bcb053a31df4", + "events" : { + "duration_in_millis" : 5605, + "in" : 75482, + "out" : 75482 + }, + "name" : "mutate" + }, + { + "id" : "82a9bbb02fff37a63c257c1f146b0a36273c7cbbebe83c0a51f086e5280bf7bb", + "events" : { + "duration_in_millis" : 313992, + "in" : 180659, + "out" : 180659 + }, + "name" : "csv" + }, + { + "id" : "8fb13a8cdd4257b52724d326aa1549603ffdd4e4fde6d20720c96b16238c18c3", + "events" : { + "duration_in_millis" : 0, + "in" : 0, + "out" : 0 + }, + "name" : "mutate" + } + ], + "outputs" : [ + { + "id" : "output-elk", + "documents" : { + "successes" : 221 + }, + "events" : { + "duration_in_millis" : 651386, + "in" : 177549, + "out" : 177549 + }, + "bulk_requests" : { + "successes" : 1, + "responses" : { + "200" : 748 + } + }, + "name" : "elasticsearch" + }, + { + "id" : "output-kafka1", + "events" : { + "duration_in_millis" : 186751, + "in" : 177549, + "out" : 177549 + }, + "name" : "kafka" + }, + { + "id" : "output-kafka2", + "events" : { + "duration_in_millis" : 7335196, + "in" : 177549, + "out" : 177549 + }, + "name" : "kafka" + } + ] + }, + "reloads" : { + "last_error" : null, + "successes" : 0, + "last_success_timestamp" : null, + "last_failure_timestamp" : null, + "failures" : 0 + }, + "queue": { + "events": 103, + "type": "persisted", + "capacity": { + "queue_size_in_bytes": 1872391, + "page_capacity_in_bytes": 67108864, + "max_queue_size_in_bytes": 1073741824, + "max_unread_events": 0 + }, + "data": { + "path": "/var/lib/logstash/queue/main", + "free_space_in_bytes": 36307369984, + "storage_type": "ext4" + } + } + } + } +} +` diff --git a/plugins/inputs/lustre2/README.md b/plugins/inputs/lustre2/README.md new file mode 100644 index 000000000..dbdf58f73 --- /dev/null +++ b/plugins/inputs/lustre2/README.md @@ -0,0 +1,133 @@ +# Lustre Input Plugin + +The [Lustre][]® file system is an open-source, parallel file system that supports +many requirements of leadership class HPC simulation environments. 
+ +This plugin monitors the Lustre file system using its entries in the proc filesystem. + +### Configuration + +```toml +# Read metrics from local Lustre service on OST, MDS +[[inputs.lustre2]] + ## An array of /proc globs to search for Lustre stats + ## If not specified, the default will work on Lustre 2.5.x + ## + # ost_procfiles = [ + # "/proc/fs/lustre/obdfilter/*/stats", + # "/proc/fs/lustre/osd-ldiskfs/*/stats", + # "/proc/fs/lustre/obdfilter/*/job_stats", + # ] + # mds_procfiles = [ + # "/proc/fs/lustre/mdt/*/md_stats", + # "/proc/fs/lustre/mdt/*/job_stats", + # ] +``` + +### Metrics + +From `/proc/fs/lustre/obdfilter/*/stats` and `/proc/fs/lustre/osd-ldiskfs/*/stats`: + +- lustre2 + - tags: + - name + - fields: + - write_bytes + - write_calls + - read_bytes + - read_calls + - cache_hit + - cache_miss + - cache_access + +From `/proc/fs/lustre/obdfilter/*/job_stats`: + +- lustre2 + - tags: + - name + - jobid + - fields: + - jobstats_ost_getattr + - jobstats_ost_setattr + - jobstats_ost_sync + - jobstats_punch + - jobstats_destroy + - jobstats_create + - jobstats_ost_statfs + - jobstats_get_info + - jobstats_set_info + - jobstats_quotactl + - jobstats_read_bytes + - jobstats_read_calls + - jobstats_read_max_size + - jobstats_read_min_size + - jobstats_write_bytes + - jobstats_write_calls + - jobstats_write_max_size + - jobstats_write_min_size + +From `/proc/fs/lustre/mdt/*/md_stats`: + +- lustre2 + - tags: + - name + - fields: + - open + - close + - mknod + - link + - unlink + - mkdir + - rmdir + - rename + - getattr + - setattr + - getxattr + - setxattr + - statfs + - sync + - samedir_rename + - crossdir_rename + +From `/proc/fs/lustre/mdt/*/job_stats`: + +- lustre2 + - tags: + - name + - jobid + - fields: + - jobstats_close + - jobstats_crossdir_rename + - jobstats_getattr + - jobstats_getxattr + - jobstats_link + - jobstats_mkdir + - jobstats_mknod + - jobstats_open + - jobstats_rename + - jobstats_rmdir + - jobstats_samedir_rename + - jobstats_setattr + - jobstats_setxattr + - jobstats_statfs + - jobstats_sync + - jobstats_unlink + + +### Troubleshooting + +Check for the default or custom procfiles in the proc filesystem, and reference +the [Lustre Monitoring and Statistics Guide][guide]. This plugin does not +report all information from these files, only a limited set of items +corresponding to the above metric fields. + +### Example Output + +``` +lustre2,host=oss2,jobid=42990218,name=wrk-OST0041 jobstats_ost_setattr=0i,jobstats_ost_sync=0i,jobstats_punch=0i,jobstats_read_bytes=4096i,jobstats_read_calls=1i,jobstats_read_max_size=4096i,jobstats_read_min_size=4096i,jobstats_write_bytes=310206488i,jobstats_write_calls=7423i,jobstats_write_max_size=53048i,jobstats_write_min_size=8820i 1556525847000000000 +lustre2,host=mds1,jobid=42992017,name=wrk-MDT0000 jobstats_close=31798i,jobstats_crossdir_rename=0i,jobstats_getattr=34146i,jobstats_getxattr=15i,jobstats_link=0i,jobstats_mkdir=658i,jobstats_mknod=0i,jobstats_open=31797i,jobstats_rename=0i,jobstats_rmdir=0i,jobstats_samedir_rename=0i,jobstats_setattr=1788i,jobstats_setxattr=0i,jobstats_statfs=0i,jobstats_sync=0i,jobstats_unlink=0i 1556525828000000000 + +``` + +[lustre]: http://lustre.org/ +[guide]: http://wiki.lustre.org/Lustre_Monitoring_and_Statistics_Guide diff --git a/plugins/inputs/lustre2/lustre2.go b/plugins/inputs/lustre2/lustre2.go index 8ef9223b5..611ba294d 100644 --- a/plugins/inputs/lustre2/lustre2.go +++ b/plugins/inputs/lustre2/lustre2.go @@ -9,23 +9,27 @@ for HPC environments. 
It stores statistics about its activity in package lustre2 import ( + "io/ioutil" "path/filepath" "strconv" "strings" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" ) +type tags struct { + name, job string +} + // Lustre proc files can change between versions, so we want to future-proof // by letting people choose what to look at. type Lustre2 struct { - Ost_procfiles []string - Mds_procfiles []string + Ost_procfiles []string `toml:"ost_procfiles"` + Mds_procfiles []string `toml:"mds_procfiles"` // allFields maps and OST name to the metric fields associated with that OST - allFields map[string]map[string]interface{} + allFields map[tags]map[string]interface{} } var sampleConfig = ` @@ -353,7 +357,7 @@ var wanted_mdt_jobstats_fields = []*mapping{ }, } -func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, acc telegraf.Accumulator) error { +func (l *Lustre2) GetLustreProcStats(fileglob string, wantedFields []*mapping, acc telegraf.Accumulator) error { files, err := filepath.Glob(fileglob) if err != nil { return err @@ -362,48 +366,61 @@ func (l *Lustre2) GetLustreProcStats(fileglob string, wanted_fields []*mapping, for _, file := range files { /* Turn /proc/fs/lustre/obdfilter//stats and similar * into just the object store target name - * Assumpion: the target name is always second to last, + * Assumption: the target name is always second to last, * which is true in Lustre 2.1->2.8 */ path := strings.Split(file, "/") name := path[len(path)-2] - var fields map[string]interface{} - fields, ok := l.allFields[name] - if !ok { - fields = make(map[string]interface{}) - l.allFields[name] = fields - } - lines, err := internal.ReadLines(file) + //lines, err := internal.ReadLines(file) + wholeFile, err := ioutil.ReadFile(file) if err != nil { return err } + jobs := strings.Split(string(wholeFile), "- ") + for _, job := range jobs { + lines := strings.Split(string(job), "\n") + jobid := "" - for _, line := range lines { - parts := strings.Fields(line) - if strings.HasPrefix(line, "- job_id:") { - // Set the job_id explicitly if present - fields["jobid"] = parts[2] + // figure out if the data should be tagged with job_id here + parts := strings.Fields(lines[0]) + if strings.TrimSuffix(parts[0], ":") == "job_id" { + jobid = parts[1] } - for _, wanted := range wanted_fields { - var data uint64 - if strings.TrimSuffix(parts[0], ":") == wanted.inProc { - wanted_field := wanted.field - // if not set, assume field[1]. Shouldn't be field[0], as - // that's a string - if wanted_field == 0 { - wanted_field = 1 + for _, line := range lines { + // skip any empty lines + if len(line) < 1 { + continue + } + parts := strings.Fields(line) + + var fields map[string]interface{} + fields, ok := l.allFields[tags{name, jobid}] + if !ok { + fields = make(map[string]interface{}) + l.allFields[tags{name, jobid}] = fields + } + + for _, wanted := range wantedFields { + var data uint64 + if strings.TrimSuffix(parts[0], ":") == wanted.inProc { + wantedField := wanted.field + // if not set, assume field[1]. 
Shouldn't be field[0], as + // that's a string + if wantedField == 0 { + wantedField = 1 + } + data, err = strconv.ParseUint(strings.TrimSuffix((parts[wantedField]), ","), 10, 64) + if err != nil { + return err + } + reportName := wanted.inProc + if wanted.reportAs != "" { + reportName = wanted.reportAs + } + fields[reportName] = data } - data, err = strconv.ParseUint(strings.TrimSuffix((parts[wanted_field]), ","), 10, 64) - if err != nil { - return err - } - report_name := wanted.inProc - if wanted.reportAs != "" { - report_name = wanted.reportAs - } - fields[report_name] = data } } } @@ -423,7 +440,8 @@ func (l *Lustre2) Description() string { // Gather reads stats from all lustre targets func (l *Lustre2) Gather(acc telegraf.Accumulator) error { - l.allFields = make(map[string]map[string]interface{}) + //l.allFields = make(map[string]map[string]interface{}) + l.allFields = make(map[tags]map[string]interface{}) if len(l.Ost_procfiles) == 0 { // read/write bytes are in obdfilter//stats @@ -483,15 +501,13 @@ func (l *Lustre2) Gather(acc telegraf.Accumulator) error { } } - for name, fields := range l.allFields { + for tgs, fields := range l.allFields { + tags := map[string]string{ - "name": name, + "name": tgs.name, } - if _, ok := fields["jobid"]; ok { - if jobid, ok := fields["jobid"].(string); ok { - tags["jobid"] = jobid - } - delete(fields, "jobid") + if len(tgs.job) > 0 { + tags["jobid"] = tgs.job } acc.AddFields("lustre2", fields, tags) } diff --git a/plugins/inputs/lustre2/lustre2_test.go b/plugins/inputs/lustre2/lustre2_test.go index 5cc9c0e43..8e93da8e8 100644 --- a/plugins/inputs/lustre2/lustre2_test.go +++ b/plugins/inputs/lustre2/lustre2_test.go @@ -6,6 +6,9 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/toml" + "github.com/influxdata/toml/ast" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -39,7 +42,7 @@ cache_miss 11653333250 samples [pages] 1 1 11653333250 ` const obdfilterJobStatsContents = `job_stats: -- job_id: testjob1 +- job_id: cluster-testjob1 snapshot_time: 1461772761 read_bytes: { samples: 1, unit: bytes, min: 4096, max: 4096, sum: 4096 } write_bytes: { samples: 25, unit: bytes, min: 1048576, max: 1048576, sum: 26214400 } @@ -53,6 +56,20 @@ const obdfilterJobStatsContents = `job_stats: get_info: { samples: 0, unit: reqs } set_info: { samples: 0, unit: reqs } quotactl: { samples: 0, unit: reqs } +- job_id: testjob2 + snapshot_time: 1461772761 + read_bytes: { samples: 1, unit: bytes, min: 1024, max: 1024, sum: 1024 } + write_bytes: { samples: 25, unit: bytes, min: 2048, max: 2048, sum: 51200 } + getattr: { samples: 0, unit: reqs } + setattr: { samples: 0, unit: reqs } + punch: { samples: 1, unit: reqs } + sync: { samples: 0, unit: reqs } + destroy: { samples: 0, unit: reqs } + create: { samples: 0, unit: reqs } + statfs: { samples: 0, unit: reqs } + get_info: { samples: 0, unit: reqs } + set_info: { samples: 0, unit: reqs } + quotactl: { samples: 0, unit: reqs } ` const mdtProcContents = `snapshot_time 1438693238.20113 secs.usecs @@ -75,7 +92,7 @@ crossdir_rename 369571 samples [reqs] ` const mdtJobStatsContents = `job_stats: -- job_id: testjob1 +- job_id: cluster-testjob1 snapshot_time: 1461772761 open: { samples: 5, unit: reqs } close: { samples: 4, unit: reqs } @@ -93,6 +110,24 @@ const mdtJobStatsContents = `job_stats: sync: { samples: 2, unit: reqs } samedir_rename: { samples: 705, unit: reqs } crossdir_rename: { samples: 200, unit: reqs } +- job_id: testjob2 + snapshot_time: 1461772761 
+ open: { samples: 6, unit: reqs } + close: { samples: 7, unit: reqs } + mknod: { samples: 8, unit: reqs } + link: { samples: 9, unit: reqs } + unlink: { samples: 20, unit: reqs } + mkdir: { samples: 200, unit: reqs } + rmdir: { samples: 210, unit: reqs } + rename: { samples: 8, unit: reqs } + getattr: { samples: 10, unit: reqs } + setattr: { samples: 2, unit: reqs } + getxattr: { samples: 4, unit: reqs } + setxattr: { samples: 5, unit: reqs } + statfs: { samples: 1207, unit: reqs } + sync: { samples: 3, unit: reqs } + samedir_rename: { samples: 706, unit: reqs } + crossdir_rename: { samples: 201, unit: reqs } ` func TestLustre2GeneratesMetrics(t *testing.T) { @@ -172,7 +207,7 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { tempdir := os.TempDir() + "/telegraf/proc/fs/lustre/" ost_name := "OST0001" - job_name := "testjob1" + job_names := []string{"cluster-testjob1", "testjob2"} mdtdir := tempdir + "/mdt/" err := os.MkdirAll(mdtdir+"/"+ost_name, 0755) @@ -199,12 +234,23 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { err = m.Gather(&acc) require.NoError(t, err) - tags := map[string]string{ - "name": ost_name, - "jobid": job_name, + // make this two tags + // and even further make this dependent on summing per OST + tags := []map[string]string{ + { + "name": ost_name, + "jobid": job_names[0], + }, + { + "name": ost_name, + "jobid": job_names[1], + }, } - fields := map[string]interface{}{ + // make this for two tags + var fields []map[string]interface{} + + fields = append(fields, map[string]interface{}{ "jobstats_read_calls": uint64(1), "jobstats_read_min_size": uint64(4096), "jobstats_read_max_size": uint64(4096), @@ -239,10 +285,86 @@ func TestLustre2GeneratesJobstatsMetrics(t *testing.T) { "jobstats_sync": uint64(2), "jobstats_samedir_rename": uint64(705), "jobstats_crossdir_rename": uint64(200), + }) + + fields = append(fields, map[string]interface{}{ + "jobstats_read_calls": uint64(1), + "jobstats_read_min_size": uint64(1024), + "jobstats_read_max_size": uint64(1024), + "jobstats_read_bytes": uint64(1024), + "jobstats_write_calls": uint64(25), + "jobstats_write_min_size": uint64(2048), + "jobstats_write_max_size": uint64(2048), + "jobstats_write_bytes": uint64(51200), + "jobstats_ost_getattr": uint64(0), + "jobstats_ost_setattr": uint64(0), + "jobstats_punch": uint64(1), + "jobstats_ost_sync": uint64(0), + "jobstats_destroy": uint64(0), + "jobstats_create": uint64(0), + "jobstats_ost_statfs": uint64(0), + "jobstats_get_info": uint64(0), + "jobstats_set_info": uint64(0), + "jobstats_quotactl": uint64(0), + "jobstats_open": uint64(6), + "jobstats_close": uint64(7), + "jobstats_mknod": uint64(8), + "jobstats_link": uint64(9), + "jobstats_unlink": uint64(20), + "jobstats_mkdir": uint64(200), + "jobstats_rmdir": uint64(210), + "jobstats_rename": uint64(8), + "jobstats_getattr": uint64(10), + "jobstats_setattr": uint64(2), + "jobstats_getxattr": uint64(4), + "jobstats_setxattr": uint64(5), + "jobstats_statfs": uint64(1207), + "jobstats_sync": uint64(3), + "jobstats_samedir_rename": uint64(706), + "jobstats_crossdir_rename": uint64(201), + }) + + for index := 0; index < len(fields); index++ { + acc.AssertContainsTaggedFields(t, "lustre2", fields[index], tags[index]) } - acc.AssertContainsTaggedFields(t, "lustre2", fields, tags) + // run this over both tags err = os.RemoveAll(os.TempDir() + "/telegraf") require.NoError(t, err) } + +func TestLustre2CanParseConfiguration(t *testing.T) { + config := []byte(` +[[inputs.lustre2]] + ost_procfiles = [ + 
"/proc/fs/lustre/obdfilter/*/stats", + "/proc/fs/lustre/osd-ldiskfs/*/stats", + ] + mds_procfiles = [ + "/proc/fs/lustre/mdt/*/md_stats", + ]`) + + table, err := toml.Parse([]byte(config)) + require.NoError(t, err) + + inputs, ok := table.Fields["inputs"] + require.True(t, ok) + + lustre2, ok := inputs.(*ast.Table).Fields["lustre2"] + require.True(t, ok) + + var plugin Lustre2 + + require.NoError(t, toml.UnmarshalTable(lustre2.([]*ast.Table)[0], &plugin)) + + assert.Equal(t, Lustre2{ + Ost_procfiles: []string{ + "/proc/fs/lustre/obdfilter/*/stats", + "/proc/fs/lustre/osd-ldiskfs/*/stats", + }, + Mds_procfiles: []string{ + "/proc/fs/lustre/mdt/*/md_stats", + }, + }, plugin) +} diff --git a/plugins/inputs/mailchimp/README.md b/plugins/inputs/mailchimp/README.md new file mode 100644 index 000000000..cf82eb243 --- /dev/null +++ b/plugins/inputs/mailchimp/README.md @@ -0,0 +1,59 @@ +# Mailchimp Input + +Pulls campaign reports from the [Mailchimp API](https://developer.mailchimp.com/). + +### Configuration + +This section contains the default TOML to configure the plugin. You can +generate it using `telegraf --usage mailchimp`. + +```toml +[[inputs.mailchimp]] + ## MailChimp API key + ## get from https://admin.mailchimp.com/account/api/ + api_key = "" # required + + ## Reports for campaigns sent more than days_old ago will not be collected. + ## 0 means collect all and is the default value. + days_old = 0 + + ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old + # campaign_id = "" +``` + +### Metrics + +- mailchimp + - tags: + - id + - campaign_title + - fields: + - emails_sent (integer, emails) + - abuse_reports (integer, reports) + - unsubscribed (integer, unsubscribes) + - hard_bounces (integer, emails) + - soft_bounces (integer, emails) + - syntax_errors (integer, errors) + - forwards_count (integer, emails) + - forwards_opens (integer, emails) + - opens_total (integer, emails) + - unique_opens (integer, emails) + - open_rate (double, percentage) + - clicks_total (integer, clicks) + - unique_clicks (integer, clicks) + - unique_subscriber_clicks (integer, clicks) + - click_rate (double, percentage) + - facebook_recipient_likes (integer, likes) + - facebook_unique_likes (integer, likes) + - facebook_likes (integer, likes) + - industry_type (string, type) + - industry_open_rate (double, percentage) + - industry_click_rate (double, percentage) + - industry_bounce_rate (double, percentage) + - industry_unopen_rate (double, percentage) + - industry_unsub_rate (double, percentage) + - industry_abuse_rate (double, percentage) + - list_stats_sub_rate (double, percentage) + - list_stats_unsub_rate (double, percentage) + - list_stats_open_rate (double, percentage) + - list_stats_click_rate (double, percentage) diff --git a/plugins/inputs/mailchimp/chimp_api.go b/plugins/inputs/mailchimp/chimp_api.go index db0004ce2..a40614b1d 100644 --- a/plugins/inputs/mailchimp/chimp_api.go +++ b/plugins/inputs/mailchimp/chimp_api.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/json" "fmt" + "io" "io/ioutil" "log" "net/http" @@ -134,7 +135,7 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { req.URL.RawQuery = params.String() req.Header.Set("User-Agent", "Telegraf-MailChimp-Plugin") if api.Debug { - log.Printf("D! Request URL: %s", req.URL.String()) + log.Printf("D! 
[inputs.mailchimp] request URL: %s", req.URL.String()) } resp, err := client.Do(req) @@ -143,12 +144,18 @@ func runChimp(api *ChimpAPI, params ReportsParams) ([]byte, error) { } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. + body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return nil, fmt.Errorf("%s returned HTTP status %s: %q", api.url.String(), resp.Status, body) + } + body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } if api.Debug { - log.Printf("D! Response Body:%s", string(body)) + log.Printf("D! [inputs.mailchimp] response Body: %q", string(body)) } if err = chimpErrorCheck(body); err != nil { @@ -171,7 +178,7 @@ type Report struct { Unsubscribed int `json:"unsubscribed"` SendTime string `json:"send_time"` - TimeSeries []TimeSerie + TimeSeries []TimeSeries Bounces Bounces `json:"bounces"` Forwards Forwards `json:"forwards"` Opens Opens `json:"opens"` @@ -230,7 +237,7 @@ type ListStats struct { ClickRate float64 `json:"click_rate"` } -type TimeSerie struct { +type TimeSeries struct { TimeStamp string `json:"timestamp"` EmailsSent int `json:"emails_sent"` UniqueOpens int `json:"unique_opens"` diff --git a/plugins/inputs/mailchimp/mailchimp_test.go b/plugins/inputs/mailchimp/mailchimp_test.go index ed6898e60..0c4dab56d 100644 --- a/plugins/inputs/mailchimp/mailchimp_test.go +++ b/plugins/inputs/mailchimp/mailchimp_test.go @@ -140,7 +140,7 @@ func TestMailChimpGatherReport(t *testing.T) { } -func TestMailChimpGatherErroror(t *testing.T) { +func TestMailChimpGatherError(t *testing.T) { ts := httptest.NewServer( http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { diff --git a/plugins/inputs/marklogic/README.md b/plugins/inputs/marklogic/README.md new file mode 100644 index 000000000..afbfb2824 --- /dev/null +++ b/plugins/inputs/marklogic/README.md @@ -0,0 +1,64 @@ +# MarkLogic Plugin + +The MarkLogic Telegraf plugin gathers health status metrics from one or more host. + +### Configuration: + +```toml +[[inputs.marklogic]] + ## Base URL of the MarkLogic HTTP Server. + url = "http://localhost:8002" + + ## List of specific hostnames to retrieve information. At least (1) required. + # hosts = ["hostname1", "hostname2"] + + ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges + # username = "myuser" + # password = "mypassword" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Metrics + +- marklogic + - tags: + - source (the hostname of the server address, ex. `ml1.local`) + - id (the host node unique id ex. 
`2592913110757471141`) + - fields: + - online + - total_load + - total_rate + - ncpus + - ncores + - total_cpu_stat_user + - total_cpu_stat_system + - total_cpu_stat_idle + - total_cpu_stat_iowait + - memory_process_size + - memory_process_rss + - memory_system_total + - memory_system_free + - memory_process_swap_size + - memory_size + - host_size + - log_device_space + - data_dir_space + - query_read_bytes + - query_read_load + - merge_read_bytes + - merge_write_load + - http_server_receive_bytes + - http_server_send_bytes + +### Example Output: + +``` +$> marklogic,host=localhost,id=2592913110757471141,source=ml1.local total_cpu_stat_iowait=0.0125649003311992,memory_process_swap_size=0i,host_size=380i,data_dir_space=28216i,query_read_load=0i,ncpus=1i,log_device_space=28216i,query_read_bytes=13947332i,merge_write_load=0i,http_server_receive_bytes=225893i,online=true,ncores=4i,total_cpu_stat_user=0.150778993964195,total_cpu_stat_system=0.598927974700928,total_cpu_stat_idle=99.2210006713867,memory_system_total=3947i,memory_system_free=2669i,memory_size=4096i,total_rate=14.7697010040283,http_server_send_bytes=0i,memory_process_size=903i,memory_process_rss=486i,merge_read_load=0i,total_load=0.00502600101754069 1566373000000000000 + +``` diff --git a/plugins/inputs/marklogic/marklogic.go b/plugins/inputs/marklogic/marklogic.go new file mode 100644 index 000000000..699541d14 --- /dev/null +++ b/plugins/inputs/marklogic/marklogic.go @@ -0,0 +1,260 @@ +package marklogic + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Marklogic configuration toml +type Marklogic struct { + URL string `toml:"url"` + Hosts []string `toml:"hosts"` + Username string `toml:"username"` + Password string `toml:"password"` + Sources []string + + tls.ClientConfig + + client *http.Client +} + +type MlPointInt struct { + Value int `json:"value"` +} + +type MlPointFloat struct { + Value float64 `json:"value"` +} + +type MlPointBool struct { + Value bool `json:"value"` +} + +// MarkLogic v2 management api endpoints for hosts status +const statsPath = "/manage/v2/hosts/" +const viewFormat = "view=status&format=json" + +type MlHost struct { + HostStatus struct { + ID string `json:"id"` + Name string `json:"name"` + StatusProperties struct { + Online MlPointBool `json:"online"` + LoadProperties struct { + TotalLoad MlPointFloat `json:"total-load"` + } `json:"load-properties"` + RateProperties struct { + TotalRate MlPointFloat `json:"total-rate"` + } `json:"rate-properties"` + StatusDetail struct { + Cpus MlPointInt `json:"cpus"` + Cores MlPointInt `json:"cores"` + TotalCPUStatUser float64 `json:"total-cpu-stat-user"` + TotalCPUStatSystem float64 `json:"total-cpu-stat-system"` + TotalCPUStatIdle float64 `json:"total-cpu-stat-idle"` + TotalCPUStatIowait float64 `json:"total-cpu-stat-iowait"` + MemoryProcessSize MlPointInt `json:"memory-process-size"` + MemoryProcessRss MlPointInt `json:"memory-process-rss"` + MemorySystemTotal MlPointInt `json:"memory-system-total"` + MemorySystemFree MlPointInt `json:"memory-system-free"` + MemoryProcessSwapSize MlPointInt `json:"memory-process-swap-size"` + MemorySize MlPointInt `json:"memory-size"` + HostSize MlPointInt `json:"host-size"` + LogDeviceSpace MlPointInt `json:"log-device-space"` + DataDirSpace MlPointInt `json:"data-dir-space"` + QueryReadBytes MlPointInt `json:"query-read-bytes"` + QueryReadLoad 
MlPointInt `json:"query-read-load"` + MergeReadLoad MlPointInt `json:"merge-read-load"` + MergeWriteLoad MlPointInt `json:"merge-write-load"` + HTTPServerReceiveBytes MlPointInt `json:"http-server-receive-bytes"` + HTTPServerSendBytes MlPointInt `json:"http-server-send-bytes"` + } `json:"status-detail"` + } `json:"status-properties"` + } `json:"host-status"` +} + +// Description of plugin returned +func (c *Marklogic) Description() string { + return "Retrieves information on a specific host in a MarkLogic Cluster" +} + +var sampleConfig = ` + ## Base URL of the MarkLogic HTTP Server. + url = "http://localhost:8002" + + ## List of specific hostnames to retrieve information. At least (1) required. + # hosts = ["hostname1", "hostname2"] + + ## Using HTTP Basic Authentication. Management API requires 'manage-user' role privileges + # username = "myuser" + # password = "mypassword" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +// Init parse all source URLs and place on the Marklogic struct +func (c *Marklogic) Init() error { + + if len(c.URL) == 0 { + c.URL = "http://localhost:8002/" + } + + for _, u := range c.Hosts { + base, err := url.Parse(c.URL) + if err != nil { + return err + } + + base.Path = path.Join(base.Path, statsPath, u) + addr := base.ResolveReference(base) + + addr.RawQuery = viewFormat + u := addr.String() + c.Sources = append(c.Sources, u) + } + return nil +} + +// SampleConfig to gather stats from localhost, default port. +func (c *Marklogic) SampleConfig() string { + return sampleConfig +} + +// Gather metrics from HTTP Server. +func (c *Marklogic) Gather(accumulator telegraf.Accumulator) error { + var wg sync.WaitGroup + + if c.client == nil { + client, err := c.createHTTPClient() + + if err != nil { + return err + } + c.client = client + } + + // Range over all source URL's appended to the struct + for _, serv := range c.Sources { + //fmt.Printf("Encoded URL is %q\n", serv) + wg.Add(1) + go func(serv string) { + defer wg.Done() + if err := c.fetchAndInsertData(accumulator, serv); err != nil { + accumulator.AddError(fmt.Errorf("[host=%s]: %s", serv, err)) + } + }(serv) + } + + wg.Wait() + + return nil +} + +func (c *Marklogic) fetchAndInsertData(acc telegraf.Accumulator, url string) error { + ml := &MlHost{} + if err := c.gatherJSONData(url, ml); err != nil { + return err + } + + // Build a map of tags + tags := map[string]string{ + "source": ml.HostStatus.Name, + "id": ml.HostStatus.ID, + } + + // Build a map of field values + fields := map[string]interface{}{ + "online": ml.HostStatus.StatusProperties.Online.Value, + "total_load": ml.HostStatus.StatusProperties.LoadProperties.TotalLoad.Value, + "total_rate": ml.HostStatus.StatusProperties.RateProperties.TotalRate.Value, + "ncpus": ml.HostStatus.StatusProperties.StatusDetail.Cpus.Value, + "ncores": ml.HostStatus.StatusProperties.StatusDetail.Cores.Value, + "total_cpu_stat_user": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatUser, + "total_cpu_stat_system": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatSystem, + "total_cpu_stat_idle": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatIdle, + "total_cpu_stat_iowait": ml.HostStatus.StatusProperties.StatusDetail.TotalCPUStatIowait, + "memory_process_size": ml.HostStatus.StatusProperties.StatusDetail.MemoryProcessSize.Value, + "memory_process_rss": 
ml.HostStatus.StatusProperties.StatusDetail.MemoryProcessRss.Value, + "memory_system_total": ml.HostStatus.StatusProperties.StatusDetail.MemorySystemTotal.Value, + "memory_system_free": ml.HostStatus.StatusProperties.StatusDetail.MemorySystemFree.Value, + "memory_process_swap_size": ml.HostStatus.StatusProperties.StatusDetail.MemoryProcessSwapSize.Value, + "memory_size": ml.HostStatus.StatusProperties.StatusDetail.MemorySize.Value, + "host_size": ml.HostStatus.StatusProperties.StatusDetail.HostSize.Value, + "log_device_space": ml.HostStatus.StatusProperties.StatusDetail.LogDeviceSpace.Value, + "data_dir_space": ml.HostStatus.StatusProperties.StatusDetail.DataDirSpace.Value, + "query_read_bytes": ml.HostStatus.StatusProperties.StatusDetail.QueryReadBytes.Value, + "query_read_load": ml.HostStatus.StatusProperties.StatusDetail.QueryReadLoad.Value, + "merge_read_load": ml.HostStatus.StatusProperties.StatusDetail.MergeReadLoad.Value, + "merge_write_load": ml.HostStatus.StatusProperties.StatusDetail.MergeWriteLoad.Value, + "http_server_receive_bytes": ml.HostStatus.StatusProperties.StatusDetail.HTTPServerReceiveBytes.Value, + "http_server_send_bytes": ml.HostStatus.StatusProperties.StatusDetail.HTTPServerSendBytes.Value, + } + + // Accumulate the tags and values + acc.AddFields("marklogic", fields, tags) + + return nil +} + +func (c *Marklogic) createHTTPClient() (*http.Client, error) { + tlsCfg, err := c.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + }, + Timeout: time.Duration(5 * time.Second), + } + + return client, nil +} + +func (c *Marklogic) gatherJSONData(url string, v interface{}) error { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return err + } + + if c.Username != "" || c.Password != "" { + req.SetBasicAuth(c.Username, c.Password) + } + + response, err := c.client.Do(req) + if err != nil { + return err + } + defer response.Body.Close() + if response.StatusCode != http.StatusOK { + return fmt.Errorf("marklogic: API responded with status-code %d, expected %d", + response.StatusCode, http.StatusOK) + } + + if err = json.NewDecoder(response.Body).Decode(v); err != nil { + return err + } + + return nil +} + +func init() { + inputs.Add("marklogic", func() telegraf.Input { + return &Marklogic{} + }) +} diff --git a/plugins/inputs/marklogic/marklogic_test.go b/plugins/inputs/marklogic/marklogic_test.go new file mode 100644 index 000000000..34e4bbd6b --- /dev/null +++ b/plugins/inputs/marklogic/marklogic_test.go @@ -0,0 +1,1282 @@ +package marklogic + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestMarklogic(t *testing.T) { + // Create a test server with the const response JSON + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, response) + })) + defer ts.Close() + + // Parse the URL of the test server, used to verify the expected host + _, err := url.Parse(ts.URL) + require.NoError(t, err) + + // Create a new Marklogic instance with our given test server + + ml := &Marklogic{ + Hosts: []string{"example1"}, + URL: string(ts.URL), + //Sources: []string{"http://localhost:8002/manage/v2/hosts/hostname1?view=status&format=json"}, + } + + // Create a test accumulator + acc := &testutil.Accumulator{} + + // Init() call to parse all source URL's + err = 
ml.Init() + require.NoError(t, err) + + // Gather data from the test server + err = ml.Gather(acc) + require.NoError(t, err) + + // Expect the correct values for all known keys + expectFields := map[string]interface{}{ + "online": true, + "total_load": 0.00429263804107904, + "ncpus": 1, + "ncores": 4, + "total_rate": 15.6527042388916, + "total_cpu_stat_user": 0.276381999254227, + "total_cpu_stat_system": 0.636515974998474, + "total_cpu_stat_idle": 99.0578002929688, + "total_cpu_stat_iowait": 0.0125628001987934, + "memory_process_size": 1234, + "memory_process_rss": 815, + "memory_system_total": 3947, + "memory_system_free": 2761, + "memory_process_swap_size": 0, + "memory_size": 4096, + "host_size": 64, + "log_device_space": 34968, + "data_dir_space": 34968, + "query_read_bytes": 11492428, + "query_read_load": 0, + "merge_read_load": 0, + "merge_write_load": 0, + "http_server_receive_bytes": 285915, + "http_server_send_bytes": 0, + } + // Expect the correct values for all tags + expectTags := map[string]string{ + "source": "ml1.local", + "id": "2592913110757471141", + } + + acc.AssertContainsTaggedFields(t, "marklogic", expectFields, expectTags) + +} + +var response = ` +{ + "host-status": { + "id": "2592913110757471141", + "name": "ml1.local", + "version": "10.0-1", + "effective-version": 10000100, + "host-mode": "normal", + "host-mode-description": "", + "meta": { + "uri": "/manage/v2/hosts/ml1.local?view=status", + "current-time": "2019-07-28T22:32:19.056203Z", + "elapsed-time": { + "units": "sec", + "value": 0.013035 + } + }, + "relations": { + "relation-group": [ + { + "uriref": "/manage/v2/forests?view=status&host-id=ml1.local", + "typeref": "forests", + "relation": [ + { + "uriref": "/manage/v2/forests/App-Services", + "idref": "8573569457346659714", + "nameref": "App-Services" + }, + { + "uriref": "/manage/v2/forests/Documents", + "idref": "17189472171231792168", + "nameref": "Documents" + }, + { + "uriref": "/manage/v2/forests/Extensions", + "idref": "1510244530748962553", + "nameref": "Extensions" + }, + { + "uriref": "/manage/v2/forests/Fab", + "idref": "16221965829238302106", + "nameref": "Fab" + }, + { + "uriref": "/manage/v2/forests/Last-Login", + "idref": "1093671762706318022", + "nameref": "Last-Login" + }, + { + "uriref": "/manage/v2/forests/Meters", + "idref": "1573439446779995954", + "nameref": "Meters" + }, + { + "uriref": "/manage/v2/forests/Modules", + "idref": "18320951141685848719", + "nameref": "Modules" + }, + { + "uriref": "/manage/v2/forests/Schemas", + "idref": "18206720449696085936", + "nameref": "Schemas" + }, + { + "uriref": "/manage/v2/forests/Security", + "idref": "9348728036360382939", + "nameref": "Security" + }, + { + "uriref": "/manage/v2/forests/Triggers", + "idref": "10142793547905338229", + "nameref": "Triggers" + } + ] + }, + { + "typeref": "groups", + "relation": [ + { + "uriref": "/manage/v2/groups/Default?view=status", + "idref": "16808579782544283978", + "nameref": "Default" + } + ] + } + ] + }, + "status-properties": { + "online": { + "units": "bool", + "value": true + }, + "secure": { + "units": "bool", + "value": false + }, + "cache-properties": { + "cache-detail": { + "compressed-tree-cache-partition": [ + { + "partition-size": 64, + "partition-table": 3.40000009536743, + "partition-used": 29.7000007629395, + "partition-free": 70.1999969482422, + "partition-overhead": 0.100000001490116 + } + ], + "expanded-tree-cache-partition": [ + { + "partition-size": 128, + "partition-table": 6.19999980926514, + "partition-busy": 0, + "partition-used": 
87.3000030517578, + "partition-free": 12.3999996185303, + "partition-overhead": 0.300000011920929 + } + ], + "triple-cache-partition": [ + { + "partition-size": 64, + "partition-busy": 0, + "partition-used": 0, + "partition-free": 100 + } + ], + "triple-value-cache-partition": [ + { + "partition-size": 128, + "partition-busy": 0, + "partition-used": 0, + "partition-free": 100, + "value-count": 0, + "value-bytes-total": 0, + "value-bytes-average": 0 + } + ] + } + }, + "load-properties": { + "total-load": { + "units": "sec/sec", + "value": 0.00429263804107904 + }, + "load-detail": { + "query-read-load": { + "units": "sec/sec", + "value": 0 + }, + "journal-write-load": { + "units": "sec/sec", + "value": 0 + }, + "save-write-load": { + "units": "sec/sec", + "value": 0 + }, + "merge-read-load": { + "units": "sec/sec", + "value": 0 + }, + "merge-write-load": { + "units": "sec/sec", + "value": 0 + }, + "backup-read-load": { + "units": "sec/sec", + "value": 0 + }, + "backup-write-load": { + "units": "sec/sec", + "value": 0 + }, + "restore-read-load": { + "units": "sec/sec", + "value": 0 + }, + "restore-write-load": { + "units": "sec/sec", + "value": 0 + }, + "large-read-load": { + "units": "sec/sec", + "value": 0 + }, + "large-write-load": { + "units": "sec/sec", + "value": 0 + }, + "external-binary-read-load": { + "units": "sec/sec", + "value": 0 + }, + "xdqp-client-receive-load": { + "units": "sec/sec", + "value": 0 + }, + "xdqp-client-send-load": { + "units": "sec/sec", + "value": 0 + }, + "xdqp-server-receive-load": { + "units": "sec/sec", + "value": 0 + }, + "xdqp-server-send-load": { + "units": "sec/sec", + "value": 0 + }, + "foreign-xdqp-client-receive-load": { + "units": "sec/sec", + "value": 0 + }, + "foreign-xdqp-client-send-load": { + "units": "sec/sec", + "value": 0 + }, + "foreign-xdqp-server-receive-load": { + "units": "sec/sec", + "value": 0 + }, + "foreign-xdqp-server-send-load": { + "units": "sec/sec", + "value": 0 + }, + "read-lock-wait-load": { + "units": "sec/sec", + "value": 0 + }, + "read-lock-hold-load": { + "units": "sec/sec", + "value": 0 + }, + "write-lock-wait-load": { + "units": "sec/sec", + "value": 0 + }, + "write-lock-hold-load": { + "units": "sec/sec", + "value": 0.00429263804107904 + }, + "deadlock-wait-load": { + "units": "sec/sec", + "value": 0 + } + } + }, + "rate-properties": { + "total-rate": { + "units": "MB/sec", + "value": 15.6527042388916 + }, + "rate-detail": { + "memory-system-pagein-rate": { + "units": "MB/sec", + "value": 0 + }, + "memory-system-pageout-rate": { + "units": "MB/sec", + "value": 15.6420001983643 + }, + "memory-system-swapin-rate": { + "units": "MB/sec", + "value": 0 + }, + "memory-system-swapout-rate": { + "units": "MB/sec", + "value": 0 + }, + "query-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "journal-write-rate": { + "units": "MB/sec", + "value": 0.00372338597662747 + }, + "save-write-rate": { + "units": "MB/sec", + "value": 0.0024786819703877 + }, + "merge-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "merge-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "backup-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "backup-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "restore-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "restore-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "large-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "large-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "external-binary-read-rate": { + "units": "MB/sec", + "value": 0 + }, + 
"xdqp-client-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdqp-client-send-rate": { + "units": "MB/sec", + "value": 0.00293614692054689 + }, + "xdqp-server-receive-rate": { + "units": "MB/sec", + "value": 0.00156576896551996 + }, + "xdqp-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-client-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-client-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "read-lock-rate": { + "units": "MB/sec", + "value": 0 + }, + "write-lock-rate": { + "units": "MB/sec", + "value": 0.251882910728455 + }, + "deadlock-rate": { + "units": "MB/sec", + "value": 0 + } + } + }, + "status-detail": { + "bind-port": 7999, + "connect-port": 7999, + "ssl-fips-enabled": { + "units": "bool", + "value": true + }, + "foreign-bind-port": 7998, + "foreign-connect-port": 7998, + "background-io-limit": { + "units": "quantity", + "value": 0 + }, + "metering-enabled": { + "units": "bool", + "value": true + }, + "meters-database": { + "units": "quantity", + "value": "11952918530142281790" + }, + "performance-metering-enabled": { + "units": "bool", + "value": true + }, + "performance-metering-period": { + "units": "second", + "value": 60 + }, + "performance-metering-retain-raw": { + "units": "day", + "value": 7 + }, + "performance-metering-retain-hourly": { + "units": "day", + "value": 30 + }, + "performance-metering-retain-daily": { + "units": "day", + "value": 90 + }, + "last-startup": { + "units": "datetime", + "value": "2019-07-26T17:23:36.412644Z" + }, + "version": "10.0-1", + "effective-version": { + "units": "quantity", + "value": 10000100 + }, + "software-version": { + "units": "quantity", + "value": 10000100 + }, + "os-version": "NA", + "converters-version": "10.0-1", + "host-mode": { + "units": "enum", + "value": "normal" + }, + "architecture": "x86_64", + "platform": "linux", + "license-key": "000-000-000-000-000-000-000", + "licensee": "NA", + "license-key-expires": { + "units": "datetime", + "value": "2999-01-23T00:00:00Z" + }, + "license-key-cpus": { + "units": "quantity", + "value": 0 + }, + "license-key-cores": { + "units": "quantity", + "value": 0 + }, + "license-key-size": { + "units": "MB", + "value": 0 + }, + "license-key-option": [ + { + "units": "enum", + "value": "conversion" + }, + { + "units": "enum", + "value": "failover" + }, + { + "units": "enum", + "value": "alerting" + }, + { + "units": "enum", + "value": "geospatial" + }, + { + "units": "enum", + "value": "flexible replication" + }, + { + "units": "enum", + "value": "tiered storage" + }, + { + "units": "enum", + "value": "semantics" + }, + { + "units": "enum", + "value": "French" + }, + { + "units": "enum", + "value": "Italian" + }, + { + "units": "enum", + "value": "German" + }, + { + "units": "enum", + "value": "Spanish" + }, + { + "units": "enum", + "value": "Traditional Chinese" + }, + { + "units": "enum", + "value": "Simplified Chinese" + }, + { + "units": "enum", + "value": "Arabic" + }, + { + "units": "enum", + "value": "Russian" + }, + { + "units": "enum", + "value": "Dutch" + }, + { + "units": "enum", + "value": "Korean" + }, + { + "units": "enum", + "value": "Persian" + }, + { + "units": "enum", + "value": "Japanese" + }, + { + "units": "enum", + "value": "Portuguese" + }, + { + "units": "enum", + "value": "English" + } + ], + "edition": { + "units": "enum", + "value": 
"Enterprise Edition" + }, + "environment": { + "units": "enum", + "value": "developer" + }, + "cpus": { + "units": "quantity", + "value": 1 + }, + "cores": { + "units": "quantity", + "value": 4 + }, + "core-threads": { + "units": "quantity", + "value": 4 + }, + "total-cpu-stat-user": 0.276381999254227, + "total-cpu-stat-nice": 0, + "total-cpu-stat-system": 0.636515974998474, + "total-cpu-stat-idle": 99.0578002929688, + "total-cpu-stat-iowait": 0.0125628001987934, + "total-cpu-stat-irq": 0, + "total-cpu-stat-softirq": 0.0167504008859396, + "total-cpu-stat-steal": 0, + "total-cpu-stat-guest": 0, + "total-cpu-stat-guest-nice": 0, + "memory-process-size": { + "units": "fraction", + "value": 1234 + }, + "memory-process-rss": { + "units": "fraction", + "value": 815 + }, + "memory-process-anon": { + "units": "fraction", + "value": 743 + }, + "memory-process-rss-hwm": { + "units": "fraction", + "value": 1072 + }, + "memory-process-swap-size": { + "units": "fraction", + "value": 0 + }, + "memory-process-huge-pages-size": { + "units": "fraction", + "value": 0 + }, + "memory-system-total": { + "units": "fraction", + "value": 3947 + }, + "memory-system-free": { + "units": "fraction", + "value": 2761 + }, + "memory-system-pagein-rate": { + "units": "fraction", + "value": 0 + }, + "memory-system-pageout-rate": { + "units": "fraction", + "value": 15.6420001983643 + }, + "memory-system-swapin-rate": { + "units": "fraction", + "value": 0 + }, + "memory-system-swapout-rate": { + "units": "fraction", + "value": 0 + }, + "memory-size": { + "units": "quantity", + "value": 4096 + }, + "memory-file-size": { + "units": "quantity", + "value": 5 + }, + "memory-forest-size": { + "units": "quantity", + "value": 849 + }, + "memory-unclosed-size": { + "units": "quantity", + "value": 0 + }, + "memory-cache-size": { + "units": "quantity", + "value": 320 + }, + "memory-registry-size": { + "units": "quantity", + "value": 1 + }, + "memory-join-size": { + "units": "quantity", + "value": 0 + }, + "host-size": { + "units": "MB", + "value": 64 + }, + "host-large-data-size": { + "units": "MB", + "value": 0 + }, + "log-device-space": { + "units": "MB", + "value": 34968 + }, + "data-dir-space": { + "units": "MB", + "value": 34968 + }, + "query-read-bytes": { + "units": "bytes", + "value": 11492428 + }, + "query-read-time": { + "units": "time", + "value": "PT0.141471S" + }, + "query-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "query-read-load": { + "units": "", + "value": 0 + }, + "journal-write-bytes": { + "units": "bytes", + "value": 285717868 + }, + "journal-write-time": { + "units": "time", + "value": "PT17.300832S" + }, + "journal-write-rate": { + "units": "MB/sec", + "value": 0.00372338597662747 + }, + "journal-write-load": { + "units": "", + "value": 0 + }, + "save-write-bytes": { + "units": "bytes", + "value": 95818597 + }, + "save-write-time": { + "units": "time", + "value": "PT2.972855S" + }, + "save-write-rate": { + "units": "MB/sec", + "value": 0.0024786819703877 + }, + "save-write-load": { + "units": "", + "value": 0 + }, + "merge-read-bytes": { + "units": "bytes", + "value": 55374848 + }, + "merge-read-time": { + "units": "time", + "value": "PT0.535705S" + }, + "merge-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "merge-read-load": { + "units": "", + "value": 0 + }, + "merge-write-bytes": { + "units": "bytes", + "value": 146451731 + }, + "merge-write-time": { + "units": "time", + "value": "PT5.392288S" + }, + "merge-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "merge-write-load": { + 
"units": "", + "value": 0 + }, + "backup-read-bytes": { + "units": "bytes", + "value": 0 + }, + "backup-read-time": { + "units": "time", + "value": "PT0S" + }, + "backup-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "backup-read-load": { + "units": "", + "value": 0 + }, + "backup-write-bytes": { + "units": "bytes", + "value": 0 + }, + "backup-write-time": { + "units": "time", + "value": "PT0S" + }, + "backup-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "backup-write-load": { + "units": "", + "value": 0 + }, + "restore-read-bytes": { + "units": "bytes", + "value": 0 + }, + "restore-read-time": { + "units": "time", + "value": "PT0S" + }, + "restore-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "restore-read-load": { + "units": "", + "value": 0 + }, + "restore-write-bytes": { + "units": "bytes", + "value": 0 + }, + "restore-write-time": { + "units": "time", + "value": "PT0S" + }, + "restore-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "restore-write-load": { + "units": "", + "value": 0 + }, + "large-read-bytes": { + "units": "bytes", + "value": 0 + }, + "large-read-time": { + "units": "time", + "value": "PT0S" + }, + "large-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "large-read-load": { + "units": "", + "value": 0 + }, + "large-write-bytes": { + "units": "bytes", + "value": 0 + }, + "large-write-time": { + "units": "time", + "value": "PT0S" + }, + "large-write-rate": { + "units": "MB/sec", + "value": 0 + }, + "large-write-load": { + "units": "", + "value": 0 + }, + "external-binary-read-bytes": { + "units": "bytes", + "value": 0 + }, + "external-binary-read-time": { + "units": "time", + "value": "PT0S" + }, + "external-binary-read-rate": { + "units": "MB/sec", + "value": 0 + }, + "external-binary-read-load": { + "units": "", + "value": 0 + }, + "webDAV-server-receive-bytes": { + "units": "bytes", + "value": 0 + }, + "webDAV-server-receive-time": { + "units": "sec", + "value": "PT0S" + }, + "webDAV-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "webDAV-server-receive-load": { + "units": "", + "value": 0 + }, + "webDAV-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "webDAV-server-send-time": { + "units": "sec", + "value": "PT0S" + }, + "webDAV-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "webDAV-server-send-load": { + "units": "", + "value": 0 + }, + "http-server-receive-bytes": { + "units": "bytes", + "value": 285915 + }, + "http-server-receive-time": { + "units": "sec", + "value": "PT0.02028S" + }, + "http-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "http-server-receive-load": { + "units": "", + "value": 0 + }, + "http-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "http-server-send-time": { + "units": "sec", + "value": "PT0S" + }, + "http-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "http-server-send-load": { + "units": "", + "value": 0 + }, + "xdbc-server-receive-bytes": { + "units": "bytes", + "value": 0 + }, + "xdbc-server-receive-time": { + "units": "sec", + "value": "PT0S" + }, + "xdbc-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdbc-server-receive-load": { + "units": "", + "value": 0 + }, + "xdbc-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "xdbc-server-send-time": { + "units": "sec", + "value": "PT0S" + }, + "xdbc-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdbc-server-send-load": { + "units": "", + "value": 0 + }, + "odbc-server-receive-bytes": { + "units": "bytes", + 
"value": 0 + }, + "odbc-server-receive-time": { + "units": "sec", + "value": "PT0S" + }, + "odbc-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "odbc-server-receive-load": { + "units": "", + "value": 0 + }, + "odbc-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "odbc-server-send-time": { + "units": "sec", + "value": "PT0S" + }, + "odbc-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "odbc-server-send-load": { + "units": "", + "value": 0 + }, + "xdqp-client-receive-bytes": { + "units": "bytes", + "value": 3020032 + }, + "xdqp-client-receive-time": { + "units": "time", + "value": "PT0.046612S" + }, + "xdqp-client-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdqp-client-receive-load": { + "units": "", + "value": 0 + }, + "xdqp-client-send-bytes": { + "units": "bytes", + "value": 163513952 + }, + "xdqp-client-send-time": { + "units": "time", + "value": "PT22.700289S" + }, + "xdqp-client-send-rate": { + "units": "MB/sec", + "value": 0.00293614692054689 + }, + "xdqp-client-send-load": { + "units": "", + "value": 0 + }, + "xdqp-server-receive-bytes": { + "units": "bytes", + "value": 131973888 + }, + "xdqp-server-receive-time": { + "units": "time", + "value": "PT3.474521S" + }, + "xdqp-server-receive-rate": { + "units": "MB/sec", + "value": 0.00156576896551996 + }, + "xdqp-server-receive-load": { + "units": "", + "value": 0 + }, + "xdqp-server-send-bytes": { + "units": "bytes", + "value": 10035300 + }, + "xdqp-server-send-time": { + "units": "time", + "value": "PT4.275597S" + }, + "xdqp-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "xdqp-server-send-load": { + "units": "", + "value": 0 + }, + "xdqp-server-request-time": { + "units": "milliseconds", + "value": 0.743777990341187 + }, + "xdqp-server-request-rate": { + "units": "requests/sec", + "value": 0.371862411499023 + }, + "foreign-xdqp-client-receive-bytes": { + "units": "bytes", + "value": 0 + }, + "foreign-xdqp-client-receive-time": { + "units": "time", + "value": "PT0S" + }, + "foreign-xdqp-client-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-client-receive-load": { + "units": "", + "value": 0 + }, + "foreign-xdqp-client-send-bytes": { + "units": "bytes", + "value": 0 + }, + "foreign-xdqp-client-send-time": { + "units": "time", + "value": "PT0S" + }, + "foreign-xdqp-client-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-client-send-load": { + "units": "", + "value": 0 + }, + "foreign-xdqp-server-receive-bytes": { + "units": "bytes", + "value": 0 + }, + "foreign-xdqp-server-receive-time": { + "units": "time", + "value": "PT0S" + }, + "foreign-xdqp-server-receive-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-server-receive-load": { + "units": "", + "value": 0 + }, + "foreign-xdqp-server-send-bytes": { + "units": "bytes", + "value": 0 + }, + "foreign-xdqp-server-send-time": { + "units": "time", + "value": "PT0S" + }, + "foreign-xdqp-server-send-rate": { + "units": "MB/sec", + "value": 0 + }, + "foreign-xdqp-server-send-load": { + "units": "", + "value": 0 + }, + "read-lock-count": { + "units": "locks", + "value": 104 + }, + "read-lock-wait-time": { + "units": "seconds", + "value": "PT0.001464S" + }, + "read-lock-hold-time": { + "units": "seconds", + "value": "PT3.022913S" + }, + "read-lock-rate": { + "units": "locks/sec", + "value": 0 + }, + "read-lock-wait-load": { + "units": "", + "value": 0 + }, + "read-lock-hold-load": { + "units": "", + "value": 0 + }, + "write-lock-count": { + "units": "locks", + 
"value": 15911 + }, + "write-lock-wait-time": { + "units": "seconds", + "value": "PT0.317098S" + }, + "write-lock-hold-time": { + "units": "seconds", + "value": "PT11M46.9923759S" + }, + "write-lock-rate": { + "units": "locks/sec", + "value": 0.251882910728455 + }, + "write-lock-wait-load": { + "units": "", + "value": 0 + }, + "write-lock-hold-load": { + "units": "", + "value": 0.00429263804107904 + }, + "deadlock-count": { + "units": "locks", + "value": 0 + }, + "deadlock-wait-time": { + "units": "seconds", + "value": "PT0S" + }, + "deadlock-rate": { + "units": "locks/sec", + "value": 0 + }, + "deadlock-wait-load": { + "units": "", + "value": 0 + }, + "external-kms-request-rate": { + "units": "requests/sec", + "value": 0 + }, + "external-kms-request-time": { + "units": "milliseconds", + "value": 0 + }, + "keystore-status": "normal", + "ldap-request-rate": { + "units": "requests/sec", + "value": 0 + }, + "ldap-request-time": { + "units": "milliseconds", + "value": 0 + } + } + }, + "related-views": { + "related-view": [ + { + "view-type": "item", + "view-name": "default", + "view-uri": "/manage/v2/hosts/example" + } + ] + } + } +} +` diff --git a/plugins/inputs/mem/README.md b/plugins/inputs/mem/README.md new file mode 100644 index 000000000..87280d8d2 --- /dev/null +++ b/plugins/inputs/mem/README.md @@ -0,0 +1,60 @@ +# Mem Input Plugin + +The mem plugin collects system memory metrics. + +For a more complete explanation of the difference between *used* and +*actual_used* RAM, see [Linux ate my ram](http://www.linuxatemyram.com/). + +### Configuration: +```toml +# Read metrics about memory usage +[[inputs.mem]] + # no configuration +``` + +### Metrics: + +Available fields are dependent on platform. + +- mem + - fields: + - active (integer) + - available (integer) + - buffered (integer) + - cached (integer) + - free (integer) + - inactive (integer) + - slab (integer) + - total (integer) + - used (integer) + - available_percent (float) + - used_percent (float) + - wired (integer) + - commit_limit (integer) + - committed_as (integer) + - dirty (integer) + - high_free (integer) + - high_total (integer) + - huge_page_size (integer) + - huge_pages_free (integer) + - huge_pages_total (integer) + - low_free (integer) + - low_total (integer) + - mapped (integer) + - page_tables (integer) + - shared (integer) + - sreclaimable (integer) + - sunreclaim (integer) + - swap_cached (integer) + - swap_free (integer) + - swap_total (integer) + - vmalloc_chunk (integer) + - vmalloc_total (integer) + - vmalloc_used (integer) + - write_back (integer) + - write_back_tmp (integer) + +### Example Output: +``` +mem active=9299595264i,available=16818249728i,available_percent=80.41654254645131,buffered=2383761408i,cached=13316689920i,commit_limit=14751920128i,committed_as=11781156864i,dirty=122880i,free=1877688320i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=7549939712i,low_free=0i,low_total=0i,mapped=416763904i,page_tables=19787776i,shared=670679040i,slab=2081071104i,sreclaimable=1923395584i,sunreclaim=157675520i,swap_cached=1302528i,swap_free=4286128128i,swap_total=4294963200i,total=20913917952i,used=3335778304i,used_percent=15.95004011996231,vmalloc_chunk=0i,vmalloc_total=35184372087808i,vmalloc_used=0i,wired=0i,write_back=0i,write_back_tmp=0i 1574712869000000000 +``` diff --git a/plugins/inputs/system/memory.go b/plugins/inputs/mem/memory.go similarity index 52% rename from plugins/inputs/system/memory.go rename to plugins/inputs/mem/memory.go index 
b44fabc49..daae390b8 100644 --- a/plugins/inputs/system/memory.go +++ b/plugins/inputs/mem/memory.go @@ -1,14 +1,15 @@ -package system +package mem import ( "fmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type MemStats struct { - ps PS + ps system.PS } func (_ *MemStats) Description() string { @@ -36,6 +37,29 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { "slab": vm.Slab, "used_percent": 100 * float64(vm.Used) / float64(vm.Total), "available_percent": 100 * float64(vm.Available) / float64(vm.Total), + "commit_limit": vm.CommitLimit, + "committed_as": vm.CommittedAS, + "dirty": vm.Dirty, + "high_free": vm.HighFree, + "high_total": vm.HighTotal, + "huge_page_size": vm.HugePageSize, + "huge_pages_free": vm.HugePagesFree, + "huge_pages_total": vm.HugePagesTotal, + "low_free": vm.LowFree, + "low_total": vm.LowTotal, + "mapped": vm.Mapped, + "page_tables": vm.PageTables, + "shared": vm.Shared, + "sreclaimable": vm.SReclaimable, + "sunreclaim": vm.SUnreclaim, + "swap_cached": vm.SwapCached, + "swap_free": vm.SwapFree, + "swap_total": vm.SwapTotal, + "vmalloc_chunk": vm.VMallocChunk, + "vmalloc_total": vm.VMallocTotal, + "vmalloc_used": vm.VMallocUsed, + "write_back": vm.Writeback, + "write_back_tmp": vm.WritebackTmp, } acc.AddGauge("mem", fields, nil) @@ -43,7 +67,7 @@ func (s *MemStats) Gather(acc telegraf.Accumulator) error { } func init() { - ps := newSystemPS() + ps := system.NewSystemPS() inputs.Add("mem", func() telegraf.Input { return &MemStats{ps: ps} }) diff --git a/plugins/inputs/mem/memory_test.go b/plugins/inputs/mem/memory_test.go new file mode 100644 index 000000000..653010fa8 --- /dev/null +++ b/plugins/inputs/mem/memory_test.go @@ -0,0 +1,100 @@ +package mem + +import ( + "testing" + + "github.com/influxdata/telegraf/plugins/inputs/system" + "github.com/influxdata/telegraf/testutil" + "github.com/shirou/gopsutil/mem" + "github.com/stretchr/testify/require" +) + +func TestMemStats(t *testing.T) { + var mps system.MockPS + var err error + defer mps.AssertExpectations(t) + var acc testutil.Accumulator + + vms := &mem.VirtualMemoryStat{ + Total: 12400, + Available: 7600, + Used: 5000, + Free: 1235, + Active: 8134, + Inactive: 1124, + Slab: 1234, + Wired: 134, + // Buffers: 771, + // Cached: 4312, + // Shared: 2142, + CommitLimit: 1, + CommittedAS: 118680, + Dirty: 4, + HighFree: 0, + HighTotal: 0, + HugePageSize: 4096, + HugePagesFree: 0, + HugePagesTotal: 0, + LowFree: 69936, + LowTotal: 255908, + Mapped: 42236, + PageTables: 1236, + Shared: 0, + SReclaimable: 1923022848, + SUnreclaim: 157728768, + SwapCached: 0, + SwapFree: 524280, + SwapTotal: 524280, + VMallocChunk: 3872908, + VMallocTotal: 3874808, + VMallocUsed: 1416, + Writeback: 0, + WritebackTmp: 0, + } + + mps.On("VMStat").Return(vms, nil) + + err = (&MemStats{&mps}).Gather(&acc) + require.NoError(t, err) + + memfields := map[string]interface{}{ + "total": uint64(12400), + "available": uint64(7600), + "used": uint64(5000), + "available_percent": float64(7600) / float64(12400) * 100, + "used_percent": float64(5000) / float64(12400) * 100, + "free": uint64(1235), + "cached": uint64(0), + "buffered": uint64(0), + "active": uint64(8134), + "inactive": uint64(1124), + "wired": uint64(134), + "slab": uint64(1234), + "commit_limit": uint64(1), + "committed_as": uint64(118680), + "dirty": uint64(4), + "high_free": uint64(0), + "high_total": uint64(0), + "huge_page_size": uint64(4096), + "huge_pages_free": uint64(0), + 
"huge_pages_total": uint64(0), + "low_free": uint64(69936), + "low_total": uint64(255908), + "mapped": uint64(42236), + "page_tables": uint64(1236), + "shared": uint64(0), + "sreclaimable": uint64(1923022848), + "sunreclaim": uint64(157728768), + "swap_cached": uint64(0), + "swap_free": uint64(524280), + "swap_total": uint64(524280), + "vmalloc_chunk": uint64(3872908), + "vmalloc_total": uint64(3874808), + "vmalloc_used": uint64(1416), + "write_back": uint64(0), + "write_back_tmp": uint64(0), + } + acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string)) + + acc.Metrics = nil +} diff --git a/plugins/inputs/memcached/README.md b/plugins/inputs/memcached/README.md index 16a19973b..721be9130 100644 --- a/plugins/inputs/memcached/README.md +++ b/plugins/inputs/memcached/README.md @@ -20,31 +20,46 @@ The fields from this plugin are gathered in the *memcached* measurement. Fields: -* get_hits - Number of keys that have been requested and found present -* get_misses - Number of items that have been requested and not found -* evictions - Number of valid items removed from cache to free memory for new items -* limit_maxbytes - Number of bytes this server is allowed to use for storage +* accepting_conns - Whether or not server is accepting conns +* auth_cmds - Number of authentication commands handled, success or failure +* auth_errors - Number of failed authentications * bytes - Current number of bytes used to store items -* uptime - Number of secs since the server started -* curr_items - Current number of items stored -* total_items - Total number of items stored since the server started -* curr_connections - Number of open connections -* total_connections - Total number of connections opened since the server started running -* connection_structures - Number of connection structures allocated by the server -* cmd_get - Cumulative number of retrieval reqs -* cmd_set - Cumulative number of storage reqs -* delete_hits - Number of deletion reqs resulting in an item being removed -* delete_misses - umber of deletions reqs for missing keys -* incr_hits - Number of successful incr reqs -* incr_misses - Number of incr reqs against missing keys -* decr_hits - Number of successful decr reqs -* decr_misses - Number of decr reqs against missing keys -* cas_hits - Number of successful CAS reqs -* cas_misses - Number of CAS reqs against missing keys * bytes_read - Total number of bytes read by this server from network * bytes_written - Total number of bytes sent by this server to network -* threads - Number of worker threads requested +* cas_badval - Number of CAS reqs for which a key was found, but the CAS value did not match +* cas_hits - Number of successful CAS reqs +* cas_misses - Number of CAS reqs against missing keys +* cmd_flush - Cumulative number of flush reqs +* cmd_get - Cumulative number of retrieval reqs +* cmd_set - Cumulative number of storage reqs +* cmd_touch - Cumulative number of touch reqs * conn_yields - Number of times any connection yielded to another due to hitting the -R limit +* connection_structures - Number of connection structures allocated by the server +* curr_connections - Number of open connections +* curr_items - Current number of items stored +* decr_hits - Number of successful decr reqs +* decr_misses - Number of decr reqs against missing keys +* delete_hits - Number of deletion reqs resulting in an item being removed +* delete_misses - umber of deletions reqs for missing keys +* evicted_unfetched - Items evicted from LRU that were never touched by 
get/incr/append/etc +* evictions - Number of valid items removed from cache to free memory for new items +* expired_unfetched - Items pulled from LRU that were never touched by get/incr/append/etc before expiring +* get_hits - Number of keys that have been requested and found present +* get_misses - Number of items that have been requested and not found +* hash_bytes - Bytes currently used by hash tables +* hash_is_expanding - Indicates if the hash table is being grown to a new size +* hash_power_level - Current size multiplier for hash table +* incr_hits - Number of successful incr reqs +* incr_misses - Number of incr reqs against missing keys +* limit_maxbytes - Number of bytes this server is allowed to use for storage +* listen_disabled_num - Number of times server has stopped accepting new connections (maxconns) +* reclaimed - Number of times an entry was stored using memory from an expired entry +* threads - Number of worker threads requested +* total_connections - Total number of connections opened since the server started running +* total_items - Total number of items stored since the server started +* touch_hits - Number of keys that have been touched with a new expiration time +* touch_misses - Number of items that have been touched and not found +* uptime - Number of secs since the server started Description of gathered fields taken from [here](https://github.com/memcached/memcached/blob/master/doc/protocol.txt). diff --git a/plugins/inputs/memcached/memcached.go b/plugins/inputs/memcached/memcached.go index 2b6b120c8..99128263a 100644 --- a/plugins/inputs/memcached/memcached.go +++ b/plugins/inputs/memcached/memcached.go @@ -29,31 +29,46 @@ var defaultTimeout = 5 * time.Second // The list of metrics that should be sent var sendMetrics = []string{ - "get_hits", - "get_misses", - "evictions", - "limit_maxbytes", + "accepting_conns", + "auth_cmds", + "auth_errors", "bytes", - "uptime", - "curr_items", - "total_items", - "curr_connections", - "total_connections", - "connection_structures", - "cmd_get", - "cmd_set", - "delete_hits", - "delete_misses", - "incr_hits", - "incr_misses", - "decr_hits", - "decr_misses", - "cas_hits", - "cas_misses", "bytes_read", "bytes_written", - "threads", + "cas_badval", + "cas_hits", + "cas_misses", + "cmd_flush", + "cmd_get", + "cmd_set", + "cmd_touch", "conn_yields", + "connection_structures", + "curr_connections", + "curr_items", + "decr_hits", + "decr_misses", + "delete_hits", + "delete_misses", + "evicted_unfetched", + "evictions", + "expired_unfetched", + "get_hits", + "get_misses", + "hash_bytes", + "hash_is_expanding", + "hash_power_level", + "incr_hits", + "incr_misses", + "limit_maxbytes", + "listen_disabled_num", + "reclaimed", + "threads", + "total_connections", + "total_items", + "touch_hits", + "touch_misses", + "uptime", } // SampleConfig returns sample configuration message diff --git a/plugins/inputs/mesos/README.md b/plugins/inputs/mesos/README.md index b18908b8a..284588188 100644 --- a/plugins/inputs/mesos/README.md +++ b/plugins/inputs/mesos/README.md @@ -10,8 +10,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso [[inputs.mesos]] ## Timeout, in ms. timeout = 100 + ## A list of Mesos masters. masters = ["http://localhost:5050"] + ## Master metrics groups to be collected, by default, all enabled. 
master_collections = [ "resources", @@ -19,13 +21,17 @@ For more information, please check the [Mesos Observability Metrics](http://meso "system", "agents", "frameworks", + "framework_offers", "tasks", "messages", "evqueue", "registrar", + "allocator", ] + ## A list of Mesos slaves, default is [] # slaves = [] + ## Slave metrics groups to be collected, by default, all enabled. # slave_collections = [ # "resources", @@ -100,6 +106,10 @@ Mesos master metric groups - master/slaves_connected - master/slaves_disconnected - master/slaves_inactive + - master/slave_unreachable_canceled + - master/slave_unreachable_completed + - master/slave_unreachable_scheduled + - master/slaves_unreachable - frameworks - master/frameworks_active @@ -108,6 +118,22 @@ Mesos master metric groups - master/frameworks_inactive - master/outstanding_offers +- framework offers + - master/frameworks/subscribed + - master/frameworks/calls_total + - master/frameworks/calls + - master/frameworks/events_total + - master/frameworks/events + - master/frameworks/operations_total + - master/frameworks/operations + - master/frameworks/tasks/active + - master/frameworks/tasks/terminal + - master/frameworks/offers/sent + - master/frameworks/offers/accepted + - master/frameworks/offers/declined + - master/frameworks/offers/rescinded + - master/frameworks/roles/suppressed + - tasks - master/tasks_error - master/tasks_failed @@ -117,6 +143,11 @@ Mesos master metric groups - master/tasks_running - master/tasks_staging - master/tasks_starting + - master/tasks_dropped + - master/tasks_gone + - master/tasks_gone_by_operator + - master/tasks_killing + - master/tasks_unreachable - messages - master/invalid_executor_to_framework_messages @@ -155,11 +186,17 @@ Mesos master metric groups - master/task_lost/source_master/reason_slave_removed - master/task_lost/source_slave/reason_executor_terminated - master/valid_executor_to_framework_messages + - master/invalid_operation_status_update_acknowledgements + - master/messages_operation_status_update_acknowledgement + - master/messages_reconcile_operations + - master/messages_suppress_offers + - master/valid_operation_status_update_acknowledgements - evqueue - master/event_queue_dispatches - master/event_queue_http_requests - master/event_queue_messages + - master/operator_event_stream_subscribers - registrar - registrar/state_fetch_ms @@ -172,6 +209,45 @@ Mesos master metric groups - registrar/state_store_ms/p99 - registrar/state_store_ms/p999 - registrar/state_store_ms/p9999 + - registrar/state_store_ms/count + - registrar/log/ensemble_size + - registrar/log/recovered + - registrar/queued_operations + - registrar/registry_size_bytes + +- allocator + - allocator/allocation_run_ms + - allocator/allocation_run_ms/count + - allocator/allocation_run_ms/max + - allocator/allocation_run_ms/min + - allocator/allocation_run_ms/p50 + - allocator/allocation_run_ms/p90 + - allocator/allocation_run_ms/p95 + - allocator/allocation_run_ms/p99 + - allocator/allocation_run_ms/p999 + - allocator/allocation_run_ms/p9999 + - allocator/allocation_runs + - allocator/allocation_run_latency_ms + - allocator/allocation_run_latency_ms/count + - allocator/allocation_run_latency_ms/max + - allocator/allocation_run_latency_ms/min + - allocator/allocation_run_latency_ms/p50 + - allocator/allocation_run_latency_ms/p90 + - allocator/allocation_run_latency_ms/p95 + - allocator/allocation_run_latency_ms/p99 + - allocator/allocation_run_latency_ms/p999 + - allocator/allocation_run_latency_ms/p9999 + - 
allocator/roles/shares/dominant + - allocator/event_queue_dispatches + - allocator/offer_filters/roles/active + - allocator/quota/roles/resources/offered_or_allocated + - allocator/quota/roles/resources/guarantee + - allocator/resources/cpus/offered_or_allocated + - allocator/resources/cpus/total + - allocator/resources/disk/offered_or_allocated + - allocator/resources/disk/total + - allocator/resources/mem/offered_or_allocated + - allocator/resources/mem/total Mesos slave metric groups - resources diff --git a/plugins/inputs/mesos/mesos.go b/plugins/inputs/mesos/mesos.go index 15e2bfccb..4ce68e604 100644 --- a/plugins/inputs/mesos/mesos.go +++ b/plugins/inputs/mesos/mesos.go @@ -32,9 +32,10 @@ type Mesos struct { MasterCols []string `toml:"master_collections"` Slaves []string SlaveCols []string `toml:"slave_collections"` - //SlaveTasks bool tls.ClientConfig + Log telegraf.Logger + initialized bool client *http.Client masterURLs []*url.URL @@ -42,15 +43,17 @@ type Mesos struct { } var allMetrics = map[Role][]string{ - MASTER: []string{"resources", "master", "system", "agents", "frameworks", "tasks", "messages", "evqueue", "registrar"}, - SLAVE: []string{"resources", "agent", "system", "executors", "tasks", "messages"}, + MASTER: {"resources", "master", "system", "agents", "frameworks", "framework_offers", "tasks", "messages", "evqueue", "registrar", "allocator"}, + SLAVE: {"resources", "agent", "system", "executors", "tasks", "messages"}, } var sampleConfig = ` ## Timeout, in ms. timeout = 100 + ## A list of Mesos masters. masters = ["http://localhost:5050"] + ## Master metrics groups to be collected, by default, all enabled. master_collections = [ "resources", @@ -58,13 +61,17 @@ var sampleConfig = ` "system", "agents", "frameworks", + "framework_offers", "tasks", "messages", "evqueue", "registrar", + "allocator", ] + ## A list of Mesos slaves, default is [] # slaves = [] + ## Slave metrics groups to be collected, by default, all enabled. # slave_collections = [ # "resources", @@ -108,7 +115,7 @@ func parseURL(s string, role Role) (*url.URL, error) { } s = "http://" + host + ":" + port - log.Printf("W! [inputs.mesos] Using %q as connection URL; please update your configuration to use an URL", s) + log.Printf("W! [inputs.mesos] using %q as connection URL; please update your configuration to use an URL", s) } return url.Parse(s) @@ -124,7 +131,7 @@ func (m *Mesos) initialize() error { } if m.Timeout == 0 { - log.Println("I! 
[inputs.mesos] Missing timeout value, setting default value (100ms)") + m.Log.Info("Missing timeout value, setting default value (100ms)") m.Timeout = 100 } @@ -189,17 +196,6 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error { wg.Done() return }(slave) - - // if !m.SlaveTasks { - // continue - // } - - // wg.Add(1) - // go func(c string) { - // acc.AddError(m.gatherSlaveTaskMetrics(slave, acc)) - // wg.Done() - // return - // }(v) } wg.Wait() @@ -246,7 +242,7 @@ func metricsDiff(role Role, w []string) []string { return b } -// masterBlocks serves as kind of metrics registry groupping them in sets +// masterBlocks serves as kind of metrics registry grouping them in sets func getMetrics(role Role, group string) []string { var m map[string][]string @@ -305,6 +301,10 @@ func getMetrics(role Role, group string) []string { "master/slaves_connected", "master/slaves_disconnected", "master/slaves_inactive", + "master/slave_unreachable_canceled", + "master/slave_unreachable_completed", + "master/slave_unreachable_scheduled", + "master/slaves_unreachable", } m["frameworks"] = []string{ @@ -315,6 +315,12 @@ func getMetrics(role Role, group string) []string { "master/outstanding_offers", } + // framework_offers and allocator metrics have unpredictable names, so they can't be listed here. + // These empty groups are included to prevent the "unknown metrics group" info log below. + // filterMetrics() filters these metrics by looking for names with the corresponding prefix. + m["framework_offers"] = []string{} + m["allocator"] = []string{} + m["tasks"] = []string{ "master/tasks_error", "master/tasks_failed", @@ -324,6 +330,11 @@ func getMetrics(role Role, group string) []string { "master/tasks_running", "master/tasks_staging", "master/tasks_starting", + "master/tasks_dropped", + "master/tasks_gone", + "master/tasks_gone_by_operator", + "master/tasks_killing", + "master/tasks_unreachable", } m["messages"] = []string{ @@ -363,12 +374,18 @@ func getMetrics(role Role, group string) []string { "master/task_lost/source_master/reason_slave_removed", "master/task_lost/source_slave/reason_executor_terminated", "master/valid_executor_to_framework_messages", + "master/invalid_operation_status_update_acknowledgements", + "master/messages_operation_status_update_acknowledgement", + "master/messages_reconcile_operations", + "master/messages_suppress_offers", + "master/valid_operation_status_update_acknowledgements", } m["evqueue"] = []string{ "master/event_queue_dispatches", "master/event_queue_http_requests", "master/event_queue_messages", + "master/operator_event_stream_subscribers", } m["registrar"] = []string{ @@ -382,6 +399,11 @@ func getMetrics(role Role, group string) []string { "registrar/state_store_ms/p99", "registrar/state_store_ms/p999", "registrar/state_store_ms/p9999", + "registrar/log/ensemble_size", + "registrar/log/recovered", + "registrar/queued_operations", + "registrar/registry_size_bytes", + "registrar/state_store_ms/count", } } else if role == SLAVE { m["resources"] = []string{ @@ -459,7 +481,7 @@ func getMetrics(role Role, group string) []string { ret, ok := m[group] if !ok { - log.Printf("I! [mesos] Unknown %s metrics group: %s\n", role, group) + log.Printf("I! 
[inputs.mesos] unknown role %q metrics group: %s", role, group) return []string{} } @@ -477,9 +499,27 @@ func (m *Mesos) filterMetrics(role Role, metrics *map[string]interface{}) { } for _, k := range metricsDiff(role, selectedMetrics) { - for _, v := range getMetrics(role, k) { - if _, ok = (*metrics)[v]; ok { - delete((*metrics), v) + switch k { + // allocator and framework_offers metrics have unpredictable names, so we have to identify them by name prefix. + case "allocator": + for m := range *metrics { + if strings.HasPrefix(m, "allocator/") { + delete((*metrics), m) + } + } + case "framework_offers": + for m := range *metrics { + if strings.HasPrefix(m, "master/frameworks/") || strings.HasPrefix(m, "frameworks/") { + delete((*metrics), m) + } + } + + // All other metrics have predictable names. We can use getMetrics() to retrieve them. + default: + for _, v := range getMetrics(role, k) { + if _, ok = (*metrics)[v]; ok { + delete((*metrics), v) + } } } } diff --git a/plugins/inputs/mesos/mesos_test.go b/plugins/inputs/mesos/mesos_test.go index 905adb6e3..e25f250c8 100644 --- a/plugins/inputs/mesos/mesos_test.go +++ b/plugins/inputs/mesos/mesos_test.go @@ -8,6 +8,7 @@ import ( "net/http/httptest" "net/url" "os" + "strings" "testing" "github.com/influxdata/telegraf/testutil" @@ -27,194 +28,262 @@ func randUUID() string { return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) } +// master metrics that will be returned by generateMetrics() +var masterMetricNames []string = []string{ + // resources + "master/cpus_percent", + "master/cpus_used", + "master/cpus_total", + "master/cpus_revocable_percent", + "master/cpus_revocable_total", + "master/cpus_revocable_used", + "master/disk_percent", + "master/disk_used", + "master/disk_total", + "master/disk_revocable_percent", + "master/disk_revocable_total", + "master/disk_revocable_used", + "master/gpus_percent", + "master/gpus_used", + "master/gpus_total", + "master/gpus_revocable_percent", + "master/gpus_revocable_total", + "master/gpus_revocable_used", + "master/mem_percent", + "master/mem_used", + "master/mem_total", + "master/mem_revocable_percent", + "master/mem_revocable_total", + "master/mem_revocable_used", + // master + "master/elected", + "master/uptime_secs", + // system + "system/cpus_total", + "system/load_15min", + "system/load_5min", + "system/load_1min", + "system/mem_free_bytes", + "system/mem_total_bytes", + // agents + "master/slave_registrations", + "master/slave_removals", + "master/slave_reregistrations", + "master/slave_shutdowns_scheduled", + "master/slave_shutdowns_canceled", + "master/slave_shutdowns_completed", + "master/slaves_active", + "master/slaves_connected", + "master/slaves_disconnected", + "master/slaves_inactive", + "master/slave_unreachable_canceled", + "master/slave_unreachable_completed", + "master/slave_unreachable_scheduled", + "master/slaves_unreachable", + // frameworks + "master/frameworks_active", + "master/frameworks_connected", + "master/frameworks_disconnected", + "master/frameworks_inactive", + "master/outstanding_offers", + // framework offers + "master/frameworks/marathon/abc-123/calls", + "master/frameworks/marathon/abc-123/calls/accept", + "master/frameworks/marathon/abc-123/events", + "master/frameworks/marathon/abc-123/events/error", + "master/frameworks/marathon/abc-123/offers/sent", + "master/frameworks/marathon/abc-123/operations", + "master/frameworks/marathon/abc-123/operations/create", + "master/frameworks/marathon/abc-123/roles/*/suppressed", + 
"master/frameworks/marathon/abc-123/subscribed", + "master/frameworks/marathon/abc-123/tasks/active/task_killing", + "master/frameworks/marathon/abc-123/tasks/active/task_dropped", + "master/frameworks/marathon/abc-123/tasks/terminal/task_dropped", + "master/frameworks/marathon/abc-123/unknown/unknown", // test case for unknown metric type + // tasks + "master/tasks_error", + "master/tasks_failed", + "master/tasks_finished", + "master/tasks_killed", + "master/tasks_lost", + "master/tasks_running", + "master/tasks_staging", + "master/tasks_starting", + "master/tasks_dropped", + "master/tasks_gone", + "master/tasks_gone_by_operator", + "master/tasks_killing", + "master/tasks_unreachable", + // messages + "master/invalid_executor_to_framework_messages", + "master/invalid_framework_to_executor_messages", + "master/invalid_status_update_acknowledgements", + "master/invalid_status_updates", + "master/dropped_messages", + "master/messages_authenticate", + "master/messages_deactivate_framework", + "master/messages_decline_offers", + "master/messages_executor_to_framework", + "master/messages_exited_executor", + "master/messages_framework_to_executor", + "master/messages_kill_task", + "master/messages_launch_tasks", + "master/messages_reconcile_tasks", + "master/messages_register_framework", + "master/messages_register_slave", + "master/messages_reregister_framework", + "master/messages_reregister_slave", + "master/messages_resource_request", + "master/messages_revive_offers", + "master/messages_status_update", + "master/messages_status_update_acknowledgement", + "master/messages_unregister_framework", + "master/messages_unregister_slave", + "master/messages_update_slave", + "master/recovery_slave_removals", + "master/slave_removals/reason_registered", + "master/slave_removals/reason_unhealthy", + "master/slave_removals/reason_unregistered", + "master/valid_framework_to_executor_messages", + "master/valid_status_update_acknowledgements", + "master/valid_status_updates", + "master/task_lost/source_master/reason_invalid_offers", + "master/task_lost/source_master/reason_slave_removed", + "master/task_lost/source_slave/reason_executor_terminated", + "master/valid_executor_to_framework_messages", + "master/invalid_operation_status_update_acknowledgements", + "master/messages_operation_status_update_acknowledgement", + "master/messages_reconcile_operations", + "master/messages_suppress_offers", + "master/valid_operation_status_update_acknowledgements", + // evgqueue + "master/event_queue_dispatches", + "master/event_queue_http_requests", + "master/event_queue_messages", + "master/operator_event_stream_subscribers", + // registrar + "registrar/log/ensemble_size", + "registrar/log/recovered", + "registrar/queued_operations", + "registrar/registry_size_bytes", + "registrar/state_fetch_ms", + "registrar/state_store_ms", + "registrar/state_store_ms/max", + "registrar/state_store_ms/min", + "registrar/state_store_ms/p50", + "registrar/state_store_ms/p90", + "registrar/state_store_ms/p95", + "registrar/state_store_ms/p99", + "registrar/state_store_ms/p999", + "registrar/state_store_ms/p9999", + "registrar/state_store_ms/count", + // allocator + "allocator/mesos/allocation_run_ms", + "allocator/mesos/allocation_run_ms/count", + "allocator/mesos/allocation_run_ms/max", + "allocator/mesos/allocation_run_ms/min", + "allocator/mesos/allocation_run_ms/p50", + "allocator/mesos/allocation_run_ms/p90", + "allocator/mesos/allocation_run_ms/p95", + "allocator/mesos/allocation_run_ms/p99", + 
"allocator/mesos/allocation_run_ms/p999", + "allocator/mesos/allocation_run_ms/p9999", + "allocator/mesos/allocation_runs", + "allocator/mesos/allocation_run_latency_ms", + "allocator/mesos/allocation_run_latency_ms/count", + "allocator/mesos/allocation_run_latency_ms/max", + "allocator/mesos/allocation_run_latency_ms/min", + "allocator/mesos/allocation_run_latency_ms/p50", + "allocator/mesos/allocation_run_latency_ms/p90", + "allocator/mesos/allocation_run_latency_ms/p95", + "allocator/mesos/allocation_run_latency_ms/p99", + "allocator/mesos/allocation_run_latency_ms/p999", + "allocator/mesos/allocation_run_latency_ms/p9999", + "allocator/mesos/roles/*/shares/dominant", + "allocator/mesos/event_queue_dispatches", + "allocator/mesos/offer_filters/roles/*/active", + "allocator/mesos/quota/roles/*/resources/disk/offered_or_allocated", + "allocator/mesos/quota/roles/*/resources/mem/guarantee", + "allocator/mesos/quota/roles/*/resources/disk/guarantee", + "allocator/mesos/resources/cpus/offered_or_allocated", + "allocator/mesos/resources/cpus/total", + "allocator/mesos/resources/disk/offered_or_allocated", + "allocator/mesos/resources/disk/total", + "allocator/mesos/resources/mem/offered_or_allocated", + "allocator/mesos/resources/mem/total", +} + +// slave metrics that will be returned by generateMetrics() +var slaveMetricNames []string = []string{ + // resources + "slave/cpus_percent", + "slave/cpus_used", + "slave/cpus_total", + "slave/cpus_revocable_percent", + "slave/cpus_revocable_total", + "slave/cpus_revocable_used", + "slave/disk_percent", + "slave/disk_used", + "slave/disk_total", + "slave/disk_revocable_percent", + "slave/disk_revocable_total", + "slave/disk_revocable_used", + "slave/gpus_percent", + "slave/gpus_used", + "slave/gpus_total", + "slave/gpus_revocable_percent", + "slave/gpus_revocable_total", + "slave/gpus_revocable_used", + "slave/mem_percent", + "slave/mem_used", + "slave/mem_total", + "slave/mem_revocable_percent", + "slave/mem_revocable_total", + "slave/mem_revocable_used", + // agent + "slave/registered", + "slave/uptime_secs", + // system + "system/cpus_total", + "system/load_15min", + "system/load_5min", + "system/load_1min", + "system/mem_free_bytes", + "system/mem_total_bytes", + // executors + "containerizer/mesos/container_destroy_errors", + "slave/container_launch_errors", + "slave/executors_preempted", + "slave/frameworks_active", + "slave/executor_directory_max_allowed_age_secs", + "slave/executors_registering", + "slave/executors_running", + "slave/executors_terminated", + "slave/executors_terminating", + "slave/recovery_errors", + // tasks + "slave/tasks_failed", + "slave/tasks_finished", + "slave/tasks_killed", + "slave/tasks_lost", + "slave/tasks_running", + "slave/tasks_staging", + "slave/tasks_starting", + // messages + "slave/invalid_framework_messages", + "slave/invalid_status_updates", + "slave/valid_framework_messages", + "slave/valid_status_updates", +} + func generateMetrics() { masterMetrics = make(map[string]interface{}) - - metricNames := []string{ - // resources - "master/cpus_percent", - "master/cpus_used", - "master/cpus_total", - "master/cpus_revocable_percent", - "master/cpus_revocable_total", - "master/cpus_revocable_used", - "master/disk_percent", - "master/disk_used", - "master/disk_total", - "master/disk_revocable_percent", - "master/disk_revocable_total", - "master/disk_revocable_used", - "master/gpus_percent", - "master/gpus_used", - "master/gpus_total", - "master/gpus_revocable_percent", - "master/gpus_revocable_total", - 
"master/gpus_revocable_used", - "master/mem_percent", - "master/mem_used", - "master/mem_total", - "master/mem_revocable_percent", - "master/mem_revocable_total", - "master/mem_revocable_used", - // master - "master/elected", - "master/uptime_secs", - // system - "system/cpus_total", - "system/load_15min", - "system/load_5min", - "system/load_1min", - "system/mem_free_bytes", - "system/mem_total_bytes", - // agents - "master/slave_registrations", - "master/slave_removals", - "master/slave_reregistrations", - "master/slave_shutdowns_scheduled", - "master/slave_shutdowns_canceled", - "master/slave_shutdowns_completed", - "master/slaves_active", - "master/slaves_connected", - "master/slaves_disconnected", - "master/slaves_inactive", - // frameworks - "master/frameworks_active", - "master/frameworks_connected", - "master/frameworks_disconnected", - "master/frameworks_inactive", - "master/outstanding_offers", - // tasks - "master/tasks_error", - "master/tasks_failed", - "master/tasks_finished", - "master/tasks_killed", - "master/tasks_lost", - "master/tasks_running", - "master/tasks_staging", - "master/tasks_starting", - // messages - "master/invalid_executor_to_framework_messages", - "master/invalid_framework_to_executor_messages", - "master/invalid_status_update_acknowledgements", - "master/invalid_status_updates", - "master/dropped_messages", - "master/messages_authenticate", - "master/messages_deactivate_framework", - "master/messages_decline_offers", - "master/messages_executor_to_framework", - "master/messages_exited_executor", - "master/messages_framework_to_executor", - "master/messages_kill_task", - "master/messages_launch_tasks", - "master/messages_reconcile_tasks", - "master/messages_register_framework", - "master/messages_register_slave", - "master/messages_reregister_framework", - "master/messages_reregister_slave", - "master/messages_resource_request", - "master/messages_revive_offers", - "master/messages_status_update", - "master/messages_status_update_acknowledgement", - "master/messages_unregister_framework", - "master/messages_unregister_slave", - "master/messages_update_slave", - "master/recovery_slave_removals", - "master/slave_removals/reason_registered", - "master/slave_removals/reason_unhealthy", - "master/slave_removals/reason_unregistered", - "master/valid_framework_to_executor_messages", - "master/valid_status_update_acknowledgements", - "master/valid_status_updates", - "master/task_lost/source_master/reason_invalid_offers", - "master/task_lost/source_master/reason_slave_removed", - "master/task_lost/source_slave/reason_executor_terminated", - "master/valid_executor_to_framework_messages", - // evgqueue - "master/event_queue_dispatches", - "master/event_queue_http_requests", - "master/event_queue_messages", - // registrar - "registrar/state_fetch_ms", - "registrar/state_store_ms", - "registrar/state_store_ms/max", - "registrar/state_store_ms/min", - "registrar/state_store_ms/p50", - "registrar/state_store_ms/p90", - "registrar/state_store_ms/p95", - "registrar/state_store_ms/p99", - "registrar/state_store_ms/p999", - "registrar/state_store_ms/p9999", - } - - for _, k := range metricNames { + for _, k := range masterMetricNames { masterMetrics[k] = rand.Float64() } slaveMetrics = make(map[string]interface{}) - - metricNames = []string{ - // resources - "slave/cpus_percent", - "slave/cpus_used", - "slave/cpus_total", - "slave/cpus_revocable_percent", - "slave/cpus_revocable_total", - "slave/cpus_revocable_used", - "slave/disk_percent", - "slave/disk_used", - 
"slave/disk_total", - "slave/disk_revocable_percent", - "slave/disk_revocable_total", - "slave/disk_revocable_used", - "slave/gpus_percent", - "slave/gpus_used", - "slave/gpus_total", - "slave/gpus_revocable_percent", - "slave/gpus_revocable_total", - "slave/gpus_revocable_used", - "slave/mem_percent", - "slave/mem_used", - "slave/mem_total", - "slave/mem_revocable_percent", - "slave/mem_revocable_total", - "slave/mem_revocable_used", - // agent - "slave/registered", - "slave/uptime_secs", - // system - "system/cpus_total", - "system/load_15min", - "system/load_5min", - "system/load_1min", - "system/mem_free_bytes", - "system/mem_total_bytes", - // executors - "containerizer/mesos/container_destroy_errors", - "slave/container_launch_errors", - "slave/executors_preempted", - "slave/frameworks_active", - "slave/executor_directory_max_allowed_age_secs", - "slave/executors_registering", - "slave/executors_running", - "slave/executors_terminated", - "slave/executors_terminating", - "slave/recovery_errors", - // tasks - "slave/tasks_failed", - "slave/tasks_finished", - "slave/tasks_killed", - "slave/tasks_lost", - "slave/tasks_running", - "slave/tasks_staging", - "slave/tasks_starting", - // messages - "slave/invalid_framework_messages", - "slave/invalid_status_updates", - "slave/valid_framework_messages", - "slave/valid_status_updates", - } - - for _, k := range metricNames { + for _, k := range slaveMetricNames { slaveMetrics[k] = rand.Float64() } @@ -280,6 +349,7 @@ func TestMesosMaster(t *testing.T) { var acc testutil.Accumulator m := Mesos{ + Log: testutil.Logger{}, Masters: []string{masterTestServer.Listener.Addr().String()}, Timeout: 10, } @@ -295,8 +365,9 @@ func TestMesosMaster(t *testing.T) { func TestMasterFilter(t *testing.T) { m := Mesos{ + Log: testutil.Logger{}, MasterCols: []string{ - "resources", "master", "registrar", + "resources", "master", "registrar", "allocator", }, } b := []string{ @@ -306,6 +377,26 @@ func TestMasterFilter(t *testing.T) { m.filterMetrics(MASTER, &masterMetrics) + // Assert expected metrics are present. + for _, v := range m.MasterCols { + for _, x := range getMetrics(MASTER, v) { + if _, ok := masterMetrics[x]; !ok { + t.Errorf("Didn't find key %s, it should present.", x) + } + } + } + // m.MasterCols includes "allocator", so allocator metrics should be present. + // allocator metrics have unpredictable names, so we can't rely on the list of metrics returned from + // getMetrics(). We have to find them by checking name prefixes. + for _, x := range masterMetricNames { + if strings.HasPrefix(x, "allocator/") { + if _, ok := masterMetrics[x]; !ok { + t.Errorf("Didn't find key %s, it should be present.", x) + } + } + } + + // Assert unexpected metrics are not present. for _, v := range b { for _, x := range getMetrics(MASTER, v) { if _, ok := masterMetrics[x]; ok { @@ -313,11 +404,12 @@ func TestMasterFilter(t *testing.T) { } } } - for _, v := range m.MasterCols { - for _, x := range getMetrics(MASTER, v) { - if _, ok := masterMetrics[x]; !ok { - t.Errorf("Didn't find key %s, it should present.", x) - } + // m.MasterCols does not include "framework_offers", so framework_offers metrics should not be present. + // framework_offers metrics have unpredictable names, so we can't rely on the list of metrics returned from + // getMetrics(). We have to find them by checking name prefixes. 
+ for k := range masterMetrics { + if strings.HasPrefix(k, "master/frameworks/") || strings.HasPrefix(k, "frameworks/") { + t.Errorf("Found key %s, it should be gone.", k) } } } @@ -326,6 +418,7 @@ func TestMesosSlave(t *testing.T) { var acc testutil.Accumulator m := Mesos{ + Log: testutil.Logger{}, Masters: []string{}, Slaves: []string{slaveTestServer.Listener.Addr().String()}, // SlaveTasks: true, @@ -339,22 +432,11 @@ func TestMesosSlave(t *testing.T) { } acc.AssertContainsFields(t, "mesos", slaveMetrics) - - // expectedFields := make(map[string]interface{}, len(slaveTaskMetrics["statistics"].(map[string]interface{}))+1) - // for k, v := range slaveTaskMetrics["statistics"].(map[string]interface{}) { - // expectedFields[k] = v - // } - // expectedFields["executor_id"] = slaveTaskMetrics["executor_id"] - - // acc.AssertContainsTaggedFields( - // t, - // "mesos_tasks", - // expectedFields, - // map[string]string{"server": "127.0.0.1", "framework_id": slaveTaskMetrics["framework_id"].(string)}) } func TestSlaveFilter(t *testing.T) { m := Mesos{ + Log: testutil.Logger{}, SlaveCols: []string{ "resources", "agent", "tasks", }, diff --git a/plugins/inputs/minecraft/README.md b/plugins/inputs/minecraft/README.md index 726f9a29e..e27fca9ba 100644 --- a/plugins/inputs/minecraft/README.md +++ b/plugins/inputs/minecraft/README.md @@ -1,66 +1,84 @@ -# Minecraft Plugin +# Minecraft Input Plugin -This plugin uses the RCON protocol to collect [statistics](http://minecraft.gamepedia.com/Statistics) from a [scoreboard](http://minecraft.gamepedia.com/Scoreboard) on a -Minecraft server. +The `minecraft` plugin connects to a Minecraft server using the RCON protocol +to collects scores from the server [scoreboard][]. -To enable [RCON](http://wiki.vg/RCON) on the minecraft server, add this to your server configuration in the `server.properties` file: +This plugin is known to support Minecraft Java Edition versions 1.11 - 1.14. +When using an version of Minecraft earlier than 1.13, be aware that the values +for some criterion has changed and may need to be modified. -``` +#### Server Setup + +Enable [RCON][] on the Minecraft server, add this to your server configuration +in the [server.properties][] file: + +```conf enable-rcon=true rcon.password= rcon.port=<1-65535> ``` -To create a new scoreboard objective called `jump` on a minecraft server tracking the `stat.jump` criteria, run this command -in the Minecraft console: +Scoreboard [Objectives][] must be added using the server console for the +plugin to collect. These can be added in game by players with op status, +from the server console, or over an RCON connection. -`/scoreboard objectives add jump stat.jump` - -Stats are collected with the following RCON command, issued by the plugin: - -`/scoreboard players list *` - -### Configuration: +When getting started pick an easy to test objective. 
This command will add an +objective that counts the number of times a player has jumped: ``` +/scoreboard objectives add jumps minecraft.custom:minecraft.jump +``` + +Once a player has triggered the event they will be added to the scoreboard, +you can then list all players with recorded scores: +``` +/scoreboard players list +``` + +View the current scores with a command, substituting your player name: +``` +/scoreboard players list Etho +``` + +### Configuration + +```toml [[inputs.minecraft]] - # server address for minecraft - server = "localhost" - # port for RCON - port = "25575" - # password RCON for mincraft server - password = "replace_me" + ## Address of the Minecraft server. + # server = "localhost" + + ## Server RCON Port. + # port = "25575" + + ## Server RCON Password. + password = "" ``` -### Measurements & Fields: +### Metrics -*This plugin uses only one measurement, titled* `minecraft` - -- The field name is the scoreboard objective name. -- The field value is the count of the scoreboard objective - -- `minecraft` +- minecraft + - tags: + - player + - port (port of the server) + - server (hostname:port, deprecated in 1.11; use `source` and `port` tags) + - source (hostname of the server) + - fields: - `` (integer, count) -### Tags: - -- The `minecraft` measurement: - - `server`: the Minecraft RCON server - - `player`: the Minecraft player - - ### Sample Queries: Get the number of jumps per player in the last hour: ``` -SELECT SPREAD("jump") FROM "minecraft" WHERE time > now() - 1h GROUP BY "player" +SELECT SPREAD("jumps") FROM "minecraft" WHERE time > now() - 1h GROUP BY "player" ``` ### Example Output: +``` +minecraft,player=notch,source=127.0.0.1,port=25575 jumps=178i 1498261397000000000 +minecraft,player=dinnerbone,source=127.0.0.1,port=25575 deaths=1i,jumps=1999i,cow_kills=1i 1498261397000000000 +minecraft,player=jeb,source=127.0.0.1,port=25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000 +``` -``` -$ telegraf --input-filter minecraft --test -* Plugin: inputs.minecraft, Collection 1 -> minecraft,player=notch,server=127.0.0.1:25575 jumps=178i 1498261397000000000 -> minecraft,player=dinnerbone,server=127.0.0.1:25575 deaths=1i,jumps=1999i,cow_kills=1i 1498261397000000000 -> minecraft,player=jeb,server=127.0.0.1:25575 d_pickaxe=1i,damage_dealt=80i,d_sword=2i,hunger=20i,health=20i,kills=1i,level=33i,jumps=264i,armor=15i 1498261397000000000 -``` +[server.properties]: https://minecraft.gamepedia.com/Server.properties +[scoreboard]: http://minecraft.gamepedia.com/Scoreboard +[objectives]: https://minecraft.gamepedia.com/Scoreboard#Objectives +[rcon]: http://wiki.vg/RCON diff --git a/plugins/inputs/minecraft/client.go b/plugins/inputs/minecraft/client.go new file mode 100644 index 000000000..30f56213a --- /dev/null +++ b/plugins/inputs/minecraft/client.go @@ -0,0 +1,205 @@ +package minecraft + +import ( + "regexp" + "strconv" + "strings" + + "github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon" +) + +var ( + scoreboardRegexLegacy = regexp.MustCompile(`(?U):\s(?P\d+)\s\((?P.*)\)`) + scoreboardRegex = regexp.MustCompile(`\[(?P[^\]]+)\]: (?P\d+)`) +) + +// Connection is an established connection to the Minecraft server. +type Connection interface { + // Execute runs a command. + Execute(command string) (string, error) +} + +// Connector is used to create connections to the Minecraft server. +type Connector interface { + // Connect establishes a connection to the server. 
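+	// The concrete connector returned by NewConnector dials the server's RCON
+	// port and authorizes with the configured password before handing back a
+	// usable Connection.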
+ Connect() (Connection, error) +} + +func NewConnector(hostname, port, password string) (*connector, error) { + return &connector{ + hostname: hostname, + port: port, + password: password, + }, nil +} + +type connector struct { + hostname string + port string + password string +} + +func (c *connector) Connect() (Connection, error) { + p, err := strconv.Atoi(c.port) + if err != nil { + return nil, err + } + + rcon, err := rcon.NewClient(c.hostname, p) + if err != nil { + return nil, err + } + + _, err = rcon.Authorize(c.password) + if err != nil { + return nil, err + } + + return &connection{rcon: rcon}, nil +} + +func NewClient(connector Connector) (*client, error) { + return &client{connector: connector}, nil +} + +type client struct { + connector Connector + conn Connection +} + +func (c *client) Connect() error { + conn, err := c.connector.Connect() + if err != nil { + return err + } + c.conn = conn + return nil +} + +func (c *client) Players() ([]string, error) { + if c.conn == nil { + err := c.Connect() + if err != nil { + return nil, err + } + } + + resp, err := c.conn.Execute("scoreboard players list") + if err != nil { + c.conn = nil + return nil, err + } + + players, err := parsePlayers(resp) + if err != nil { + c.conn = nil + return nil, err + } + + return players, nil +} + +func (c *client) Scores(player string) ([]Score, error) { + if c.conn == nil { + err := c.Connect() + if err != nil { + return nil, err + } + } + + resp, err := c.conn.Execute("scoreboard players list " + player) + if err != nil { + c.conn = nil + return nil, err + } + + scores, err := parseScores(resp) + if err != nil { + c.conn = nil + return nil, err + } + + return scores, nil +} + +type connection struct { + rcon *rcon.Client +} + +func (c *connection) Execute(command string) (string, error) { + packet, err := c.rcon.Execute(command) + if err != nil { + return "", err + } + return packet.Body, nil +} + +func parsePlayers(input string) ([]string, error) { + parts := strings.SplitAfterN(input, ":", 2) + if len(parts) != 2 { + return []string{}, nil + } + + names := strings.Split(parts[1], ",") + + // Detect Minecraft <= 1.12 + if strings.Contains(parts[0], "players on the scoreboard") && len(names) > 0 { + // Split the last two player names: ex: "notch and dinnerbone" + head := names[:len(names)-1] + tail := names[len(names)-1] + names = append(head, strings.SplitN(tail, " and ", 2)...) + } + + var players []string + for _, name := range names { + name := strings.TrimSpace(name) + if name == "" { + continue + } + players = append(players, name) + + } + return players, nil +} + +// Score is an individual tracked scoreboard stat. 
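+// Scores are parsed from RCON responses such as "[jumps]: 178" (Minecraft 1.13+)
+// or "- jumps: 178 (jumps)" (1.12 and earlier).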
+type Score struct { + Name string + Value int64 +} + +func parseScores(input string) ([]Score, error) { + if strings.Contains(input, "has no scores") { + return []Score{}, nil + } + + // Detect Minecraft <= 1.12 + var re *regexp.Regexp + if strings.Contains(input, "tracked objective") { + re = scoreboardRegexLegacy + } else { + re = scoreboardRegex + } + + var scores []Score + matches := re.FindAllStringSubmatch(input, -1) + for _, match := range matches { + score := Score{} + for i, subexp := range re.SubexpNames() { + switch subexp { + case "name": + score.Name = match[i] + case "value": + value, err := strconv.ParseInt(match[i], 10, 64) + if err != nil { + continue + } + score.Value = value + default: + continue + } + } + scores = append(scores, score) + } + return scores, nil +} diff --git a/plugins/inputs/minecraft/client_test.go b/plugins/inputs/minecraft/client_test.go new file mode 100644 index 000000000..767a0c30e --- /dev/null +++ b/plugins/inputs/minecraft/client_test.go @@ -0,0 +1,195 @@ +package minecraft + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type MockConnection struct { + commands map[string]string +} + +func (c *MockConnection) Execute(command string) (string, error) { + return c.commands[command], nil +} + +type MockConnector struct { + conn *MockConnection +} + +func (c *MockConnector) Connect() (Connection, error) { + return c.conn, nil +} + +func TestClient_Player(t *testing.T) { + tests := []struct { + name string + commands map[string]string + expected []string + }{ + { + name: "minecraft 1.12 no players", + commands: map[string]string{ + "scoreboard players list": "There are no tracked players on the scoreboard", + }, + expected: []string{}, + }, + { + name: "minecraft 1.12 single player", + commands: map[string]string{ + "scoreboard players list": "Showing 1 tracked players on the scoreboard:Etho", + }, + expected: []string{"Etho"}, + }, + { + name: "minecraft 1.12 two players", + commands: map[string]string{ + "scoreboard players list": "Showing 2 tracked players on the scoreboard:Etho and torham", + }, + expected: []string{"Etho", "torham"}, + }, + { + name: "minecraft 1.12 three players", + commands: map[string]string{ + "scoreboard players list": "Showing 3 tracked players on the scoreboard:Etho, notch and torham", + }, + expected: []string{"Etho", "notch", "torham"}, + }, + { + name: "minecraft 1.12 players space in username", + commands: map[string]string{ + "scoreboard players list": "Showing 4 tracked players on the scoreboard:with space, Etho, notch and torham", + }, + expected: []string{"with space", "Etho", "notch", "torham"}, + }, + { + name: "minecraft 1.12 players and in username", + commands: map[string]string{ + "scoreboard players list": "Showing 5 tracked players on the scoreboard:left and right, with space,Etho, notch and torham", + }, + expected: []string{"left and right", "with space", "Etho", "notch", "torham"}, + }, + { + name: "minecraft 1.13 no players", + commands: map[string]string{ + "scoreboard players list": "There are no tracked entities", + }, + expected: []string{}, + }, + { + name: "minecraft 1.13 single player", + commands: map[string]string{ + "scoreboard players list": "There are 1 tracked entities: torham", + }, + expected: []string{"torham"}, + }, + { + name: "minecraft 1.13 multiple player", + commands: map[string]string{ + "scoreboard players list": "There are 3 tracked entities: Etho, notch, torham", + }, + expected: []string{"Etho", "notch", "torham"}, + }, + } + for _, tt := range tests { 
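+		// Each case feeds a canned "scoreboard players list" response through the
+		// mock connection and checks the player names parsed from it.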
+ t.Run(tt.name, func(t *testing.T) { + connector := &MockConnector{ + conn: &MockConnection{commands: tt.commands}, + } + + client, err := NewClient(connector) + require.NoError(t, err) + + actual, err := client.Players() + require.NoError(t, err) + + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestClient_Scores(t *testing.T) { + tests := []struct { + name string + player string + commands map[string]string + expected []Score + }{ + { + name: "minecraft 1.12 player with no scores", + player: "Etho", + commands: map[string]string{ + "scoreboard players list Etho": "Player Etho has no scores recorded", + }, + expected: []Score{}, + }, + { + name: "minecraft 1.12 player with one score", + player: "Etho", + commands: map[string]string{ + "scoreboard players list Etho": "Showing 1 tracked objective(s) for Etho:- jump: 2 (jump)", + }, + expected: []Score{ + {Name: "jump", Value: 2}, + }, + }, + { + name: "minecraft 1.12 player with many scores", + player: "Etho", + commands: map[string]string{ + "scoreboard players list Etho": "Showing 3 tracked objective(s) for Etho:- hopper: 2 (hopper)- dropper: 2 (dropper)- redstone: 1 (redstone)", + }, + expected: []Score{ + {Name: "hopper", Value: 2}, + {Name: "dropper", Value: 2}, + {Name: "redstone", Value: 1}, + }, + }, + { + name: "minecraft 1.13 player with no scores", + player: "Etho", + commands: map[string]string{ + "scoreboard players list Etho": "Etho has no scores to show", + }, + expected: []Score{}, + }, + { + name: "minecraft 1.13 player with one score", + player: "Etho", + commands: map[string]string{ + "scoreboard players list Etho": "Etho has 1 scores:[jumps]: 1", + }, + expected: []Score{ + {Name: "jumps", Value: 1}, + }, + }, + { + name: "minecraft 1.13 player with many scores", + player: "Etho", + commands: map[string]string{ + "scoreboard players list Etho": "Etho has 3 scores:[hopper]: 2[dropper]: 2[redstone]: 1", + }, + expected: []Score{ + {Name: "hopper", Value: 2}, + {Name: "dropper", Value: 2}, + {Name: "redstone", Value: 1}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + connector := &MockConnector{ + conn: &MockConnection{commands: tt.commands}, + } + + client, err := NewClient(connector) + require.NoError(t, err) + + actual, err := client.Scores(tt.player) + require.NoError(t, err) + + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/plugins/inputs/minecraft/internal/rcon/rcon.go b/plugins/inputs/minecraft/internal/rcon/rcon.go index a57d75629..f9e49e6e6 100644 --- a/plugins/inputs/minecraft/internal/rcon/rcon.go +++ b/plugins/inputs/minecraft/internal/rcon/rcon.go @@ -32,8 +32,8 @@ const ( // Rcon package errors. var ( - ErrInvalidWrite = errors.New("Failed to write the payload corretly to remote connection.") - ErrInvalidRead = errors.New("Failed to read the response corretly from remote connection.") + ErrInvalidWrite = errors.New("Failed to write the payload correctly to remote connection.") + ErrInvalidRead = errors.New("Failed to read the response correctly from remote connection.") ErrInvalidChallenge = errors.New("Server failed to mirror request challenge.") ErrUnauthorizedRequest = errors.New("Client not authorized to remote server.") ErrFailedAuthorization = errors.New("Failed to authorize to the remote server.") @@ -57,7 +57,7 @@ type Packet struct { Body string // Body of packet. 
} -// Compile converts a packets header and body into its approriate +// Compile converts a packets header and body into its appropriate // byte array payload, returning an error if the binary packages // Write method fails to write the header bytes in their little // endian byte order. @@ -112,7 +112,7 @@ func (c *Client) Execute(command string) (response *Packet, err error) { // Sends accepts the commands type and its string to execute to the clients server, // creating a packet with a random challenge id for the server to mirror, -// and compiling its payload bytes in the appropriate order. The resonse is +// and compiling its payload bytes in the appropriate order. The response is // decompiled from its bytes into a Packet type for return. An error is returned // if send fails. func (c *Client) Send(typ int32, command string) (response *Packet, err error) { @@ -152,7 +152,7 @@ func (c *Client) Send(typ int32, command string) (response *Packet, err error) { } if packet.Header.Type == Auth && header.Type == ResponseValue { - // Discard, empty SERVERDATA_RESPOSE_VALUE from authorization. + // Discard, empty SERVERDATA_RESPONSE_VALUE from authorization. c.Connection.Read(make([]byte, header.Size-int32(PacketHeaderSize))) // Reread the packet header. diff --git a/plugins/inputs/minecraft/minecraft.go b/plugins/inputs/minecraft/minecraft.go index 6debbd25b..0de79d94a 100644 --- a/plugins/inputs/minecraft/minecraft.go +++ b/plugins/inputs/minecraft/minecraft.go @@ -1,95 +1,89 @@ package minecraft import ( - "fmt" - "regexp" - "strconv" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" ) const sampleConfig = ` - ## server address for minecraft + ## Address of the Minecraft server. # server = "localhost" - ## port for RCON + + ## Server RCON Port. # port = "25575" - ## password RCON for mincraft server - # password = "" + + ## Server RCON Password. + password = "" + + ## Uncomment to remove deprecated metric components. + # tagdrop = ["server"] ` -var ( - playerNameRegex = regexp.MustCompile(`for\s([^:]+):-`) - scoreboardRegex = regexp.MustCompile(`(?U):\s(\d+)\s\((.*)\)`) -) - -// Client is an interface for a client which gathers data from a minecraft server +// Client is a client for the Minecraft server. type Client interface { - Gather(producer RCONClientProducer) ([]string, error) + // Connect establishes a connection to the server. + Connect() error + + // Players returns the players on the scoreboard. + Players() ([]string, error) + + // Scores return the objective scores for a player. + Scores(player string) ([]Score, error) } -// Minecraft represents a connection to a minecraft server +// Minecraft is the plugin type. type Minecraft struct { - Server string - Port string - Password string - client Client - clientSet bool + Server string `toml:"server"` + Port string `toml:"port"` + Password string `toml:"password"` + + client Client } -// Description gives a brief description. func (s *Minecraft) Description() string { - return "Collects scores from a minecraft server's scoreboard using the RCON protocol" + return "Collects scores from a Minecraft server's scoreboard using the RCON protocol" } -// SampleConfig returns our sampleConfig. func (s *Minecraft) SampleConfig() string { return sampleConfig } -// Gather uses the RCON protocol to collect player and -// scoreboard stats from a minecraft server. 
-//var hasClient bool = false func (s *Minecraft) Gather(acc telegraf.Accumulator) error { - // can't simply compare s.client to nil, because comparing an interface - // to nil often does not produce the desired result - if !s.clientSet { - var err error - s.client, err = NewRCON(s.Server, s.Port, s.Password) + if s.client == nil { + connector, err := NewConnector(s.Server, s.Port, s.Password) if err != nil { return err } - s.clientSet = true + + client, err := NewClient(connector) + if err != nil { + return err + } + + s.client = client } - // (*RCON).Gather() takes an RCONClientProducer for testing purposes - d := defaultClientProducer{ - Server: s.Server, - Port: s.Port, - } - - scores, err := s.client.Gather(d) + players, err := s.client.Players() if err != nil { return err } - for _, score := range scores { - player, err := ParsePlayerName(score) + for _, player := range players { + scores, err := s.client.Scores(player) if err != nil { return err } + tags := map[string]string{ "player": player, "server": s.Server + ":" + s.Port, + "source": s.Server, + "port": s.Port, } - stats, err := ParseScoreboard(score) - if err != nil { - return err - } - var fields = make(map[string]interface{}, len(stats)) - for _, stat := range stats { - fields[stat.Name] = stat.Value + var fields = make(map[string]interface{}, len(scores)) + for _, score := range scores { + fields[score.Name] = score.Value } acc.AddFields("minecraft", fields, tags) @@ -98,51 +92,6 @@ func (s *Minecraft) Gather(acc telegraf.Accumulator) error { return nil } -// ParsePlayerName takes an input string from rcon, to parse -// the player. -func ParsePlayerName(input string) (string, error) { - playerMatches := playerNameRegex.FindAllStringSubmatch(input, -1) - if playerMatches == nil { - return "", fmt.Errorf("no player was matched") - } - return playerMatches[0][1], nil -} - -// Score is an individual tracked scoreboard stat. -type Score struct { - Name string - Value int -} - -// ParseScoreboard takes an input string from rcon, to parse -// scoreboard stats. 
-func ParseScoreboard(input string) ([]Score, error) { - scoreMatches := scoreboardRegex.FindAllStringSubmatch(input, -1) - if scoreMatches == nil { - return nil, fmt.Errorf("No scores found") - } - - var scores []Score - - for _, match := range scoreMatches { - number := match[1] - name := match[2] - n, err := strconv.Atoi(number) - // Not necessary in current state, because regex can only match integers, - // maybe become necessary if regex is modified to match more types of - // numbers - if err != nil { - return nil, fmt.Errorf("Failed to parse score") - } - s := Score{ - Name: name, - Value: n, - } - scores = append(scores, s) - } - return scores, nil -} - func init() { inputs.Add("minecraft", func() telegraf.Input { return &Minecraft{ diff --git a/plugins/inputs/minecraft/minecraft_test.go b/plugins/inputs/minecraft/minecraft_test.go index c0a9e6cf5..487f7d58a 100644 --- a/plugins/inputs/minecraft/minecraft_test.go +++ b/plugins/inputs/minecraft/minecraft_test.go @@ -1,234 +1,124 @@ package minecraft import ( - "fmt" - "reflect" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) -// TestParsePlayerName tests different Minecraft RCON inputs for players -func TestParsePlayerName(t *testing.T) { - // Test a valid input string to ensure player is extracted - input := "1 tracked objective(s) for divislight:- jumps: 178 (jumps)" - got, err := ParsePlayerName(input) - want := "divislight" - if err != nil { - t.Fatalf("player returned error. Error: %s\n", err) - } - if got != want { - t.Errorf("got %s\nwant %s\n", got, want) - } - - // Test an invalid input string to ensure error is returned - input = "" - got, err = ParsePlayerName(input) - want = "" - if err == nil { - t.Fatal("Expected error when player not present. No error found.") - } - if got != want { - t.Errorf("got %s\n want %s\n", got, want) - } - - // Test an invalid input string to ensure error is returned - input = "1 tracked objective(s) for 😂:- jumps: 178 (jumps)" - got, err = ParsePlayerName(input) - want = "😂" - if err != nil { - t.Fatalf("player returned error. Error: %s\n", err) - } - if got != want { - t.Errorf("got %s\n want %s\n", got, want) - } -} - -// TestParseScoreboard tests different Minecraft RCON inputs for scoreboard stats. -func TestParseScoreboard(t *testing.T) { - // test a valid input string to ensure stats are parsed correctly. - input := `1 tracked objective(s) for divislight:- jumps: 178 (jumps)- sword: 5 (sword)` - got, err := ParseScoreboard(input) - if err != nil { - t.Fatal("Unexpected error") - } - - want := []Score{ - { - Name: "jumps", - Value: 178, - }, - { - Name: "sword", - Value: 5, - }, - } - - if !reflect.DeepEqual(got, want) { - t.Errorf("Got: \n%#v\nWant: %#v", got, want) - } - - // Tests a partial input string. - input = `1 tracked objective(s) for divislight:- jumps: (jumps)- sword: 5 (sword)` - got, err = ParseScoreboard(input) - - if err != nil { - t.Fatal("Unexpected error") - } - - want = []Score{ - { - Name: "sword", - Value: 5, - }, - } - - if !reflect.DeepEqual(got, want) { - t.Errorf("Got: \n%#v\nWant:\n%#v", got, want) - } - - // Tests an empty string. - input = `` - _, err = ParseScoreboard(input) - if err == nil { - t.Fatal("Expected input error, but error was nil") - } - - // Tests when a number isn't an integer. 
- input = `1 tracked objective(s) for divislight:- jumps: 178.5 (jumps)- sword: 5 (sword)` - got, err = ParseScoreboard(input) - if err != nil { - t.Fatal("Unexpected error") - } - - want = []Score{ - { - Name: "sword", - Value: 5, - }, - } - - if !reflect.DeepEqual(got, want) { - t.Errorf("Got: \n%#v\nWant: %#v", got, want) - } - - //Testing a real life data scenario with unicode characters - input = `7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)` - got, err = ParseScoreboard(input) - if err != nil { - t.Fatal("Unexpected error") - } - - want = []Score{ - { - Name: "total_kills", - Value: 39, - }, - { - Name: "dalevel", - Value: 37, - }, - { - Name: "lvl", - Value: 37, - }, - { - Name: "jumps", - Value: 1290, - }, - { - Name: "iron_pickaxe", - Value: 284, - }, - { - Name: "cow_kills", - Value: 1, - }, - { - Name: "😂", - Value: 37, - }, - } - - if !reflect.DeepEqual(got, want) { - t.Errorf("Got: \n%#v\nWant: %#v", got, want) - } - -} - type MockClient struct { - Result []string - Err error + ConnectF func() error + PlayersF func() ([]string, error) + ScoresF func(player string) ([]Score, error) } -func (m *MockClient) Gather(d RCONClientProducer) ([]string, error) { - return m.Result, m.Err +func (c *MockClient) Connect() error { + return c.ConnectF() +} + +func (c *MockClient) Players() ([]string, error) { + return c.PlayersF() +} + +func (c *MockClient) Scores(player string) ([]Score, error) { + return c.ScoresF(player) } func TestGather(t *testing.T) { - var acc testutil.Accumulator - testConfig := Minecraft{ - Server: "biffsgang.net", - Port: "25575", - client: &MockClient{ - Result: []string{ - `1 tracked objective(s) for divislight:- jumps: 178 (jumps)`, - `7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)`, - `5 tracked objective(s) for torham:- total_kills: 29 (total_kills)- "howdy doody": 33 (dalevel)- howdy: 33 (lvl)- jumps: 263 (jumps)- "asdf": 33 (😂)`, + now := time.Unix(0, 0) + + tests := []struct { + name string + client *MockClient + metrics []telegraf.Metric + err error + }{ + { + name: "no players", + client: &MockClient{ + ConnectF: func() error { + return nil + }, + PlayersF: func() ([]string, error) { + return []string{}, nil + }, }, - Err: nil, + metrics: []telegraf.Metric{}, }, - clientSet: true, - } - - err := testConfig.Gather(&acc) - - if err != nil { - t.Fatalf("gather returned error. 
Error: %s\n", err) - } - - if !testConfig.clientSet { - t.Fatalf("clientSet should be true, client should be set") - } - - tags := map[string]string{ - "player": "divislight", - "server": "biffsgang.net:25575", - } - - assertContainsTaggedStat(t, &acc, "minecraft", "jumps", 178, tags) - tags["player"] = "mauxlaim" - assertContainsTaggedStat(t, &acc, "minecraft", "cow_kills", 1, tags) - tags["player"] = "torham" - assertContainsTaggedStat(t, &acc, "minecraft", "total_kills", 29, tags) - -} - -func assertContainsTaggedStat( - t *testing.T, - acc *testutil.Accumulator, - measurement string, - field string, - expectedValue int, - tags map[string]string, -) { - var actualValue int - for _, pt := range acc.Metrics { - if pt.Measurement == measurement && reflect.DeepEqual(pt.Tags, tags) { - for fieldname, value := range pt.Fields { - if fieldname == field { - actualValue = value.(int) - if value == expectedValue { - return + { + name: "one player without scores", + client: &MockClient{ + ConnectF: func() error { + return nil + }, + PlayersF: func() ([]string, error) { + return []string{"Etho"}, nil + }, + ScoresF: func(player string) ([]Score, error) { + switch player { + case "Etho": + return []Score{}, nil + default: + panic("unknown player") } - t.Errorf("Expected value %d\n got value %d\n", expectedValue, value) - } - } - } + }, + }, + metrics: []telegraf.Metric{}, + }, + { + name: "one player with scores", + client: &MockClient{ + ConnectF: func() error { + return nil + }, + PlayersF: func() ([]string, error) { + return []string{"Etho"}, nil + }, + ScoresF: func(player string) ([]Score, error) { + switch player { + case "Etho": + return []Score{{Name: "jumps", Value: 42}}, nil + default: + panic("unknown player") + } + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "minecraft", + map[string]string{ + "player": "Etho", + "server": "example.org:25575", + "source": "example.org", + "port": "25575", + }, + map[string]interface{}{ + "jumps": 42, + }, + now, + ), + }, + }, } - msg := fmt.Sprintf( - "Could not find measurement \"%s\" with requested tags within %s, Actual: %d", - measurement, field, actualValue) - t.Fatal(msg) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + plugin := &Minecraft{ + Server: "example.org", + Port: "25575", + Password: "xyzzy", + client: tt.client, + } + var acc testutil.Accumulator + acc.TimeFunc = func() time.Time { return now } + + err := plugin.Gather(&acc) + + require.Equal(t, tt.err, err) + testutil.RequireMetricsEqual(t, tt.metrics, acc.GetTelegrafMetrics()) + }) + } } diff --git a/plugins/inputs/minecraft/rcon.go b/plugins/inputs/minecraft/rcon.go deleted file mode 100644 index f42fc8ba4..000000000 --- a/plugins/inputs/minecraft/rcon.go +++ /dev/null @@ -1,112 +0,0 @@ -package minecraft - -import ( - "strconv" - "strings" - - "github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon" -) - -const ( - // NoMatches is a sentinel value returned when there are no statistics defined on the - //minecraft server - NoMatches = `All matches failed` - // ScoreboardPlayerList is the command to see all player statistics - ScoreboardPlayerList = `scoreboard players list *` -) - -// RCONClient is a representation of RCON command authorizaiton and exectution -type RCONClient interface { - Authorize(password string) (*rcon.Packet, error) - Execute(command string) (*rcon.Packet, error) -} - -// RCON represents a RCON server connection -type RCON struct { - Server string - Port string - Password string - client RCONClient -} - -// 
RCONClientProducer is an interface which defines how a new client will be -// produced in the event of a network disconnect. It exists mainly for -// testing purposes -type RCONClientProducer interface { - newClient() (RCONClient, error) -} - -type defaultClientProducer struct { - Server string - Port string -} - -func (d defaultClientProducer) newClient() (RCONClient, error) { - return newClient(d.Server, d.Port) -} - -// NewRCON creates a new RCON -func NewRCON(server, port, password string) (*RCON, error) { - client, err := newClient(server, port) - if err != nil { - return nil, err - } - - return &RCON{ - Server: server, - Port: port, - Password: password, - client: client, - }, nil -} - -func newClient(server, port string) (*rcon.Client, error) { - p, err := strconv.Atoi(port) - if err != nil { - return nil, err - } - - client, err := rcon.NewClient(server, p) - - // If we've encountered any error, the contents of `client` could be corrupted, - // so we must return nil, err - if err != nil { - return nil, err - } - return client, nil -} - -// Gather receives all player scoreboard information and returns it per player. -func (r *RCON) Gather(producer RCONClientProducer) ([]string, error) { - if r.client == nil { - var err error - r.client, err = producer.newClient() - if err != nil { - return nil, err - } - } - - if _, err := r.client.Authorize(r.Password); err != nil { - // Potentially a network problem where the client will need to be - // re-initialized - r.client = nil - return nil, err - } - - packet, err := r.client.Execute(ScoreboardPlayerList) - if err != nil { - // Potentially a network problem where the client will need to be - // re-initialized - r.client = nil - return nil, err - } - - if !strings.Contains(packet.Body, NoMatches) { - players := strings.Split(packet.Body, "Showing") - if len(players) > 1 { - return players[1:], nil - } - } - - return []string{}, nil -} diff --git a/plugins/inputs/minecraft/rcon_disconnect_error_test.go b/plugins/inputs/minecraft/rcon_disconnect_error_test.go deleted file mode 100644 index c89308e06..000000000 --- a/plugins/inputs/minecraft/rcon_disconnect_error_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package minecraft - -import ( - "errors" - "testing" -) - -type MockRCONProducer struct { - Err error -} - -func (m *MockRCONProducer) newClient() (RCONClient, error) { - return nil, m.Err -} - -func TestRCONErrorHandling(t *testing.T) { - m := &MockRCONProducer{ - Err: errors.New("Error: failed connection"), - } - c := &RCON{ - Server: "craftstuff.com", - Port: "2222", - Password: "pass", - //Force fetching of new client - client: nil, - } - - _, err := c.Gather(m) - if err == nil { - t.Errorf("Error nil, unexpected result") - } - - if c.client != nil { - t.Fatal("c.client should be nil, unexpected result") - } -} diff --git a/plugins/inputs/minecraft/rcon_test.go b/plugins/inputs/minecraft/rcon_test.go deleted file mode 100644 index 1660b53f9..000000000 --- a/plugins/inputs/minecraft/rcon_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package minecraft - -import ( - "testing" - - "github.com/influxdata/telegraf/plugins/inputs/minecraft/internal/rcon" -) - -type MockRCONClient struct { - Result *rcon.Packet - Err error -} - -func (m *MockRCONClient) Authorize(password string) (*rcon.Packet, error) { - return m.Result, m.Err -} -func (m *MockRCONClient) Execute(command string) (*rcon.Packet, error) { - return m.Result, m.Err -} - -// TestRCONGather test the RCON gather function -func TestRCONGather(t *testing.T) { - mock := &MockRCONClient{ - Result: 
&rcon.Packet{ - Body: `Showing 1 tracked objective(s) for divislight:- jumps: 178 (jumps)Showing 7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)Showing 5 tracked objective(s) for torham:- total_kills: 29 (total_kills)- "howdy doody": 33 (dalevel)- howdy: 33 (lvl)- jumps: 263 (jumps)- "asdf": 33 (😂)`, - }, - Err: nil, - } - - want := []string{ - ` 1 tracked objective(s) for divislight:- jumps: 178 (jumps)`, - ` 7 tracked objective(s) for mauxlaim:- total_kills: 39 (total_kills)- "howdy doody": 37 (dalevel)- howdy: 37 (lvl)- jumps: 1290 (jumps)- iron_pickaxe: 284 (iron_pickaxe)- cow_kills: 1 (cow_kills)- "asdf": 37 (😂)`, - ` 5 tracked objective(s) for torham:- total_kills: 29 (total_kills)- "howdy doody": 33 (dalevel)- howdy: 33 (lvl)- jumps: 263 (jumps)- "asdf": 33 (😂)`, - } - - client := &RCON{ - Server: "craftstuff.com", - Port: "2222", - Password: "pass", - client: mock, - } - - d := defaultClientProducer{} - got, err := client.Gather(d) - if err != nil { - t.Fatalf("Gather returned an error. Error %s\n", err) - } - for i, s := range got { - if want[i] != s { - t.Fatalf("Got %s at index %d, want %s at index %d", s, i, want[i], i) - } - } - - client.client = &MockRCONClient{ - Result: &rcon.Packet{ - Body: "", - }, - Err: nil, - } - - got, err = client.Gather(defaultClientProducer{}) - if err != nil { - t.Fatalf("Gather returned an error. Error %s\n", err) - } - if len(got) != 0 { - t.Fatalf("Expected empty slice of length %d, got slice of length %d", 0, len(got)) - } -} diff --git a/plugins/inputs/modbus/README.md b/plugins/inputs/modbus/README.md new file mode 100644 index 000000000..629c79027 --- /dev/null +++ b/plugins/inputs/modbus/README.md @@ -0,0 +1,99 @@ +# Modbus Input Plugin + +The Modbus plugin collects Discrete Inputs, Coils, Input Registers and Holding +Registers via Modbus TCP or Modbus RTU/ASCII. + +### Configuration + +```toml +[[inputs.modbus]] + ## Connection Configuration + ## + ## The module supports connections to PLCs via MODBUS/TCP or + ## via serial line communication in binary (RTU) or readable (ASCII) encoding + ## + ## Device name + name = "Device" + + ## Slave ID - addresses a MODBUS device on the bus + ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] + slave_id = 1 + + ## Timeout for each request + timeout = "1s" + + ## Maximum number of retries and the time to wait between retries + ## when a slave-device is busy. 
+ # busy_retries = 0 + # busy_retries_wait = "100ms" + + # TCP - connect via Modbus/TCP + controller = "tcp://localhost:502" + + ## Serial (RS485; RS232) + # controller = "file:///dev/ttyUSB0" + # baud_rate = 9600 + # data_bits = 8 + # parity = "N" + # stop_bits = 1 + # transmission_mode = "RTU" + + + ## Measurements + ## + + ## Digital Variables, Discrete Inputs and Coils + ## name - the variable name + ## address - variable address + + discrete_inputs = [ + { name = "Start", address = [0]}, + { name = "Stop", address = [1]}, + { name = "Reset", address = [2]}, + { name = "EmergencyStop", address = [3]}, + ] + coils = [ + { name = "Motor1-Run", address = [0]}, + { name = "Motor1-Jog", address = [1]}, + { name = "Motor1-Stop", address = [2]}, + ] + + ## Analog Variables, Input Registers and Holding Registers + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## byte_order - the ordering of bytes + ## |---AB, ABCD - Big Endian + ## |---BA, DCBA - Little Endian + ## |---BADC - Mid-Big Endian + ## |---CDAB - Mid-Little Endian + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) + ## scale - the final numeric variable representation + ## address - variable address + + holding_registers = [ + { name = "PowerFactor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, + { name = "Voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, + { name = "Energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, + { name = "Current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, + { name = "Frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, + { name = "Power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, + ] + input_registers = [ + { name = "TankLevel", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, + { name = "TankPH", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, + { name = "Pump1-Speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, + ] +``` + +### Metrics + +Metric are custom and configured using the `discrete_inputs`, `coils`, +`holding_register` and `input_registers` options. 
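As a rough sketch of what the `byte_order`, `data_type` and `scale` options do to the
raw register bytes, the snippet below decodes a single big-endian (`AB`) 16-bit register
and applies the scale as a multiplier, which is consistent with the example output below.
The helper name and sample bytes are illustrative only, not part of the plugin.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeInt16AB mirrors the documented conversion for byte_order = "AB" and
// data_type = "INT16": decode the two register bytes big-endian, then apply
// the configured scale as a multiplier.
func decodeInt16AB(b []byte, scale float64) float64 {
	raw := int16(binary.BigEndian.Uint16(b))
	return float64(raw) * scale
}

func main() {
	// A register holding 0x04D2 (1234) with scale = 0.1 yields 123.4.
	fmt.Println(decodeInt16AB([]byte{0x04, 0xD2}, 0.1))
}
```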
+ + +### Example Output + +``` +$ ./telegraf -config telegraf.conf -input-filter modbus -test +modbus.InputRegisters,host=orangepizero Current=0,Energy=0,Frecuency=60,Power=0,PowerFactor=0,Voltage=123.9000015258789 1554079521000000000 +``` diff --git a/plugins/inputs/modbus/modbus.go b/plugins/inputs/modbus/modbus.go new file mode 100644 index 000000000..c1ff56bab --- /dev/null +++ b/plugins/inputs/modbus/modbus.go @@ -0,0 +1,705 @@ +package modbus + +import ( + "encoding/binary" + "fmt" + "log" + "math" + "net" + "net/url" + "sort" + "time" + + mb "github.com/goburrow/modbus" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Modbus holds all data relevant to the plugin +type Modbus struct { + Name string `toml:"name"` + Controller string `toml:"controller"` + TransmissionMode string `toml:"transmission_mode"` + BaudRate int `toml:"baud_rate"` + DataBits int `toml:"data_bits"` + Parity string `toml:"parity"` + StopBits int `toml:"stop_bits"` + SlaveID int `toml:"slave_id"` + Timeout internal.Duration `toml:"timeout"` + Retries int `toml:"busy_retries"` + RetriesWaitTime internal.Duration `toml:"busy_retries_wait"` + DiscreteInputs []fieldContainer `toml:"discrete_inputs"` + Coils []fieldContainer `toml:"coils"` + HoldingRegisters []fieldContainer `toml:"holding_registers"` + InputRegisters []fieldContainer `toml:"input_registers"` + registers []register + isConnected bool + tcpHandler *mb.TCPClientHandler + rtuHandler *mb.RTUClientHandler + asciiHandler *mb.ASCIIClientHandler + client mb.Client +} + +type register struct { + Type string + RegistersRange []registerRange + Fields []fieldContainer +} + +type fieldContainer struct { + Measurement string `toml:"measurement"` + Name string `toml:"name"` + ByteOrder string `toml:"byte_order"` + DataType string `toml:"data_type"` + Scale float64 `toml:"scale"` + Address []uint16 `toml:"address"` + value interface{} +} + +type registerRange struct { + address uint16 + length uint16 +} + +const ( + cDiscreteInputs = "discrete_input" + cCoils = "coil" + cHoldingRegisters = "holding_register" + cInputRegisters = "input_register" +) + +const description = `Retrieve data from MODBUS slave devices` +const sampleConfig = ` + ## Connection Configuration + ## + ## The plugin supports connections to PLCs via MODBUS/TCP or + ## via serial line communication in binary (RTU) or readable (ASCII) encoding + ## + ## Device name + name = "Device" + + ## Slave ID - addresses a MODBUS device on the bus + ## Range: 0 - 255 [0 = broadcast; 248 - 255 = reserved] + slave_id = 1 + + ## Timeout for each request + timeout = "1s" + + ## Maximum number of retries and the time to wait between retries + ## when a slave-device is busy. 
+ # busy_retries = 0 + # busy_retries_wait = "100ms" + + # TCP - connect via Modbus/TCP + controller = "tcp://localhost:502" + + ## Serial (RS485; RS232) + # controller = "file:///dev/ttyUSB0" + # baud_rate = 9600 + # data_bits = 8 + # parity = "N" + # stop_bits = 1 + # transmission_mode = "RTU" + + + ## Measurements + ## + + ## Digital Variables, Discrete Inputs and Coils + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## address - variable address + + discrete_inputs = [ + { name = "start", address = [0]}, + { name = "stop", address = [1]}, + { name = "reset", address = [2]}, + { name = "emergency_stop", address = [3]}, + ] + coils = [ + { name = "motor1_run", address = [0]}, + { name = "motor1_jog", address = [1]}, + { name = "motor1_stop", address = [2]}, + ] + + ## Analog Variables, Input Registers and Holding Registers + ## measurement - the (optional) measurement name, defaults to "modbus" + ## name - the variable name + ## byte_order - the ordering of bytes + ## |---AB, ABCD - Big Endian + ## |---BA, DCBA - Little Endian + ## |---BADC - Mid-Big Endian + ## |---CDAB - Mid-Little Endian + ## data_type - INT16, UINT16, INT32, UINT32, INT64, UINT64, FLOAT32, FLOAT32-IEEE (the IEEE 754 binary representation) + ## scale - the final numeric variable representation + ## address - variable address + + holding_registers = [ + { name = "power_factor", byte_order = "AB", data_type = "FLOAT32", scale=0.01, address = [8]}, + { name = "voltage", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [0]}, + { name = "energy", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [5,6]}, + { name = "current", byte_order = "ABCD", data_type = "FLOAT32", scale=0.001, address = [1,2]}, + { name = "frequency", byte_order = "AB", data_type = "FLOAT32", scale=0.1, address = [7]}, + { name = "power", byte_order = "ABCD", data_type = "FLOAT32", scale=0.1, address = [3,4]}, + ] + input_registers = [ + { name = "tank_level", byte_order = "AB", data_type = "INT16", scale=1.0, address = [0]}, + { name = "tank_ph", byte_order = "AB", data_type = "INT16", scale=1.0, address = [1]}, + { name = "pump1_speed", byte_order = "ABCD", data_type = "INT32", scale=1.0, address = [3,4]}, + ] +` + +// SampleConfig returns a basic configuration for the plugin +func (m *Modbus) SampleConfig() string { + return sampleConfig +} + +// Description returns a short description of what the plugin does +func (m *Modbus) Description() string { + return description +} + +func (m *Modbus) Init() error { + //check device name + if m.Name == "" { + return fmt.Errorf("device name is empty") + } + + if m.Retries < 0 { + return fmt.Errorf("retries cannot be negative") + } + + err := m.InitRegister(m.DiscreteInputs, cDiscreteInputs) + if err != nil { + return err + } + + err = m.InitRegister(m.Coils, cCoils) + if err != nil { + return err + } + + err = m.InitRegister(m.HoldingRegisters, cHoldingRegisters) + if err != nil { + return err + } + + err = m.InitRegister(m.InputRegisters, cInputRegisters) + if err != nil { + return err + } + + return nil +} + +func (m *Modbus) InitRegister(fields []fieldContainer, name string) error { + if len(fields) == 0 { + return nil + } + + err := validateFieldContainers(fields, name) + if err != nil { + return err + } + + addrs := []uint16{} + for _, field := range fields { + for _, a := range field.Address { + addrs = append(addrs, a) + } + } + + addrs = removeDuplicates(addrs) + sort.Slice(addrs, func(i, j int) bool { return 
addrs[i] < addrs[j] }) + + ii := 0 + var registersRange []registerRange + + // Get range of consecutive integers + // [1, 2, 3, 5, 6, 10, 11, 12, 14] + // (1, 3) , (5, 2) , (10, 3), (14 , 1) + for range addrs { + if ii < len(addrs) { + start := addrs[ii] + end := start + + for ii < len(addrs)-1 && addrs[ii+1]-addrs[ii] == 1 { + end = addrs[ii+1] + ii++ + } + ii++ + registersRange = append(registersRange, registerRange{start, end - start + 1}) + } + } + + m.registers = append(m.registers, register{name, registersRange, fields}) + + return nil +} + +// Connect to a MODBUS Slave device via Modbus/[TCP|RTU|ASCII] +func connect(m *Modbus) error { + u, err := url.Parse(m.Controller) + if err != nil { + return err + } + + switch u.Scheme { + case "tcp": + var host, port string + host, port, err = net.SplitHostPort(u.Host) + if err != nil { + return err + } + m.tcpHandler = mb.NewTCPClientHandler(host + ":" + port) + m.tcpHandler.Timeout = m.Timeout.Duration + m.tcpHandler.SlaveId = byte(m.SlaveID) + m.client = mb.NewClient(m.tcpHandler) + err := m.tcpHandler.Connect() + if err != nil { + return err + } + m.isConnected = true + return nil + case "file": + if m.TransmissionMode == "RTU" { + m.rtuHandler = mb.NewRTUClientHandler(u.Path) + m.rtuHandler.Timeout = m.Timeout.Duration + m.rtuHandler.SlaveId = byte(m.SlaveID) + m.rtuHandler.BaudRate = m.BaudRate + m.rtuHandler.DataBits = m.DataBits + m.rtuHandler.Parity = m.Parity + m.rtuHandler.StopBits = m.StopBits + m.client = mb.NewClient(m.rtuHandler) + err := m.rtuHandler.Connect() + if err != nil { + return err + } + m.isConnected = true + return nil + } else if m.TransmissionMode == "ASCII" { + m.asciiHandler = mb.NewASCIIClientHandler(u.Path) + m.asciiHandler.Timeout = m.Timeout.Duration + m.asciiHandler.SlaveId = byte(m.SlaveID) + m.asciiHandler.BaudRate = m.BaudRate + m.asciiHandler.DataBits = m.DataBits + m.asciiHandler.Parity = m.Parity + m.asciiHandler.StopBits = m.StopBits + m.client = mb.NewClient(m.asciiHandler) + err := m.asciiHandler.Connect() + if err != nil { + return err + } + m.isConnected = true + return nil + } else { + return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) + } + default: + return fmt.Errorf("invalid controller") + } +} + +func disconnect(m *Modbus) error { + u, err := url.Parse(m.Controller) + if err != nil { + return err + } + + switch u.Scheme { + case "tcp": + m.tcpHandler.Close() + return nil + case "file": + if m.TransmissionMode == "RTU" { + m.rtuHandler.Close() + return nil + } else if m.TransmissionMode == "ASCII" { + m.asciiHandler.Close() + return nil + } else { + return fmt.Errorf("invalid protocol '%s' - '%s' ", u.Scheme, m.TransmissionMode) + } + default: + return fmt.Errorf("invalid controller") + } +} + +func validateFieldContainers(t []fieldContainer, n string) error { + nameEncountered := map[string]bool{} + for _, item := range t { + //check empty name + if item.Name == "" { + return fmt.Errorf("empty name in '%s'", n) + } + + //search name duplicate + canonical_name := item.Measurement + "." 
+ item.Name + if nameEncountered[canonical_name] { + return fmt.Errorf("name '%s' is duplicated in measurement '%s' '%s' - '%s'", item.Name, item.Measurement, n, item.Name) + } else { + nameEncountered[canonical_name] = true + } + + if n == cInputRegisters || n == cHoldingRegisters { + // search byte order + switch item.ByteOrder { + case "AB", "BA", "ABCD", "CDAB", "BADC", "DCBA", "ABCDEFGH", "HGFEDCBA", "BADCFEHG", "GHEFCDAB": + break + default: + return fmt.Errorf("invalid byte order '%s' in '%s' - '%s'", item.ByteOrder, n, item.Name) + } + + // search data type + switch item.DataType { + case "UINT16", "INT16", "UINT32", "INT32", "UINT64", "INT64", "FLOAT32-IEEE", "FLOAT32": + break + default: + return fmt.Errorf("invalid data type '%s' in '%s' - '%s'", item.DataType, n, item.Name) + } + + // check scale + if item.Scale == 0.0 { + return fmt.Errorf("invalid scale '%f' in '%s' - '%s'", item.Scale, n, item.Name) + } + } + + // check address + if len(item.Address) != 1 && len(item.Address) != 2 && len(item.Address) != 4 { + return fmt.Errorf("invalid address '%v' length '%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) + } + + if n == cInputRegisters || n == cHoldingRegisters { + if 2*len(item.Address) != len(item.ByteOrder) { + return fmt.Errorf("invalid byte order '%s' and address '%v' in '%s' - '%s'", item.ByteOrder, item.Address, n, item.Name) + } + + // search duplicated + if len(item.Address) > len(removeDuplicates(item.Address)) { + return fmt.Errorf("duplicate address '%v' in '%s' - '%s'", item.Address, n, item.Name) + } + } else if len(item.Address) != 1 { + return fmt.Errorf("invalid address'%v' length'%v' in '%s' - '%s'", item.Address, len(item.Address), n, item.Name) + } + } + return nil +} + +func removeDuplicates(elements []uint16) []uint16 { + encountered := map[uint16]bool{} + result := []uint16{} + + for v := range elements { + if encountered[elements[v]] { + } else { + encountered[elements[v]] = true + result = append(result, elements[v]) + } + } + + return result +} + +func readRegisterValues(m *Modbus, rt string, rr registerRange) ([]byte, error) { + if rt == cDiscreteInputs { + return m.client.ReadDiscreteInputs(uint16(rr.address), uint16(rr.length)) + } else if rt == cCoils { + return m.client.ReadCoils(uint16(rr.address), uint16(rr.length)) + } else if rt == cInputRegisters { + return m.client.ReadInputRegisters(uint16(rr.address), uint16(rr.length)) + } else if rt == cHoldingRegisters { + return m.client.ReadHoldingRegisters(uint16(rr.address), uint16(rr.length)) + } else { + return []byte{}, fmt.Errorf("not Valid function") + } +} + +func (m *Modbus) getFields() error { + for _, register := range m.registers { + rawValues := make(map[uint16][]byte) + bitRawValues := make(map[uint16]uint16) + for _, rr := range register.RegistersRange { + address := rr.address + readValues, err := readRegisterValues(m, register.Type, rr) + if err != nil { + return err + } + + // Raw Values + if register.Type == cDiscreteInputs || register.Type == cCoils { + for _, readValue := range readValues { + for bitPosition := 0; bitPosition < 8; bitPosition++ { + bitRawValues[address] = getBitValue(readValue, bitPosition) + address = address + 1 + if address+1 > rr.length { + break + } + } + } + } + + // Raw Values + if register.Type == cInputRegisters || register.Type == cHoldingRegisters { + batchSize := 2 + for batchSize < len(readValues) { + rawValues[address] = readValues[0:batchSize:batchSize] + address = address + 1 + readValues = readValues[batchSize:] + } + + 
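+			// The loop above leaves the final register's two bytes; store them under
+			// its address as well.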
rawValues[address] = readValues[0:batchSize:batchSize] + } + } + + if register.Type == cDiscreteInputs || register.Type == cCoils { + for i := 0; i < len(register.Fields); i++ { + register.Fields[i].value = bitRawValues[register.Fields[i].Address[0]] + } + } + + if register.Type == cInputRegisters || register.Type == cHoldingRegisters { + for i := 0; i < len(register.Fields); i++ { + var values_t []byte + + for j := 0; j < len(register.Fields[i].Address); j++ { + tempArray := rawValues[register.Fields[i].Address[j]] + for x := 0; x < len(tempArray); x++ { + values_t = append(values_t, tempArray[x]) + } + } + + register.Fields[i].value = convertDataType(register.Fields[i], values_t) + } + + } + } + + return nil +} + +func getBitValue(n byte, pos int) uint16 { + return uint16(n >> uint(pos) & 0x01) +} + +func convertDataType(t fieldContainer, bytes []byte) interface{} { + switch t.DataType { + case "UINT16": + e16 := convertEndianness16(t.ByteOrder, bytes) + return scaleUint16(t.Scale, e16) + case "INT16": + e16 := convertEndianness16(t.ByteOrder, bytes) + f16 := int16(e16) + return scaleInt16(t.Scale, f16) + case "UINT32": + e32 := convertEndianness32(t.ByteOrder, bytes) + return scaleUint32(t.Scale, e32) + case "INT32": + e32 := convertEndianness32(t.ByteOrder, bytes) + f32 := int32(e32) + return scaleInt32(t.Scale, f32) + case "UINT64": + e64 := convertEndianness64(t.ByteOrder, bytes) + f64 := format64(t.DataType, e64).(uint64) + return scaleUint64(t.Scale, f64) + case "INT64": + e64 := convertEndianness64(t.ByteOrder, bytes) + f64 := format64(t.DataType, e64).(int64) + return scaleInt64(t.Scale, f64) + case "FLOAT32-IEEE": + e32 := convertEndianness32(t.ByteOrder, bytes) + f32 := math.Float32frombits(e32) + return scaleFloat32(t.Scale, f32) + case "FLOAT32": + if len(bytes) == 2 { + e16 := convertEndianness16(t.ByteOrder, bytes) + return scale16toFloat32(t.Scale, e16) + } else if len(bytes) == 4 { + e32 := convertEndianness32(t.ByteOrder, bytes) + return scale32toFloat32(t.Scale, e32) + } else { + e64 := convertEndianness64(t.ByteOrder, bytes) + return scale64toFloat32(t.Scale, e64) + } + default: + return 0 + } +} + +func convertEndianness16(o string, b []byte) uint16 { + switch o { + case "AB": + return binary.BigEndian.Uint16(b) + case "BA": + return binary.LittleEndian.Uint16(b) + default: + return 0 + } +} + +func convertEndianness32(o string, b []byte) uint32 { + switch o { + case "ABCD": + return binary.BigEndian.Uint32(b) + case "DCBA": + return binary.LittleEndian.Uint32(b) + case "BADC": + return uint32(binary.LittleEndian.Uint16(b[0:]))<<16 | uint32(binary.LittleEndian.Uint16(b[2:])) + case "CDAB": + return uint32(binary.BigEndian.Uint16(b[2:]))<<16 | uint32(binary.BigEndian.Uint16(b[0:])) + default: + return 0 + } +} + +func convertEndianness64(o string, b []byte) uint64 { + switch o { + case "ABCDEFGH": + return binary.BigEndian.Uint64(b) + case "HGFEDCBA": + return binary.LittleEndian.Uint64(b) + case "BADCFEHG": + return uint64(binary.LittleEndian.Uint16(b[0:]))<<48 | uint64(binary.LittleEndian.Uint16(b[2:]))<<32 | uint64(binary.LittleEndian.Uint16(b[4:]))<<16 | uint64(binary.LittleEndian.Uint16(b[6:])) + case "GHEFCDAB": + return uint64(binary.BigEndian.Uint16(b[6:]))<<48 | uint64(binary.BigEndian.Uint16(b[4:]))<<32 | uint64(binary.BigEndian.Uint16(b[2:]))<<16 | uint64(binary.BigEndian.Uint16(b[0:])) + default: + return 0 + } +} + +func format16(f string, r uint16) interface{} { + switch f { + case "UINT16": + return r + case "INT16": + return int16(r) + default: + return 
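For multi-register values the collected bytes are reordered according to the configured byte order before scaling. As a hedged, standalone illustration of the word-swapped "CDAB" case (the function name cdabUint32 is invented for this sketch), the bytes AA BB CC DD decode to 0xCCDDAABB, i.e. 3437079227, which is the value expected by the cdab_uint32 test case later in this diff:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// cdabUint32 swaps the two big-endian 16-bit words of a 32-bit value,
// matching the "CDAB" branch of convertEndianness32.
func cdabUint32(b []byte) uint32 {
	return uint32(binary.BigEndian.Uint16(b[2:4]))<<16 | uint32(binary.BigEndian.Uint16(b[0:2]))
}

func main() {
	raw := []byte{0xAA, 0xBB, 0xCC, 0xDD}
	v := cdabUint32(raw)
	fmt.Println(v == 3437079227) // true: 0xCCDDAABB
	// Integer scaling as scaleUint32 would apply it (truncating).
	fmt.Println(uint32(float64(v) * 0.1))
}
```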
r + } +} + +func format32(f string, r uint32) interface{} { + switch f { + case "UINT32": + return r + case "INT32": + return int32(r) + case "FLOAT32-IEEE": + return math.Float32frombits(r) + default: + return r + } +} + +func format64(f string, r uint64) interface{} { + switch f { + case "UINT64": + return r + case "INT64": + return int64(r) + default: + return r + } +} + +func scale16toFloat32(s float64, v uint16) float64 { + return float64(v) * s +} + +func scale32toFloat32(s float64, v uint32) float64 { + return float64(float64(v) * float64(s)) +} + +func scale64toFloat32(s float64, v uint64) float64 { + return float64(float64(v) * float64(s)) +} + +func scaleInt16(s float64, v int16) int16 { + return int16(float64(v) * s) +} + +func scaleUint16(s float64, v uint16) uint16 { + return uint16(float64(v) * s) +} + +func scaleUint32(s float64, v uint32) uint32 { + return uint32(float64(v) * float64(s)) +} + +func scaleInt32(s float64, v int32) int32 { + return int32(float64(v) * float64(s)) +} + +func scaleFloat32(s float64, v float32) float32 { + return float32(float64(v) * s) +} + +func scaleUint64(s float64, v uint64) uint64 { + return uint64(float64(v) * float64(s)) +} + +func scaleInt64(s float64, v int64) int64 { + return int64(float64(v) * float64(s)) +} + +// Gather implements the telegraf plugin interface method for data accumulation +func (m *Modbus) Gather(acc telegraf.Accumulator) error { + if !m.isConnected { + err := connect(m) + if err != nil { + m.isConnected = false + return err + } + } + + timestamp := time.Now() + for retry := 0; retry <= m.Retries; retry += 1 { + timestamp = time.Now() + err := m.getFields() + if err != nil { + mberr, ok := err.(*mb.ModbusError) + if ok && mberr.ExceptionCode == mb.ExceptionCodeServerDeviceBusy && retry < m.Retries { + log.Printf("I! [inputs.modbus] device busy! 
Retrying %d more time(s)...", m.Retries-retry) + time.Sleep(m.RetriesWaitTime.Duration) + continue + } + disconnect(m) + m.isConnected = false + return err + } + // Reading was successful, leave the retry loop + break + } + + grouper := metric.NewSeriesGrouper() + for _, reg := range m.registers { + tags := map[string]string{ + "name": m.Name, + "type": reg.Type, + } + + for _, field := range reg.Fields { + // In case no measurement was specified we use "modbus" as default + measurement := "modbus" + if field.Measurement != "" { + measurement = field.Measurement + } + + // Group the data by series + grouper.Add(measurement, tags, timestamp, field.Name, field.value) + } + + // Add the metrics grouped by series to the accumulator + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } + } + + return nil +} + +// Add this plugin to telegraf +func init() { + inputs.Add("modbus", func() telegraf.Input { return &Modbus{} }) +} diff --git a/plugins/inputs/modbus/modbus_test.go b/plugins/inputs/modbus/modbus_test.go new file mode 100644 index 000000000..97265769d --- /dev/null +++ b/plugins/inputs/modbus/modbus_test.go @@ -0,0 +1,624 @@ +package modbus + +import ( + "testing" + + m "github.com/goburrow/modbus" + "github.com/stretchr/testify/assert" + "github.com/tbrandon/mbserver" + + "github.com/influxdata/telegraf/testutil" +) + +func TestCoils(t *testing.T) { + var coilTests = []struct { + name string + address uint16 + quantity uint16 + write []byte + read uint16 + }{ + { + name: "coil0_turn_off", + address: 0, + quantity: 1, + write: []byte{0x00}, + read: 0, + }, + { + name: "coil0_turn_on", + address: 0, + quantity: 1, + write: []byte{0x01}, + read: 1, + }, + { + name: "coil1_turn_on", + address: 1, + quantity: 1, + write: []byte{0x01}, + read: 1, + }, + { + name: "coil2_turn_on", + address: 2, + quantity: 1, + write: []byte{0x01}, + read: 1, + }, + { + name: "coil3_turn_on", + address: 3, + quantity: 1, + write: []byte{0x01}, + read: 1, + }, + { + name: "coil1_turn_off", + address: 1, + quantity: 1, + write: []byte{0x00}, + read: 0, + }, + { + name: "coil2_turn_off", + address: 2, + quantity: 1, + write: []byte{0x00}, + read: 0, + }, + { + name: "coil3_turn_off", + address: 3, + quantity: 1, + write: []byte{0x00}, + read: 0, + }, + } + + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + defer serv.Close() + assert.NoError(t, err) + + handler := m.NewTCPClientHandler("localhost:1502") + err = handler.Connect() + assert.NoError(t, err) + defer handler.Close() + client := m.NewClient(handler) + + for _, ct := range coilTests { + t.Run(ct.name, func(t *testing.T) { + _, err = client.WriteMultipleCoils(ct.address, ct.quantity, ct.write) + assert.NoError(t, err) + + modbus := Modbus{ + Name: "TestCoils", + Controller: "tcp://localhost:1502", + SlaveID: 1, + Coils: []fieldContainer{ + { + Name: ct.name, + Address: []uint16{ct.address}, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.NoError(t, err) + assert.NotEmpty(t, modbus.registers) + + for _, coil := range modbus.registers { + assert.Equal(t, ct.read, coil.Fields[0].value) + } + }) + } +} + +func TestHoldingRegisters(t *testing.T) { + var holdingRegisterTests = []struct { + name string + address []uint16 + quantity uint16 + byteOrder string + dataType string + scale float64 + write []byte + read interface{} + }{ + { + name: "register0_ab_float32", + address: []uint16{0}, + quantity: 1, + byteOrder: "AB", + dataType: 
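The register0_ab_float32 case (continued just below) writes 0x08 0x98 with scale 0.1 and expects 220: with byte order "AB" the register reads as the integer 0x0898 = 2200, and the plugin's "FLOAT32" type for a single register is a scaled integer rather than an IEEE float. A quick standalone check of that arithmetic:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Bytes written to register 0 in the register0_ab_float32 case.
	raw := []byte{0x08, 0x98}
	v := binary.BigEndian.Uint16(raw) // "AB" order: 0x0898 == 2200
	fmt.Println(float64(v) * 0.1)     // 220, matching the expected read value
}
```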
"FLOAT32", + scale: 0.1, + write: []byte{0x08, 0x98}, + read: float64(220), + }, + { + name: "register0_register1_ab_float32", + address: []uint16{0, 1}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32", + scale: 0.001, + write: []byte{0x00, 0x00, 0x03, 0xE8}, + read: float64(1), + }, + { + name: "register1_register2_abcd_float32", + address: []uint16{1, 2}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32", + scale: 0.1, + write: []byte{0x00, 0x00, 0x08, 0x98}, + read: float64(220), + }, + { + name: "register3_register4_abcd_float32", + address: []uint16{3, 4}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32", + scale: 0.1, + write: []byte{0x00, 0x00, 0x08, 0x98}, + read: float64(220), + }, + { + name: "register7_ab_float32", + address: []uint16{7}, + quantity: 1, + byteOrder: "AB", + dataType: "FLOAT32", + scale: 0.1, + write: []byte{0x01, 0xF4}, + read: float64(50), + }, + { + name: "register10_ab_uint16", + address: []uint16{10}, + quantity: 1, + byteOrder: "AB", + dataType: "UINT16", + scale: 1, + write: []byte{0xAB, 0xCD}, + read: uint16(43981), + }, + { + name: "register10_ab_uint16-scale_.1", + address: []uint16{10}, + quantity: 1, + byteOrder: "AB", + dataType: "UINT16", + scale: .1, + write: []byte{0xAB, 0xCD}, + read: uint16(4398), + }, + { + name: "register10_ab_uint16_scale_10", + address: []uint16{10}, + quantity: 1, + byteOrder: "AB", + dataType: "UINT16", + scale: 10, + write: []byte{0x00, 0x2A}, + read: uint16(420), + }, + { + name: "register20_ba_uint16", + address: []uint16{20}, + quantity: 1, + byteOrder: "BA", + dataType: "UINT16", + scale: 1, + write: []byte{0xAB, 0xCD}, + read: uint16(52651), + }, + { + name: "register30_ab_int16", + address: []uint16{20}, + quantity: 1, + byteOrder: "AB", + dataType: "INT16", + scale: 1, + write: []byte{0xAB, 0xCD}, + read: int16(-21555), + }, + { + name: "register40_ba_int16", + address: []uint16{40}, + quantity: 1, + byteOrder: "BA", + dataType: "INT16", + scale: 1, + write: []byte{0xAB, 0xCD}, + read: int16(-12885), + }, + { + name: "register50_register51_abcd_int32_scaled", + address: []uint16{50, 51}, + quantity: 2, + byteOrder: "ABCD", + dataType: "INT32", + scale: 10, + write: []byte{0x00, 0x00, 0xAB, 0xCD}, + read: int32(439810), + }, + { + name: "register50_register51_abcd_int32", + address: []uint16{50, 51}, + quantity: 2, + byteOrder: "ABCD", + dataType: "INT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: int32(-1430532899), + }, + { + name: "register60_register61_dcba_int32", + address: []uint16{60, 61}, + quantity: 2, + byteOrder: "DCBA", + dataType: "INT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: int32(-573785174), + }, + { + name: "register70_register71_badc_int32", + address: []uint16{70, 71}, + quantity: 2, + byteOrder: "BADC", + dataType: "INT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: int32(-1146430004), + }, + { + name: "register80_register81_cdab_int32", + address: []uint16{80, 81}, + quantity: 2, + byteOrder: "CDAB", + dataType: "INT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: int32(-857888069), + }, + { + name: "register90_register91_abcd_uint32", + address: []uint16{90, 91}, + quantity: 2, + byteOrder: "ABCD", + dataType: "UINT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: uint32(2864434397), + }, + { + name: "register100_register101_dcba_uint32", + address: []uint16{100, 101}, + quantity: 2, + byteOrder: "DCBA", + dataType: "UINT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 
0xDD}, + read: uint32(3721182122), + }, + { + name: "register110_register111_badc_uint32", + address: []uint16{110, 111}, + quantity: 2, + byteOrder: "BADC", + dataType: "UINT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: uint32(3148537292), + }, + { + name: "register120_register121_cdab_uint32", + address: []uint16{120, 121}, + quantity: 2, + byteOrder: "CDAB", + dataType: "UINT32", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: uint32(3437079227), + }, + { + name: "register130_register131_abcd_float32_ieee", + address: []uint16{130, 131}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32-IEEE", + scale: 1, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: float32(-3.3360025e-13), + }, + { + name: "register130_register131_abcd_float32_ieee_scaled", + address: []uint16{130, 131}, + quantity: 2, + byteOrder: "ABCD", + dataType: "FLOAT32-IEEE", + scale: 10, + write: []byte{0xAA, 0xBB, 0xCC, 0xDD}, + read: float32(-3.3360025e-12), + }, + { + name: "register140_to_register143_abcdefgh_int64_scaled", + address: []uint16{140, 141, 142, 143}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "INT64", + scale: 10, + write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD}, + read: int64(10995116717570), + }, + { + name: "register140_to_register143_abcdefgh_int64", + address: []uint16{140, 141, 142, 143}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "INT64", + scale: 1, + write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD}, + read: int64(1099511671757), + }, + { + name: "register150_to_register153_hgfedcba_int64", + address: []uint16{150, 151, 152, 153}, + quantity: 4, + byteOrder: "HGFEDCBA", + dataType: "INT64", + scale: 1, + write: []byte{0x84, 0xF6, 0x45, 0xF9, 0xBC, 0xFE, 0xFF, 0xFF}, + read: int64(-1387387292028), + }, + { + name: "register160_to_register163_badcfehg_int64", + address: []uint16{160, 161, 162, 163}, + quantity: 4, + byteOrder: "BADCFEHG", + dataType: "INT64", + scale: 1, + write: []byte{0xFF, 0xFF, 0xBC, 0xFE, 0x45, 0xF9, 0x84, 0xF6}, + read: int64(-1387387292028), + }, + { + name: "register170_to_register173_ghefcdab_int64", + address: []uint16{170, 171, 172, 173}, + quantity: 4, + byteOrder: "GHEFCDAB", + dataType: "INT64", + scale: 1, + write: []byte{0xF6, 0x84, 0xF9, 0x45, 0xFE, 0xBC, 0xFF, 0xFF}, + read: int64(-1387387292028), + }, + { + name: "register180_to_register183_abcdefgh_uint64_scaled", + address: []uint16{180, 181, 182, 183}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "UINT64", + scale: 10, + write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD}, + read: uint64(10995116717570), + }, + { + name: "register180_to_register183_abcdefgh_uint64", + address: []uint16{180, 181, 182, 183}, + quantity: 4, + byteOrder: "ABCDEFGH", + dataType: "UINT64", + scale: 1, + write: []byte{0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0xAB, 0xCD}, + read: uint64(1099511671757), + }, + { + name: "register190_to_register193_hgfedcba_uint64", + address: []uint16{190, 191, 192, 193}, + quantity: 4, + byteOrder: "HGFEDCBA", + dataType: "UINT64", + scale: 1, + write: []byte{0x84, 0xF6, 0x45, 0xF9, 0xBC, 0xFE, 0xFF, 0xFF}, + read: uint64(18446742686322259968), + }, + { + name: "register200_to_register203_badcfehg_uint64", + address: []uint16{200, 201, 202, 203}, + quantity: 4, + byteOrder: "BADCFEHG", + dataType: "UINT64", + scale: 1, + write: []byte{0xFF, 0xFF, 0xBC, 0xFE, 0x45, 0xF9, 0x84, 0xF6}, + read: uint64(18446742686322259968), + }, + { + name: "register210_to_register213_ghefcdab_uint64", + address: []uint16{210, 
211, 212, 213}, + quantity: 4, + byteOrder: "GHEFCDAB", + dataType: "UINT64", + scale: 1, + write: []byte{0xF6, 0x84, 0xF9, 0x45, 0xFE, 0xBC, 0xFF, 0xFF}, + read: uint64(18446742686322259968), + }, + } + + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + defer serv.Close() + assert.NoError(t, err) + + handler := m.NewTCPClientHandler("localhost:1502") + err = handler.Connect() + assert.NoError(t, err) + defer handler.Close() + client := m.NewClient(handler) + + for _, hrt := range holdingRegisterTests { + t.Run(hrt.name, func(t *testing.T) { + _, err = client.WriteMultipleRegisters(hrt.address[0], hrt.quantity, hrt.write) + assert.NoError(t, err) + + modbus := Modbus{ + Name: "TestHoldingRegisters", + Controller: "tcp://localhost:1502", + SlaveID: 1, + HoldingRegisters: []fieldContainer{ + { + Name: hrt.name, + ByteOrder: hrt.byteOrder, + DataType: hrt.dataType, + Scale: hrt.scale, + Address: hrt.address, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + modbus.Gather(&acc) + assert.NotEmpty(t, modbus.registers) + + for _, coil := range modbus.registers { + assert.Equal(t, hrt.read, coil.Fields[0].value) + } + }) + } +} + +func TestRetrySuccessful(t *testing.T) { + retries := 0 + maxretries := 2 + value := 1 + + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + assert.NoError(t, err) + defer serv.Close() + + // Make read on coil-registers fail for some trials by making the device + // to appear busy + serv.RegisterFunctionHandler(1, + func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { + data := make([]byte, 2) + data[0] = byte(1) + data[1] = byte(value) + + except := &mbserver.SlaveDeviceBusy + if retries >= maxretries { + except = &mbserver.Success + } + retries += 1 + + return data, except + }) + + t.Run("retry_success", func(t *testing.T) { + modbus := Modbus{ + Name: "TestRetry", + Controller: "tcp://localhost:1502", + SlaveID: 1, + Retries: maxretries, + Coils: []fieldContainer{ + { + Name: "retry_success", + Address: []uint16{0}, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.NoError(t, err) + assert.NotEmpty(t, modbus.registers) + + for _, coil := range modbus.registers { + assert.Equal(t, uint16(value), coil.Fields[0].value) + } + }) +} + +func TestRetryFail(t *testing.T) { + maxretries := 2 + + serv := mbserver.NewServer() + err := serv.ListenTCP("localhost:1502") + assert.NoError(t, err) + defer serv.Close() + + // Make the read on coils fail with busy + serv.RegisterFunctionHandler(1, + func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { + data := make([]byte, 2) + data[0] = byte(1) + data[1] = byte(0) + + return data, &mbserver.SlaveDeviceBusy + }) + + t.Run("retry_fail", func(t *testing.T) { + modbus := Modbus{ + Name: "TestRetryFail", + Controller: "tcp://localhost:1502", + SlaveID: 1, + Retries: maxretries, + Coils: []fieldContainer{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.Error(t, err) + }) + + // Make the read on coils fail with illegal function preventing retry + counter := 0 + serv.RegisterFunctionHandler(1, + func(s *mbserver.Server, frame mbserver.Framer) ([]byte, *mbserver.Exception) { + counter += 1 + data := make([]byte, 2) + data[0] = byte(1) + data[1] = byte(0) + + return data, 
&mbserver.IllegalFunction + }) + + t.Run("retry_fail", func(t *testing.T) { + modbus := Modbus{ + Name: "TestRetryFail", + Controller: "tcp://localhost:1502", + SlaveID: 1, + Retries: maxretries, + Coils: []fieldContainer{ + { + Name: "retry_fail", + Address: []uint16{0}, + }, + }, + } + + err = modbus.Init() + assert.NoError(t, err) + var acc testutil.Accumulator + err = modbus.Gather(&acc) + assert.Error(t, err) + assert.Equal(t, counter, 1) + }) +} diff --git a/plugins/inputs/mongodb/README.md b/plugins/inputs/mongodb/README.md index 96852d724..cce93dc07 100644 --- a/plugins/inputs/mongodb/README.md +++ b/plugins/inputs/mongodb/README.md @@ -11,9 +11,21 @@ ## mongodb://10.10.3.33:18832, servers = ["mongodb://127.0.0.1:27017"] + ## When true, collect cluster status. + ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which + ## may have an impact on performance. + # gather_cluster_status = true + ## When true, collect per database stats # gather_perdb_stats = false + ## When true, collect per collection stats + # gather_col_stats = false + + ## List of db where collections stats are collected + ## If empty, all db are concerned + # col_stats_dbs = ["local"] + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -39,65 +51,181 @@ Telegraf logs similar to: Error in input [mongodb]: not authorized on admin to execute command { serverStatus: 1, recordStats: 0 } ``` +Some permission related errors are logged at debug level, you can check these +messages by setting `debug = true` in the agent section of the configuration or +by running Telegraf with the `--debug` argument. + ### Metrics: - mongodb - tags: - hostname + - node_type + - rs_name - fields: - active_reads (integer) - active_writes (integer) - - commands_per_sec (integer) - - cursor_timed_out (integer) - - cursor_no_timeout (integer) - - cursor_pinned (integer) - - cursor_total (integer) - - deletes_per_sec (integer) - - flushes_per_sec (integer) - - getmores_per_sec (integer) - - inserts_per_sec (integer) + - aggregate_command_failed (integer) + - aggregate_command_total (integer) + - assert_msg (integer) + - assert_regular (integer) + - assert_rollovers (integer) + - assert_user (integer) + - assert_warning (integer) + - available_reads (integer) + - available_writes (integer) + - commands (integer) + - connections_available (integer) + - connections_current (integer) + - connections_total_created (integer) + - count_command_failed (integer) + - count_command_total (integer) + - cursor_no_timeout_count (integer) + - cursor_pinned_count (integer) + - cursor_timed_out_count (integer) + - cursor_total_count (integer) + - delete_command_failed (integer) + - delete_command_total (integer) + - deletes (integer) + - distinct_command_failed (integer) + - distinct_command_total (integer) + - document_deleted (integer) + - document_inserted (integer) + - document_returned (integer) + - document_updated (integer) + - find_and_modify_command_failed (integer) + - find_and_modify_command_total (integer) + - find_command_failed (integer) + - find_command_total (integer) + - flushes (integer) + - flushes_total_time_ns (integer) + - get_more_command_failed (integer) + - get_more_command_total (integer) + - getmores (integer) + - insert_command_failed (integer) + - insert_command_total (integer) + - inserts (integer) - jumbo_chunks (integer) + - latency_commands_count (integer) + - latency_commands (integer) + - latency_reads_count (integer) + - latency_reads (integer) + - 
latency_writes_count (integer) + - latency_writes (integer) - member_status (string) - - net_in_bytes (integer) - - net_out_bytes (integer) + - net_in_bytes_count (integer) + - net_out_bytes_count (integer) - open_connections (integer) + - operation_scan_and_order (integer) + - operation_write_conflicts (integer) + - page_faults (integer) - percent_cache_dirty (float) - percent_cache_used (float) - - queries_per_sec (integer) + - queries (integer) - queued_reads (integer) - queued_writes (integer) - - repl_commands_per_sec (integer) - - repl_deletes_per_sec (integer) - - repl_getmores_per_sec (integer) - - repl_inserts_per_sec (integer) + - repl_apply_batches_num (integer) + - repl_apply_batches_total_millis (integer) + - repl_apply_ops (integer) + - repl_buffer_count (integer) + - repl_buffer_size_bytes (integer) + - repl_commands (integer) + - repl_deletes (integer) + - repl_executor_pool_in_progress_count (integer) + - repl_executor_queues_network_in_progress (integer) + - repl_executor_queues_sleepers (integer) + - repl_executor_unsignaled_events (integer) + - repl_getmores (integer) + - repl_inserts (integer) - repl_lag (integer) - - repl_queries_per_sec (integer) - - repl_updates_per_sec (integer) + - repl_network_bytes (integer) + - repl_network_getmores_num (integer) + - repl_network_getmores_total_millis (integer) + - repl_network_ops (integer) + - repl_queries (integer) + - repl_updates (integer) - repl_oplog_window_sec (integer) + - repl_state (integer) - resident_megabytes (integer) - state (string) + - storage_freelist_search_bucket_exhausted (integer) + - storage_freelist_search_requests (integer) + - storage_freelist_search_scanned (integer) + - tcmalloc_central_cache_free_bytes (integer) + - tcmalloc_current_allocated_bytes (integer) + - tcmalloc_current_total_thread_cache_bytes (integer) + - tcmalloc_heap_size (integer) + - tcmalloc_max_total_thread_cache_bytes (integer) + - tcmalloc_pageheap_commit_count (integer) + - tcmalloc_pageheap_committed_bytes (integer) + - tcmalloc_pageheap_decommit_count (integer) + - tcmalloc_pageheap_free_bytes (integer) + - tcmalloc_pageheap_reserve_count (integer) + - tcmalloc_pageheap_scavenge_count (integer) + - tcmalloc_pageheap_total_commit_bytes (integer) + - tcmalloc_pageheap_total_decommit_bytes (integer) + - tcmalloc_pageheap_total_reserve_bytes (integer) + - tcmalloc_pageheap_unmapped_bytes (integer) + - tcmalloc_spinlock_total_delay_ns (integer) + - tcmalloc_thread_cache_free_bytes (integer) + - tcmalloc_total_free_bytes (integer) + - tcmalloc_transfer_cache_free_bytes (integer) - total_available (integer) - total_created (integer) + - total_docs_scanned (integer) - total_in_use (integer) + - total_keys_scanned (integer) - total_refreshing (integer) - - ttl_deletes_per_sec (integer) - - ttl_passes_per_sec (integer) - - updates_per_sec (integer) + - total_tickets_reads (integer) + - total_tickets_writes (integer) + - ttl_deletes (integer) + - ttl_passes (integer) + - update_command_failed (integer) + - update_command_total (integer) + - updates (integer) + - uptime_ns (integer) + - version (string) - vsize_megabytes (integer) - wtcache_app_threads_page_read_count (integer) - wtcache_app_threads_page_read_time (integer) - wtcache_app_threads_page_write_count (integer) - wtcache_bytes_read_into (integer) - wtcache_bytes_written_from (integer) + - wtcache_pages_read_into (integer) + - wtcache_pages_requested_from (integer) - wtcache_current_bytes (integer) - wtcache_max_bytes_configured (integer) + - wtcache_internal_pages_evicted 
(integer) + - wtcache_modified_pages_evicted (integer) + - wtcache_unmodified_pages_evicted (integer) - wtcache_pages_evicted_by_app_thread (integer) - wtcache_pages_queued_for_eviction (integer) - wtcache_server_evicting_pages (integer) - wtcache_tracked_dirty_bytes (integer) - wtcache_worker_thread_evictingpages (integer) + - commands_per_sec (integer, deprecated in 1.10; use `commands`)) + - cursor_no_timeout (integer, opened/sec, deprecated in 1.10; use `cursor_no_timeout_count`)) + - cursor_pinned (integer, opened/sec, deprecated in 1.10; use `cursor_pinned_count`)) + - cursor_timed_out (integer, opened/sec, deprecated in 1.10; use `cursor_timed_out_count`)) + - cursor_total (integer, opened/sec, deprecated in 1.10; use `cursor_total_count`)) + - deletes_per_sec (integer, deprecated in 1.10; use `deletes`)) + - flushes_per_sec (integer, deprecated in 1.10; use `flushes`)) + - getmores_per_sec (integer, deprecated in 1.10; use `getmores`)) + - inserts_per_sec (integer, deprecated in 1.10; use `inserts`)) + - net_in_bytes (integer, bytes/sec, deprecated in 1.10; use `net_out_bytes_count`)) + - net_out_bytes (integer, bytes/sec, deprecated in 1.10; use `net_out_bytes_count`)) + - queries_per_sec (integer, deprecated in 1.10; use `queries`)) + - repl_commands_per_sec (integer, deprecated in 1.10; use `repl_commands`)) + - repl_deletes_per_sec (integer, deprecated in 1.10; use `repl_deletes`) + - repl_getmores_per_sec (integer, deprecated in 1.10; use `repl_getmores`) + - repl_inserts_per_sec (integer, deprecated in 1.10; use `repl_inserts`)) + - repl_queries_per_sec (integer, deprecated in 1.10; use `repl_queries`)) + - repl_updates_per_sec (integer, deprecated in 1.10; use `repl_updates`)) + - ttl_deletes_per_sec (integer, deprecated in 1.10; use `ttl_deletes`)) + - ttl_passes_per_sec (integer, deprecated in 1.10; use `ttl_passes`)) + - updates_per_sec (integer, deprecated in 1.10; use `updates`)) -- mongodb_db_stats ++ mongodb_db_stats - tags: - db_name - hostname @@ -113,6 +241,20 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta - storage_size (integer) - type (string) +- mongodb_col_stats + - tags: + - hostname + - collection + - db_name + - fields: + - size (integer) + - avg_obj_size (integer) + - storage_size (integer) + - total_index_size (integer) + - ok (integer) + - count (integer) + - type (string) + - mongodb_shard_stats - tags: - hostname @@ -124,7 +266,10 @@ Error in input [mongodb]: not authorized on admin to execute command { serverSta ### Example Output: ``` -mongodb,hostname=127.0.0.1:27017 
active_reads=0i,active_writes=0i,commands_per_sec=6i,cursor_no_timeout=0i,cursor_pinned=0i,cursor_timed_out=0i,cursor_total=0i,deletes_per_sec=0i,flushes_per_sec=0i,getmores_per_sec=1i,inserts_per_sec=0i,jumbo_chunks=0i,member_status="PRI",net_in_bytes=851i,net_out_bytes=23904i,open_connections=6i,percent_cache_dirty=0,percent_cache_used=0,queries_per_sec=2i,queued_reads=0i,queued_writes=0i,repl_commands_per_sec=0i,repl_deletes_per_sec=0i,repl_getmores_per_sec=0i,repl_inserts_per_sec=0i,repl_lag=0i,repl_queries_per_sec=0i,repl_updates_per_sec=0i,resident_megabytes=67i,state="PRIMARY",total_available=0i,total_created=0i,total_in_use=0i,total_refreshing=0i,ttl_deletes_per_sec=0i,ttl_passes_per_sec=0i,updates_per_sec=0i,vsize_megabytes=729i,wtcache_app_threads_page_read_count=4i,wtcache_app_threads_page_read_time=18i,wtcache_app_threads_page_write_count=6i,wtcache_bytes_read_into=10075i,wtcache_bytes_written_from=115711i,wtcache_current_bytes=86038i,wtcache_max_bytes_configured=1073741824i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=0i,wtcache_worker_thread_evictingpages=0i 1522798796000000000 -mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=818.625,collections=5i,data_size=6549i,index_size=86016i,indexes=4i,num_extents=0i,objects=8i,ok=1i,storage_size=118784i,type="db_stat" 1522799074000000000 +mongodb,hostname=127.0.0.1:27017 active_reads=3i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=87210i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=0i,assert_warning=0i,available_reads=125i,available_writes=128i,commands=218126i,commands_per_sec=1876i,connections_available=838853i,connections_current=7i,connections_total_created=8i,count_command_failed=0i,count_command_total=7i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=0i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=87190i,document_deleted=0i,document_inserted=0i,document_returned=7i,document_updated=43595i,find_and_modify_command_failed=0i,find_and_modify_command_total=43595i,find_command_failed=0i,find_command_total=348819i,flushes=1i,flushes_per_sec=0i,flushes_total_time_ns=5000000i,get_more_command_failed=0i,get_more_command_total=0i,getmores=7i,getmores_per_sec=1i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=44179i,latency_commands_count=122i,latency_reads=36662189i,latency_reads_count=523229i,latency_writes=6768713i,latency_writes_count=87190i,net_in_bytes=837378i,net_in_bytes_count=97692502i,net_out_bytes=690836i,net_out_bytes_count=75377383i,open_connections=7i,operation_scan_and_order=87193i,operation_write_conflicts=7i,page_faults=0i,percent_cache_dirty=0.9,percent_cache_used=1,queries=348816i,queries_per_sec=2988i,queued_reads=0i,queued_writes=0i,resident_megabytes=77i,storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=280136i,tcmalloc_current_allocated_bytes=77677288i,tcmalloc_current_total_thread_cache_bytes=1222608i,tcmalloc_heap_size=142659584i,tcmalloc_max_total_thread_cache_bytes=260046848i,tcmalloc_pageheap_commit_count=1898i,tcmalloc_pageheap_committed_bytes=130084864i,tcmalloc_pageheap_decommit_count=889i,tcmall
oc_pageheap_free_bytes=50610176i,tcmalloc_pageheap_reserve_count=50i,tcmalloc_pageheap_scavenge_count=884i,tcmalloc_pageheap_total_commit_bytes=13021937664i,tcmalloc_pageheap_total_decommit_bytes=12891852800i,tcmalloc_pageheap_total_reserve_bytes=142659584i,tcmalloc_pageheap_unmapped_bytes=12574720i,tcmalloc_spinlock_total_delay_ns=9767500i,tcmalloc_thread_cache_free_bytes=1222608i,tcmalloc_total_free_bytes=1797400i,tcmalloc_transfer_cache_free_bytes=294656i,total_available=0i,total_created=0i,total_docs_scanned=43595i,total_in_use=0i,total_keys_scanned=130805i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=0i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=43595i,updates=43595i,updates_per_sec=372i,uptime_ns=60023000000i,version="3.6.17",vsize_megabytes=1048i,wtcache_app_threads_page_read_count=108i,wtcache_app_threads_page_read_time=25995i,wtcache_app_threads_page_write_count=0i,wtcache_bytes_read_into=2487250i,wtcache_bytes_written_from=74i,wtcache_current_bytes=5014530i,wtcache_internal_pages_evicted=0i,wtcache_max_bytes_configured=505413632i,wtcache_modified_pages_evicted=0i,wtcache_pages_evicted_by_app_thread=0i,wtcache_pages_queued_for_eviction=0i,wtcache_pages_read_into=139i,wtcache_pages_requested_from=699135i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=4797426i,wtcache_unmodified_pages_evicted=0i,wtcache_worker_thread_evictingpages=0i 1586379818000000000 +mongodb,hostname=127.0.0.1:27017,node_type=SEC,rs_name=rs0 active_reads=1i,active_writes=0i,aggregate_command_failed=0i,aggregate_command_total=1i,assert_msg=0i,assert_regular=0i,assert_rollovers=0i,assert_user=79i,assert_warning=0i,available_reads=127i,available_writes=128i,commands=1121855i,commands_per_sec=10i,connections_available=51183i,connections_current=17i,connections_total_created=557i,count_command_failed=0i,count_command_total=46307i,cursor_no_timeout=0i,cursor_no_timeout_count=0i,cursor_pinned=0i,cursor_pinned_count=0i,cursor_timed_out=0i,cursor_timed_out_count=28i,cursor_total=0i,cursor_total_count=0i,delete_command_failed=0i,delete_command_total=0i,deletes=0i,deletes_per_sec=0i,distinct_command_failed=0i,distinct_command_total=0i,document_deleted=0i,document_inserted=0i,document_returned=2248129i,document_updated=0i,find_and_modify_command_failed=0i,find_and_modify_command_total=0i,find_command_failed=2i,find_command_total=8764i,flushes=7850i,flushes_per_sec=0i,flushes_total_time_ns=4535446000000i,get_more_command_failed=0i,get_more_command_total=1993i,getmores=2018i,getmores_per_sec=0i,insert_command_failed=0i,insert_command_total=0i,inserts=0i,inserts_per_sec=0i,jumbo_chunks=0i,latency_commands=112011949i,latency_commands_count=1072472i,latency_reads=1877142443i,latency_reads_count=57086i,latency_writes=0i,latency_writes_count=0i,member_status="SEC",net_in_bytes=1212i,net_in_bytes_count=263928689i,net_out_bytes=41051i,net_out_bytes_count=2475389483i,open_connections=17i,operation_scan_and_order=34i,operation_write_conflicts=0i,page_faults=317i,percent_cache_dirty=1.6,percent_cache_used=73,queries=8764i,queries_per_sec=0i,queued_reads=0i,queued_writes=0i,repl_apply_batches_num=17839419i,repl_apply_batches_total_millis=399929i,repl_apply_ops=23355263i,repl_buffer_count=0i,repl_buffer_size_bytes=0i,repl_commands=11i,repl_commands_per_sec=0i,repl_deletes=440608i,repl_deletes_per_sec=0i,repl_executor_pool_in_progress_count=0i,repl_executor_queues_network_in_progress=0i,repl_executor_queues_sleepers=4i,repl_exe
cutor_unsignaled_events=0i,repl_getmores=0i,repl_getmores_per_sec=0i,repl_inserts=1875729i,repl_inserts_per_sec=0i,repl_lag=0i,repl_network_bytes=39122199371i,repl_network_getmores_num=34908797i,repl_network_getmores_total_millis=434805356i,repl_network_ops=23199086i,repl_oplog_window_sec=619292i,repl_queries=0i,repl_queries_per_sec=0i,repl_updates=21034729i,repl_updates_per_sec=38i,repl_state=2,resident_megabytes=6721i,state="SECONDARY",storage_freelist_search_bucket_exhausted=0i,storage_freelist_search_requests=0i,storage_freelist_search_scanned=0i,tcmalloc_central_cache_free_bytes=358512400i,tcmalloc_current_allocated_bytes=5427379424i,tcmalloc_current_total_thread_cache_bytes=70349552i,tcmalloc_heap_size=10199310336i,tcmalloc_max_total_thread_cache_bytes=1073741824i,tcmalloc_pageheap_commit_count=790819i,tcmalloc_pageheap_committed_bytes=7064821760i,tcmalloc_pageheap_decommit_count=533347i,tcmalloc_pageheap_free_bytes=1207816192i,tcmalloc_pageheap_reserve_count=7706i,tcmalloc_pageheap_scavenge_count=426235i,tcmalloc_pageheap_total_commit_bytes=116127649792i,tcmalloc_pageheap_total_decommit_bytes=109062828032i,tcmalloc_pageheap_total_reserve_bytes=10199310336i,tcmalloc_pageheap_unmapped_bytes=3134488576i,tcmalloc_spinlock_total_delay_ns=2518474348i,tcmalloc_thread_cache_free_bytes=70349552i,tcmalloc_total_free_bytes=429626144i,tcmalloc_transfer_cache_free_bytes=764192i,total_available=0i,total_created=0i,total_docs_scanned=735004782i,total_in_use=0i,total_keys_scanned=6188216i,total_refreshing=0i,total_tickets_reads=128i,total_tickets_writes=128i,ttl_deletes=0i,ttl_deletes_per_sec=0i,ttl_passes=7892i,ttl_passes_per_sec=0i,update_command_failed=0i,update_command_total=0i,updates=0i,updates_per_sec=0i,uptime_ns=473590288000000i,version="3.6.17",vsize_megabytes=11136i,wtcache_app_threads_page_read_count=11467625i,wtcache_app_threads_page_read_time=1700336840i,wtcache_app_threads_page_write_count=13268184i,wtcache_bytes_read_into=348022587843i,wtcache_bytes_written_from=322571702254i,wtcache_current_bytes=5509459274i,wtcache_internal_pages_evicted=109108i,wtcache_max_bytes_configured=7547650048i,wtcache_modified_pages_evicted=911196i,wtcache_pages_evicted_by_app_thread=17366i,wtcache_pages_queued_for_eviction=16572754i,wtcache_pages_read_into=11689764i,wtcache_pages_requested_from=499825861i,wtcache_server_evicting_pages=0i,wtcache_tracked_dirty_bytes=117487510i,wtcache_unmodified_pages_evicted=11058458i,wtcache_worker_thread_evictingpages=11907226i 1586379707000000000 +mongodb_db_stats,db_name=admin,hostname=127.0.0.1:27017 avg_obj_size=241,collections=2i,data_size=723i,index_size=49152i,indexes=3i,num_extents=0i,objects=3i,ok=1i,storage_size=53248i,type="db_stat" 1547159491000000000 +mongodb_db_stats,db_name=local,hostname=127.0.0.1:27017 avg_obj_size=813.9705882352941,collections=6i,data_size=55350i,index_size=102400i,indexes=5i,num_extents=0i,objects=68i,ok=1i,storage_size=204800i,type="db_stat" 1547159491000000000 +mongodb_col_stats,collection=foo,db_name=local,hostname=127.0.0.1:27017 size=375005928i,avg_obj_size=5494,type="col_stat",storage_size=249307136i,total_index_size=2138112i,ok=1i,count=68251i 1547159491000000000 mongodb_shard_stats,hostname=127.0.0.1:27017,in_use=3i,available=3i,created=4i,refreshing=0i 1522799074000000000 ``` diff --git a/plugins/inputs/mongodb/mongodb.go b/plugins/inputs/mongodb/mongodb.go index 895667dee..016515ea9 100644 --- a/plugins/inputs/mongodb/mongodb.go +++ b/plugins/inputs/mongodb/mongodb.go @@ -4,7 +4,6 @@ import ( "crypto/tls" "crypto/x509" 
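The mongodb.go changes in this hunk replace direct log.Printf calls with a plugin-scoped logger: the struct gains an exported Log telegraf.Logger field, which Telegraf fills in before the plugin runs, and messages go through m.Log.Warnf/Errorf. A minimal sketch of that pattern (the MyInput struct and warnLegacyAddress helper are illustrative only, and the snippet assumes the telegraf module is on the import path):

```go
package example

import "github.com/influxdata/telegraf"

// MyInput stands in for a plugin struct such as MongoDB.
type MyInput struct {
	Servers []string

	// Telegraf injects a logger scoped to this plugin into the exported
	// Log field, so output is prefixed with the plugin name automatically.
	Log telegraf.Logger
}

func (m *MyInput) warnLegacyAddress(addr string) {
	// Replaces the old log.Printf("W! [inputs.mongodb] ...") call.
	m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", addr)
}
```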
"fmt" - "log" "net" "net/url" "strings" @@ -18,11 +17,16 @@ import ( ) type MongoDB struct { - Servers []string - Ssl Ssl - mongos map[string]*Server - GatherPerdbStats bool + Servers []string + Ssl Ssl + mongos map[string]*Server + GatherClusterStatus bool + GatherPerdbStats bool + GatherColStats bool + ColStatsDbs []string tlsint.ClientConfig + + Log telegraf.Logger } type Ssl struct { @@ -38,9 +42,21 @@ var sampleConfig = ` ## mongodb://10.10.3.33:18832, servers = ["mongodb://127.0.0.1:27017"] + ## When true, collect cluster status + ## Note that the query that counts jumbo chunks triggers a COLLSCAN, which + ## may have an impact on performance. + # gather_cluster_status = true + ## When true, collect per database stats # gather_perdb_stats = false + ## When true, collect per collection stats + # gather_col_stats = false + + ## List of db where collections stats are collected + ## If empty, all db are concerned + # col_stats_dbs = ["local"] + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -73,24 +89,27 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error { // Preserve backwards compatibility for hostnames without a // scheme, broken in go 1.8. Remove in Telegraf 2.0 serv = "mongodb://" + serv - log.Printf("W! [inputs.mongodb] Using %q as connection URL; please update your configuration to use an URL", serv) + m.Log.Warnf("Using %q as connection URL; please update your configuration to use an URL", serv) m.Servers[i] = serv } u, err := url.Parse(serv) if err != nil { - acc.AddError(fmt.Errorf("Unable to parse address %q: %s", serv, err)) + m.Log.Errorf("Unable to parse address %q: %s", serv, err.Error()) continue } if u.Host == "" { - acc.AddError(fmt.Errorf("Unable to parse address %q", serv)) + m.Log.Errorf("Unable to parse address %q", serv) continue } wg.Add(1) go func(srv *Server) { defer wg.Done() - acc.AddError(m.gatherServer(srv, acc)) + err := m.gatherServer(srv, acc) + if err != nil { + m.Log.Errorf("Error in plugin: %v", err) + } }(m.getMongoServer(u)) } @@ -101,6 +120,7 @@ func (m *MongoDB) Gather(acc telegraf.Accumulator) error { func (m *MongoDB) getMongoServer(url *url.URL) *Server { if _, ok := m.mongos[url.Host]; !ok { m.mongos[url.Host] = &Server{ + Log: m.Log, Url: url, } } @@ -117,8 +137,7 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { } dialInfo, err := mgo.ParseURL(dialAddrs[0]) if err != nil { - return fmt.Errorf("Unable to parse URL (%s), %s\n", - dialAddrs[0], err.Error()) + return fmt.Errorf("unable to parse URL %q: %s", dialAddrs[0], err.Error()) } dialInfo.Direct = true dialInfo.Timeout = 5 * time.Second @@ -160,17 +179,21 @@ func (m *MongoDB) gatherServer(server *Server, acc telegraf.Accumulator) error { sess, err := mgo.DialWithInfo(dialInfo) if err != nil { - return fmt.Errorf("Unable to connect to MongoDB, %s\n", err.Error()) + return fmt.Errorf("unable to connect to MongoDB: %s", err.Error()) } server.Session = sess } - return server.gatherData(acc, m.GatherPerdbStats) + return server.gatherData(acc, m.GatherClusterStatus, m.GatherPerdbStats, m.GatherColStats, m.ColStatsDbs) } func init() { inputs.Add("mongodb", func() telegraf.Input { return &MongoDB{ - mongos: make(map[string]*Server), + mongos: make(map[string]*Server), + GatherClusterStatus: true, + GatherPerdbStats: false, + GatherColStats: false, + ColStatsDbs: []string{"local"}, } }) } diff --git a/plugins/inputs/mongodb/mongodb_data.go b/plugins/inputs/mongodb/mongodb_data.go index 0c69670d5..7659a1a35 
100644 --- a/plugins/inputs/mongodb/mongodb_data.go +++ b/plugins/inputs/mongodb/mongodb_data.go @@ -13,6 +13,7 @@ type MongodbData struct { Fields map[string]interface{} Tags map[string]string DbData []DbData + ColData []ColData ShardHostData []DbData } @@ -21,6 +22,12 @@ type DbData struct { Fields map[string]interface{} } +type ColData struct { + Name string + DbName string + Fields map[string]interface{} +} + func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { return &MongodbData{ StatLine: statLine, @@ -31,28 +38,49 @@ func NewMongodbData(statLine *StatLine, tags map[string]string) *MongodbData { } var DefaultStats = map[string]string{ + "uptime_ns": "UptimeNanos", + "inserts": "InsertCnt", "inserts_per_sec": "Insert", + "queries": "QueryCnt", "queries_per_sec": "Query", + "updates": "UpdateCnt", "updates_per_sec": "Update", + "deletes": "DeleteCnt", "deletes_per_sec": "Delete", + "getmores": "GetMoreCnt", "getmores_per_sec": "GetMore", + "commands": "CommandCnt", "commands_per_sec": "Command", + "flushes": "FlushesCnt", "flushes_per_sec": "Flushes", + "flushes_total_time_ns": "FlushesTotalTime", "vsize_megabytes": "Virtual", "resident_megabytes": "Resident", "queued_reads": "QueuedReaders", "queued_writes": "QueuedWriters", "active_reads": "ActiveReaders", "active_writes": "ActiveWriters", + "available_reads": "AvailableReaders", + "available_writes": "AvailableWriters", + "total_tickets_reads": "TotalTicketsReaders", + "total_tickets_writes": "TotalTicketsWriters", + "net_in_bytes_count": "NetInCnt", "net_in_bytes": "NetIn", + "net_out_bytes_count": "NetOutCnt", "net_out_bytes": "NetOut", "open_connections": "NumConnections", + "ttl_deletes": "DeletedDocumentsCnt", "ttl_deletes_per_sec": "DeletedDocuments", + "ttl_passes": "PassesCnt", "ttl_passes_per_sec": "Passes", "cursor_timed_out": "TimedOutC", + "cursor_timed_out_count": "TimedOutCCnt", "cursor_no_timeout": "NoTimeoutC", + "cursor_no_timeout_count": "NoTimeoutCCnt", "cursor_pinned": "PinnedC", + "cursor_pinned_count": "PinnedCCnt", "cursor_total": "TotalC", + "cursor_total_count": "TotalCCnt", "document_deleted": "DeletedD", "document_inserted": "InsertedD", "document_returned": "ReturnedD", @@ -60,19 +88,80 @@ var DefaultStats = map[string]string{ "connections_current": "CurrentC", "connections_available": "AvailableC", "connections_total_created": "TotalCreatedC", + "operation_scan_and_order": "ScanAndOrderOp", + "operation_write_conflicts": "WriteConflictsOp", + "total_keys_scanned": "TotalKeysScanned", + "total_docs_scanned": "TotalObjectsScanned", +} + +var DefaultAssertsStats = map[string]string{ + "assert_regular": "Regular", + "assert_warning": "Warning", + "assert_msg": "Msg", + "assert_user": "User", + "assert_rollovers": "Rollovers", +} + +var DefaultCommandsStats = map[string]string{ + "aggregate_command_total": "AggregateCommandTotal", + "aggregate_command_failed": "AggregateCommandFailed", + "count_command_total": "CountCommandTotal", + "count_command_failed": "CountCommandFailed", + "delete_command_total": "DeleteCommandTotal", + "delete_command_failed": "DeleteCommandFailed", + "distinct_command_total": "DistinctCommandTotal", + "distinct_command_failed": "DistinctCommandFailed", + "find_command_total": "FindCommandTotal", + "find_command_failed": "FindCommandFailed", + "find_and_modify_command_total": "FindAndModifyCommandTotal", + "find_and_modify_command_failed": "FindAndModifyCommandFailed", + "get_more_command_total": "GetMoreCommandTotal", + "get_more_command_failed": 
"GetMoreCommandFailed", + "insert_command_total": "InsertCommandTotal", + "insert_command_failed": "InsertCommandFailed", + "update_command_total": "UpdateCommandTotal", + "update_command_failed": "UpdateCommandFailed", +} + +var DefaultLatencyStats = map[string]string{ + "latency_writes_count": "WriteOpsCnt", + "latency_writes": "WriteLatency", + "latency_reads_count": "ReadOpsCnt", + "latency_reads": "ReadLatency", + "latency_commands_count": "CommandOpsCnt", + "latency_commands": "CommandLatency", } var DefaultReplStats = map[string]string{ - "repl_inserts_per_sec": "InsertR", - "repl_queries_per_sec": "QueryR", - "repl_updates_per_sec": "UpdateR", - "repl_deletes_per_sec": "DeleteR", - "repl_getmores_per_sec": "GetMoreR", - "repl_commands_per_sec": "CommandR", - "member_status": "NodeType", - "state": "NodeState", - "repl_lag": "ReplLag", - "repl_oplog_window_sec": "OplogTimeDiff", + "repl_inserts": "InsertRCnt", + "repl_inserts_per_sec": "InsertR", + "repl_queries": "QueryRCnt", + "repl_queries_per_sec": "QueryR", + "repl_updates": "UpdateRCnt", + "repl_updates_per_sec": "UpdateR", + "repl_deletes": "DeleteRCnt", + "repl_deletes_per_sec": "DeleteR", + "repl_getmores": "GetMoreRCnt", + "repl_getmores_per_sec": "GetMoreR", + "repl_commands": "CommandRCnt", + "repl_commands_per_sec": "CommandR", + "member_status": "NodeType", + "state": "NodeState", + "repl_state": "NodeStateInt", + "repl_lag": "ReplLag", + "repl_network_bytes": "ReplNetworkBytes", + "repl_network_getmores_num": "ReplNetworkGetmoresNum", + "repl_network_getmores_total_millis": "ReplNetworkGetmoresTotalMillis", + "repl_network_ops": "ReplNetworkOps", + "repl_buffer_count": "ReplBufferCount", + "repl_buffer_size_bytes": "ReplBufferSizeBytes", + "repl_apply_batches_num": "ReplApplyBatchesNum", + "repl_apply_batches_total_millis": "ReplApplyBatchesTotalMillis", + "repl_apply_ops": "ReplApplyOps", + "repl_executor_pool_in_progress_count": "ReplExecutorPoolInProgressCount", + "repl_executor_queues_network_in_progress": "ReplExecutorQueuesNetworkInProgress", + "repl_executor_queues_sleepers": "ReplExecutorQueuesSleepers", + "repl_executor_unsignaled_events": "ReplExecutorUnsignaledEvents", } var DefaultClusterStats = map[string]string{ @@ -96,6 +185,7 @@ var ShardHostStats = map[string]string{ var MmapStats = map[string]string{ "mapped_megabytes": "Mapped", "non-mapped_megabytes": "NonMapped", + "page_faults": "FaultsCnt", "page_faults_per_sec": "Faults", } @@ -115,8 +205,41 @@ var WiredTigerExtStats = map[string]string{ "wtcache_bytes_read_into": "BytesReadInto", "wtcache_pages_evicted_by_app_thread": "PagesEvictedByAppThread", "wtcache_pages_queued_for_eviction": "PagesQueuedForEviction", + "wtcache_pages_read_into": "PagesReadIntoCache", + "wtcache_pages_requested_from": "PagesRequestedFromCache", "wtcache_server_evicting_pages": "ServerEvictingPages", "wtcache_worker_thread_evictingpages": "WorkerThreadEvictingPages", + "wtcache_internal_pages_evicted": "InternalPagesEvicted", + "wtcache_modified_pages_evicted": "ModifiedPagesEvicted", + "wtcache_unmodified_pages_evicted": "UnmodifiedPagesEvicted", +} + +var DefaultTCMallocStats = map[string]string{ + "tcmalloc_current_allocated_bytes": "TCMallocCurrentAllocatedBytes", + "tcmalloc_heap_size": "TCMallocHeapSize", + "tcmalloc_central_cache_free_bytes": "TCMallocCentralCacheFreeBytes", + "tcmalloc_current_total_thread_cache_bytes": "TCMallocCurrentTotalThreadCacheBytes", + "tcmalloc_max_total_thread_cache_bytes": "TCMallocMaxTotalThreadCacheBytes", + 
"tcmalloc_total_free_bytes": "TCMallocTotalFreeBytes", + "tcmalloc_transfer_cache_free_bytes": "TCMallocTransferCacheFreeBytes", + "tcmalloc_thread_cache_free_bytes": "TCMallocThreadCacheFreeBytes", + "tcmalloc_spinlock_total_delay_ns": "TCMallocSpinLockTotalDelayNanos", + "tcmalloc_pageheap_free_bytes": "TCMallocPageheapFreeBytes", + "tcmalloc_pageheap_unmapped_bytes": "TCMallocPageheapUnmappedBytes", + "tcmalloc_pageheap_committed_bytes": "TCMallocPageheapComittedBytes", + "tcmalloc_pageheap_scavenge_count": "TCMallocPageheapScavengeCount", + "tcmalloc_pageheap_commit_count": "TCMallocPageheapCommitCount", + "tcmalloc_pageheap_total_commit_bytes": "TCMallocPageheapTotalCommitBytes", + "tcmalloc_pageheap_decommit_count": "TCMallocPageheapDecommitCount", + "tcmalloc_pageheap_total_decommit_bytes": "TCMallocPageheapTotalDecommitBytes", + "tcmalloc_pageheap_reserve_count": "TCMallocPageheapReserveCount", + "tcmalloc_pageheap_total_reserve_bytes": "TCMallocPageheapTotalReserveBytes", +} + +var DefaultStorageStats = map[string]string{ + "storage_freelist_search_bucket_exhausted": "StorageFreelistSearchBucketExhausted", + "storage_freelist_search_requests": "StorageFreelistSearchRequests", + "storage_freelist_search_scanned": "StorageFreelistSearchScanned", } var DbDataStats = map[string]string{ @@ -131,6 +254,15 @@ var DbDataStats = map[string]string{ "ok": "Ok", } +var ColDataStats = map[string]string{ + "count": "Count", + "size": "Size", + "avg_obj_size": "AvgObjSize", + "storage_size": "StorageSize", + "total_index_size": "TotalIndexSize", + "ok": "Ok", +} + func (d *MongodbData) AddDbStats() { for _, dbstat := range d.StatLine.DbStatsLines { dbStatLine := reflect.ValueOf(&dbstat).Elem() @@ -147,6 +279,23 @@ func (d *MongodbData) AddDbStats() { } } +func (d *MongodbData) AddColStats() { + for _, colstat := range d.StatLine.ColStatsLines { + colStatLine := reflect.ValueOf(&colstat).Elem() + newColData := &ColData{ + Name: colstat.Name, + DbName: colstat.DbName, + Fields: make(map[string]interface{}), + } + newColData.Fields["type"] = "col_stat" + for key, value := range ColDataStats { + val := colStatLine.FieldByName(value).Interface() + newColData.Fields[key] = val + } + d.ColData = append(d.ColData, *newColData) + } +} + func (d *MongodbData) AddShardHostStats() { for host, hostStat := range d.StatLine.ShardHostStatsLines { hostStatLine := reflect.ValueOf(&hostStat).Elem() @@ -168,9 +317,32 @@ func (d *MongodbData) AddDefaultStats() { d.addStat(statLine, DefaultStats) if d.StatLine.NodeType != "" { d.addStat(statLine, DefaultReplStats) + d.Tags["node_type"] = d.StatLine.NodeType } + + if d.StatLine.ReadLatency > 0 { + d.addStat(statLine, DefaultLatencyStats) + } + + if d.StatLine.ReplSetName != "" { + d.Tags["rs_name"] = d.StatLine.ReplSetName + } + + if d.StatLine.OplogStats != nil { + d.add("repl_oplog_window_sec", d.StatLine.OplogStats.TimeDiff) + } + + if d.StatLine.Version != "" { + d.add("version", d.StatLine.Version) + } + + d.addStat(statLine, DefaultAssertsStats) d.addStat(statLine, DefaultClusterStats) + d.addStat(statLine, DefaultCommandsStats) d.addStat(statLine, DefaultShardStats) + d.addStat(statLine, DefaultStorageStats) + d.addStat(statLine, DefaultTCMallocStats) + if d.StatLine.StorageEngine == "mmapv1" || d.StatLine.StorageEngine == "rocksdb" { d.addStat(statLine, MmapStats) } else if d.StatLine.StorageEngine == "wiredTiger" { @@ -181,6 +353,7 @@ func (d *MongodbData) AddDefaultStats() { d.add(key, floatVal) } d.addStat(statLine, WiredTigerExtStats) + 
d.add("page_faults", d.StatLine.FaultsCnt) } } @@ -214,6 +387,17 @@ func (d *MongodbData) flush(acc telegraf.Accumulator) { ) db.Fields = make(map[string]interface{}) } + for _, col := range d.ColData { + d.Tags["collection"] = col.Name + d.Tags["db_name"] = col.DbName + acc.AddFields( + "mongodb_col_stats", + col.Fields, + d.Tags, + d.StatLine.Time, + ) + col.Fields = make(map[string]interface{}) + } for _, host := range d.ShardHostData { d.Tags["hostname"] = host.Name acc.AddFields( diff --git a/plugins/inputs/mongodb/mongodb_data_test.go b/plugins/inputs/mongodb/mongodb_data_test.go index 5f4dd4c2c..38e6cd6ad 100644 --- a/plugins/inputs/mongodb/mongodb_data_test.go +++ b/plugins/inputs/mongodb/mongodb_data_test.go @@ -14,37 +14,48 @@ var tags = make(map[string]string) func TestAddNonReplStats(t *testing.T) { d := NewMongodbData( &StatLine{ - StorageEngine: "", - Time: time.Now(), - Insert: 0, - Query: 0, - Update: 0, - Delete: 0, - GetMore: 0, - Command: 0, - Flushes: 0, - Virtual: 0, - Resident: 0, - QueuedReaders: 0, - QueuedWriters: 0, - ActiveReaders: 0, - ActiveWriters: 0, - NetIn: 0, - NetOut: 0, - NumConnections: 0, - Passes: 0, - DeletedDocuments: 0, - TimedOutC: 0, - NoTimeoutC: 0, - PinnedC: 0, - TotalC: 0, - DeletedD: 0, - InsertedD: 0, - ReturnedD: 0, - UpdatedD: 0, - CurrentC: 0, - AvailableC: 0, - TotalCreatedC: 0, + StorageEngine: "", + Time: time.Now(), + UptimeNanos: 0, + Insert: 0, + Query: 0, + Update: 0, + UpdateCnt: 0, + Delete: 0, + GetMore: 0, + Command: 0, + Flushes: 0, + FlushesCnt: 0, + Virtual: 0, + Resident: 0, + QueuedReaders: 0, + QueuedWriters: 0, + ActiveReaders: 0, + ActiveWriters: 0, + AvailableReaders: 0, + AvailableWriters: 0, + TotalTicketsReaders: 0, + TotalTicketsWriters: 0, + NetIn: 0, + NetOut: 0, + NumConnections: 0, + Passes: 0, + DeletedDocuments: 0, + TimedOutC: 0, + NoTimeoutC: 0, + PinnedC: 0, + TotalC: 0, + DeletedD: 0, + InsertedD: 0, + ReturnedD: 0, + UpdatedD: 0, + CurrentC: 0, + AvailableC: 0, + TotalCreatedC: 0, + ScanAndOrderOp: 0, + WriteConflictsOp: 0, + TotalKeysScanned: 0, + TotalObjectsScanned: 0, }, tags, ) @@ -53,8 +64,8 @@ func TestAddNonReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key, _ := range DefaultStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range DefaultStats { + assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) } } @@ -74,8 +85,8 @@ func TestAddReplStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key, _ := range MmapStats { - assert.True(t, acc.HasInt64Field("mongodb", key)) + for key := range MmapStats { + assert.True(t, acc.HasInt64Field("mongodb", key), key) } } @@ -97,6 +108,7 @@ func TestAddWiredTigerStats(t *testing.T) { PagesQueuedForEviction: 0, ServerEvictingPages: 0, WorkerThreadEvictingPages: 0, + FaultsCnt: 204, }, tags, ) @@ -106,9 +118,15 @@ func TestAddWiredTigerStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key, _ := range WiredTigerStats { - assert.True(t, acc.HasFloatField("mongodb", key)) + for key := range WiredTigerStats { + assert.True(t, acc.HasFloatField("mongodb", key), key) } + + for key := range WiredTigerExtStats { + assert.True(t, acc.HasFloatField("mongodb", key) || acc.HasInt64Field("mongodb", key), key) + } + + assert.True(t, acc.HasInt64Field("mongodb", "page_faults")) } func TestAddShardStats(t *testing.T) { @@ -127,7 +145,143 @@ func TestAddShardStats(t *testing.T) { d.AddDefaultStats() d.flush(&acc) - for key, _ := range DefaultShardStats { + for key := range 
DefaultShardStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + +func TestAddLatencyStats(t *testing.T) { + d := NewMongodbData( + &StatLine{ + CommandOpsCnt: 73, + CommandLatency: 364, + ReadOpsCnt: 113, + ReadLatency: 201, + WriteOpsCnt: 7, + WriteLatency: 55, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultLatencyStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + +func TestAddAssertsStats(t *testing.T) { + d := NewMongodbData( + &StatLine{ + Regular: 3, + Warning: 9, + Msg: 2, + User: 34, + Rollovers: 0, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultAssertsStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + +func TestAddCommandsStats(t *testing.T) { + d := NewMongodbData( + &StatLine{ + AggregateCommandTotal: 12, + AggregateCommandFailed: 2, + CountCommandTotal: 18, + CountCommandFailed: 5, + DeleteCommandTotal: 73, + DeleteCommandFailed: 364, + DistinctCommandTotal: 87, + DistinctCommandFailed: 19, + FindCommandTotal: 113, + FindCommandFailed: 201, + FindAndModifyCommandTotal: 7, + FindAndModifyCommandFailed: 55, + GetMoreCommandTotal: 4, + GetMoreCommandFailed: 55, + InsertCommandTotal: 34, + InsertCommandFailed: 65, + UpdateCommandTotal: 23, + UpdateCommandFailed: 6, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultCommandsStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + +func TestAddTCMallocStats(t *testing.T) { + d := NewMongodbData( + &StatLine{ + TCMallocCurrentAllocatedBytes: 5877253096, + TCMallocHeapSize: 8067108864, + TCMallocPageheapFreeBytes: 1054994432, + TCMallocPageheapUnmappedBytes: 677859328, + TCMallocMaxTotalThreadCacheBytes: 1073741824, + TCMallocCurrentTotalThreadCacheBytes: 80405312, + TCMallocTotalFreeBytes: 457002008, + TCMallocCentralCacheFreeBytes: 375131800, + TCMallocTransferCacheFreeBytes: 1464896, + TCMallocThreadCacheFreeBytes: 80405312, + TCMallocPageheapComittedBytes: 7389249536, + TCMallocPageheapScavengeCount: 396394, + TCMallocPageheapCommitCount: 641765, + TCMallocPageheapTotalCommitBytes: 102248751104, + TCMallocPageheapDecommitCount: 396394, + TCMallocPageheapTotalDecommitBytes: 94859501568, + TCMallocPageheapReserveCount: 6179, + TCMallocPageheapTotalReserveBytes: 8067108864, + TCMallocSpinLockTotalDelayNanos: 2344453860, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultTCMallocStats { + assert.True(t, acc.HasInt64Field("mongodb", key)) + } +} + +func TestAddStorageStats(t *testing.T) { + d := NewMongodbData( + &StatLine{ + StorageFreelistSearchBucketExhausted: 0, + StorageFreelistSearchRequests: 0, + StorageFreelistSearchScanned: 0, + }, + tags, + ) + + var acc testutil.Accumulator + + d.AddDefaultStats() + d.flush(&acc) + + for key := range DefaultStorageStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } @@ -156,8 +310,8 @@ func TestAddShardHostStats(t *testing.T) { d.flush(&acc) var hostsFound []string - for host, _ := range hostStatLines { - for key, _ := range ShardHostStats { + for host := range hostStatLines { + for key := range ShardHostStats { assert.True(t, acc.HasInt64Field("mongodb_shard_stats", key)) } @@ -178,61 +332,155 @@ func TestStateTag(t *testing.T) { Query: 0, NodeType: "PRI", NodeState: "PRIMARY", + ReplSetName: "rs1", + Version: "3.6.17", }, tags, ) stateTags := 
make(map[string]string) + stateTags["node_type"] = "PRI" + stateTags["rs_name"] = "rs1" var acc testutil.Accumulator d.AddDefaultStats() d.flush(&acc) fields := map[string]interface{}{ - "active_reads": int64(0), - "active_writes": int64(0), - "commands_per_sec": int64(0), - "deletes_per_sec": int64(0), - "flushes_per_sec": int64(0), - "getmores_per_sec": int64(0), - "inserts_per_sec": int64(0), - "member_status": "PRI", - "state": "PRIMARY", - "net_in_bytes": int64(0), - "net_out_bytes": int64(0), - "open_connections": int64(0), - "queries_per_sec": int64(0), - "queued_reads": int64(0), - "queued_writes": int64(0), - "repl_commands_per_sec": int64(0), - "repl_deletes_per_sec": int64(0), - "repl_getmores_per_sec": int64(0), - "repl_inserts_per_sec": int64(0), - "repl_queries_per_sec": int64(0), - "repl_updates_per_sec": int64(0), - "repl_lag": int64(0), - "repl_oplog_window_sec": int64(0), - "resident_megabytes": int64(0), - "updates_per_sec": int64(0), - "vsize_megabytes": int64(0), - "ttl_deletes_per_sec": int64(0), - "ttl_passes_per_sec": int64(0), - "jumbo_chunks": int64(0), - "total_in_use": int64(0), - "total_available": int64(0), - "total_created": int64(0), - "total_refreshing": int64(0), - "cursor_timed_out": int64(0), - "cursor_no_timeout": int64(0), - "cursor_pinned": int64(0), - "cursor_total": int64(0), - "document_deleted": int64(0), - "document_inserted": int64(0), - "document_returned": int64(0), - "document_updated": int64(0), - "connections_current": int64(0), - "connections_available": int64(0), - "connections_total_created": int64(0), + "active_reads": int64(0), + "active_writes": int64(0), + "aggregate_command_failed": int64(0), + "aggregate_command_total": int64(0), + "assert_msg": int64(0), + "assert_regular": int64(0), + "assert_rollovers": int64(0), + "assert_user": int64(0), + "assert_warning": int64(0), + "available_reads": int64(0), + "available_writes": int64(0), + "commands": int64(0), + "commands_per_sec": int64(0), + "connections_available": int64(0), + "connections_current": int64(0), + "connections_total_created": int64(0), + "count_command_failed": int64(0), + "count_command_total": int64(0), + "cursor_no_timeout": int64(0), + "cursor_no_timeout_count": int64(0), + "cursor_pinned": int64(0), + "cursor_pinned_count": int64(0), + "cursor_timed_out": int64(0), + "cursor_timed_out_count": int64(0), + "cursor_total": int64(0), + "cursor_total_count": int64(0), + "delete_command_failed": int64(0), + "delete_command_total": int64(0), + "deletes": int64(0), + "deletes_per_sec": int64(0), + "distinct_command_failed": int64(0), + "distinct_command_total": int64(0), + "document_deleted": int64(0), + "document_inserted": int64(0), + "document_returned": int64(0), + "document_updated": int64(0), + "find_and_modify_command_failed": int64(0), + "find_and_modify_command_total": int64(0), + "find_command_failed": int64(0), + "find_command_total": int64(0), + "flushes": int64(0), + "flushes_per_sec": int64(0), + "flushes_total_time_ns": int64(0), + "get_more_command_failed": int64(0), + "get_more_command_total": int64(0), + "getmores": int64(0), + "getmores_per_sec": int64(0), + "insert_command_failed": int64(0), + "insert_command_total": int64(0), + "inserts": int64(0), + "inserts_per_sec": int64(0), + "jumbo_chunks": int64(0), + "member_status": "PRI", + "net_in_bytes": int64(0), + "net_in_bytes_count": int64(0), + "net_out_bytes": int64(0), + "net_out_bytes_count": int64(0), + "open_connections": int64(0), + "operation_scan_and_order": int64(0), + 
"operation_write_conflicts": int64(0), + "queries": int64(0), + "queries_per_sec": int64(0), + "queued_reads": int64(0), + "queued_writes": int64(0), + "repl_apply_batches_num": int64(0), + "repl_apply_batches_total_millis": int64(0), + "repl_apply_ops": int64(0), + "repl_buffer_count": int64(0), + "repl_buffer_size_bytes": int64(0), + "repl_commands": int64(0), + "repl_commands_per_sec": int64(0), + "repl_deletes": int64(0), + "repl_deletes_per_sec": int64(0), + "repl_executor_pool_in_progress_count": int64(0), + "repl_executor_queues_network_in_progress": int64(0), + "repl_executor_queues_sleepers": int64(0), + "repl_executor_unsignaled_events": int64(0), + "repl_getmores": int64(0), + "repl_getmores_per_sec": int64(0), + "repl_inserts": int64(0), + "repl_inserts_per_sec": int64(0), + "repl_lag": int64(0), + "repl_network_bytes": int64(0), + "repl_network_getmores_num": int64(0), + "repl_network_getmores_total_millis": int64(0), + "repl_network_ops": int64(0), + "repl_queries": int64(0), + "repl_queries_per_sec": int64(0), + "repl_updates": int64(0), + "repl_updates_per_sec": int64(0), + "repl_state": int64(0), + "resident_megabytes": int64(0), + "state": "PRIMARY", + "storage_freelist_search_bucket_exhausted": int64(0), + "storage_freelist_search_requests": int64(0), + "storage_freelist_search_scanned": int64(0), + "tcmalloc_central_cache_free_bytes": int64(0), + "tcmalloc_current_allocated_bytes": int64(0), + "tcmalloc_current_total_thread_cache_bytes": int64(0), + "tcmalloc_heap_size": int64(0), + "tcmalloc_max_total_thread_cache_bytes": int64(0), + "tcmalloc_pageheap_commit_count": int64(0), + "tcmalloc_pageheap_committed_bytes": int64(0), + "tcmalloc_pageheap_decommit_count": int64(0), + "tcmalloc_pageheap_free_bytes": int64(0), + "tcmalloc_pageheap_reserve_count": int64(0), + "tcmalloc_pageheap_scavenge_count": int64(0), + "tcmalloc_pageheap_total_commit_bytes": int64(0), + "tcmalloc_pageheap_total_decommit_bytes": int64(0), + "tcmalloc_pageheap_total_reserve_bytes": int64(0), + "tcmalloc_pageheap_unmapped_bytes": int64(0), + "tcmalloc_spinlock_total_delay_ns": int64(0), + "tcmalloc_thread_cache_free_bytes": int64(0), + "tcmalloc_total_free_bytes": int64(0), + "tcmalloc_transfer_cache_free_bytes": int64(0), + "total_available": int64(0), + "total_created": int64(0), + "total_docs_scanned": int64(0), + "total_in_use": int64(0), + "total_keys_scanned": int64(0), + "total_refreshing": int64(0), + "total_tickets_reads": int64(0), + "total_tickets_writes": int64(0), + "ttl_deletes": int64(0), + "ttl_deletes_per_sec": int64(0), + "ttl_passes": int64(0), + "ttl_passes_per_sec": int64(0), + "update_command_failed": int64(0), + "update_command_total": int64(0), + "updates": int64(0), + "updates_per_sec": int64(0), + "uptime_ns": int64(0), + "version": "3.6.17", + "vsize_megabytes": int64(0), } acc.AssertContainsTaggedFields(t, "mongodb", fields, stateTags) } diff --git a/plugins/inputs/mongodb/mongodb_server.go b/plugins/inputs/mongodb/mongodb_server.go index 1d9db9181..5af48c10a 100644 --- a/plugins/inputs/mongodb/mongodb_server.go +++ b/plugins/inputs/mongodb/mongodb_server.go @@ -1,8 +1,9 @@ package mongodb import ( - "log" + "fmt" "net/url" + "strings" "time" "github.com/influxdata/telegraf" @@ -14,6 +15,8 @@ type Server struct { Url *url.URL Session *mgo.Session lastResult *MongoStatus + + Log telegraf.Logger } func (s *Server) getDefaultTags() map[string]string { @@ -26,41 +29,20 @@ type oplogEntry struct { Timestamp bson.MongoTimestamp `bson:"ts"` } -func (s *Server) 
gatherOplogStats() *OplogStats { - stats := &OplogStats{} - localdb := s.Session.DB("local") - - op_first := oplogEntry{} - op_last := oplogEntry{} - query := bson.M{"ts": bson.M{"$exists": true}} - - for _, collection_name := range []string{"oplog.rs", "oplog.$main"} { - if err := localdb.C(collection_name).Find(query).Sort("$natural").Limit(1).One(&op_first); err != nil { - if err == mgo.ErrNotFound { - continue - } - log.Println("E! Error getting first oplog entry (" + err.Error() + ")") - return stats - } - if err := localdb.C(collection_name).Find(query).Sort("-$natural").Limit(1).One(&op_last); err != nil { - if err == mgo.ErrNotFound { - continue - } - log.Println("E! Error getting last oplog entry (" + err.Error() + ")") - return stats - } - } - - op_first_time := time.Unix(int64(op_first.Timestamp>>32), 0) - op_last_time := time.Unix(int64(op_last.Timestamp>>32), 0) - stats.TimeDiff = int64(op_last_time.Sub(op_first_time).Seconds()) - return stats +func IsAuthorization(err error) bool { + return strings.Contains(err.Error(), "not authorized") } -func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error { - s.Session.SetMode(mgo.Eventual, true) - s.Session.SetSocketTimeout(0) - result_server := &ServerStatus{} +func (s *Server) authLog(err error) { + if IsAuthorization(err) { + s.Log.Debug(err.Error()) + } else { + s.Log.Error(err.Error()) + } +} + +func (s *Server) gatherServerStatus() (*ServerStatus, error) { + serverStatus := &ServerStatus{} err := s.Session.DB("admin").Run(bson.D{ { Name: "serverStatus", @@ -70,79 +52,221 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error Name: "recordStats", Value: 0, }, - }, result_server) + }, serverStatus) if err != nil { - return err + return nil, err } - result_repl := &ReplSetStatus{} - // ignore error because it simply indicates that the db is not a member - // in a replica set, which is fine. - _ = s.Session.DB("admin").Run(bson.D{ + return serverStatus, nil +} + +func (s *Server) gatherReplSetStatus() (*ReplSetStatus, error) { + replSetStatus := &ReplSetStatus{} + err := s.Session.DB("admin").Run(bson.D{ { Name: "replSetGetStatus", Value: 1, }, - }, result_repl) + }, replSetStatus) + if err != nil { + return nil, err + } + return replSetStatus, nil +} - jumbo_chunks, _ := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() - - result_cluster := &ClusterStatus{ - JumboChunksCount: int64(jumbo_chunks), +func (s *Server) gatherClusterStatus() (*ClusterStatus, error) { + chunkCount, err := s.Session.DB("config").C("chunks").Find(bson.M{"jumbo": true}).Count() + if err != nil { + return nil, err } - resultShards := &ShardStats{} - err = s.Session.DB("admin").Run(bson.D{ + return &ClusterStatus{ + JumboChunksCount: int64(chunkCount), + }, nil +} + +func (s *Server) gatherShardConnPoolStats() (*ShardStats, error) { + shardStats := &ShardStats{} + err := s.Session.DB("admin").Run(bson.D{ { Name: "shardConnPoolStats", Value: 1, }, - }, &resultShards) + }, &shardStats) if err != nil { - log.Println("E! 
Error getting database shard stats (" + err.Error() + ")") + return nil, err + } + return shardStats, nil +} + +func (s *Server) gatherDBStats(name string) (*Db, error) { + stats := &DbStatsData{} + err := s.Session.DB(name).Run(bson.D{ + { + Name: "dbStats", + Value: 1, + }, + }, stats) + if err != nil { + return nil, err } - oplogStats := s.gatherOplogStats() + return &Db{ + Name: name, + DbStatsData: stats, + }, nil +} - result_db_stats := &DbStats{} - if gatherDbStats == true { - names := []string{} - names, err = s.Session.DatabaseNames() - if err != nil { - log.Println("E! Error getting database names (" + err.Error() + ")") - } - for _, db_name := range names { - db_stat_line := &DbStatsData{} - err = s.Session.DB(db_name).Run(bson.D{ - { - Name: "dbStats", - Value: 1, - }, - }, db_stat_line) +func (s *Server) getOplogReplLag(collection string) (*OplogStats, error) { + query := bson.M{"ts": bson.M{"$exists": true}} + + var first oplogEntry + err := s.Session.DB("local").C(collection).Find(query).Sort("$natural").Limit(1).One(&first) + if err != nil { + return nil, err + } + + var last oplogEntry + err = s.Session.DB("local").C(collection).Find(query).Sort("-$natural").Limit(1).One(&last) + if err != nil { + return nil, err + } + + firstTime := time.Unix(int64(first.Timestamp>>32), 0) + lastTime := time.Unix(int64(last.Timestamp>>32), 0) + stats := &OplogStats{ + TimeDiff: int64(lastTime.Sub(firstTime).Seconds()), + } + return stats, nil +} + +// The "oplog.rs" collection is stored on all replica set members. +// +// The "oplog.$main" collection is created on the master node of a +// master-slave replicated deployment. As of MongoDB 3.2, master-slave +// replication has been deprecated. +func (s *Server) gatherOplogStats() (*OplogStats, error) { + stats, err := s.getOplogReplLag("oplog.rs") + if err == nil { + return stats, nil + } + + return s.getOplogReplLag("oplog.$main") +} + +func (s *Server) gatherCollectionStats(colStatsDbs []string) (*ColStats, error) { + names, err := s.Session.DatabaseNames() + if err != nil { + return nil, err + } + + results := &ColStats{} + for _, dbName := range names { + if stringInSlice(dbName, colStatsDbs) || len(colStatsDbs) == 0 { + var colls []string + colls, err = s.Session.DB(dbName).CollectionNames() if err != nil { - log.Println("E! Error getting db stats from " + db_name + "(" + err.Error() + ")") + s.Log.Errorf("Error getting collection names: %s", err.Error()) + continue } - db := &Db{ - Name: db_name, - DbStatsData: db_stat_line, + for _, colName := range colls { + colStatLine := &ColStatsData{} + err = s.Session.DB(dbName).Run(bson.D{ + { + Name: "collStats", + Value: colName, + }, + }, colStatLine) + if err != nil { + s.authLog(fmt.Errorf("error getting col stats from %q: %v", colName, err)) + continue + } + collection := &Collection{ + Name: colName, + DbName: dbName, + ColStatsData: colStatLine, + } + results.Collections = append(results.Collections, *collection) } + } + } + return results, nil +} - result_db_stats.Dbs = append(result_db_stats.Dbs, *db) +func (s *Server) gatherData(acc telegraf.Accumulator, gatherClusterStatus bool, gatherDbStats bool, gatherColStats bool, colStatsDbs []string) error { + s.Session.SetMode(mgo.Eventual, true) + s.Session.SetSocketTimeout(0) + + serverStatus, err := s.gatherServerStatus() + if err != nil { + return err + } + + // Get replica set status, an error indicates that the server is not a + // member of a replica set. 
+ replSetStatus, err := s.gatherReplSetStatus() + if err != nil { + s.Log.Debugf("Unable to gather replica set status: %s", err.Error()) + } + + // Gather the oplog if we are a member of a replica set. Non-replica set + // members do not have the oplog collections. + var oplogStats *OplogStats + if replSetStatus != nil { + oplogStats, err = s.gatherOplogStats() + if err != nil { + s.authLog(fmt.Errorf("Unable to get oplog stats: %v", err)) + } + } + + var clusterStatus *ClusterStatus + if gatherClusterStatus { + status, err := s.gatherClusterStatus() + if err != nil { + s.Log.Debugf("Unable to gather cluster status: %s", err.Error()) + } + clusterStatus = status + } + + shardStats, err := s.gatherShardConnPoolStats() + if err != nil { + s.authLog(fmt.Errorf("unable to gather shard connection pool stats: %s", err.Error())) + } + + var collectionStats *ColStats + if gatherColStats { + stats, err := s.gatherCollectionStats(colStatsDbs) + if err != nil { + return err + } + collectionStats = stats + } + + dbStats := &DbStats{} + if gatherDbStats { + names, err := s.Session.DatabaseNames() + if err != nil { + return err + } + + for _, name := range names { + db, err := s.gatherDBStats(name) + if err != nil { + s.Log.Debugf("Error getting db stats from %q: %s", name, err.Error()) + } + dbStats.Dbs = append(dbStats.Dbs, *db) } } result := &MongoStatus{ - ServerStatus: result_server, - ReplSetStatus: result_repl, - ClusterStatus: result_cluster, - DbStats: result_db_stats, - ShardStats: resultShards, + ServerStatus: serverStatus, + ReplSetStatus: replSetStatus, + ClusterStatus: clusterStatus, + DbStats: dbStats, + ColStats: collectionStats, + ShardStats: shardStats, OplogStats: oplogStats, } - defer func() { - s.lastResult = result - }() - result.SampleTime = time.Now() if s.lastResult != nil && result != nil { duration := result.SampleTime.Sub(s.lastResult.SampleTime) @@ -156,8 +280,20 @@ func (s *Server) gatherData(acc telegraf.Accumulator, gatherDbStats bool) error ) data.AddDefaultStats() data.AddDbStats() + data.AddColStats() data.AddShardHostStats() data.flush(acc) } + + s.lastResult = result return nil } + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/plugins/inputs/mongodb/mongodb_server_test.go b/plugins/inputs/mongodb/mongodb_server_test.go index c8ef5f240..91a3c0709 100644 --- a/plugins/inputs/mongodb/mongodb_server_test.go +++ b/plugins/inputs/mongodb/mongodb_server_test.go @@ -35,7 +35,7 @@ func TestAddDefaultStats(t *testing.T) { err = server.gatherData(&acc, false) require.NoError(t, err) - for key, _ := range DefaultStats { + for key := range DefaultStats { assert.True(t, acc.HasInt64Field("mongodb", key)) } } diff --git a/plugins/inputs/mongodb/mongostat.go b/plugins/inputs/mongodb/mongostat.go index dcfd7f89d..70a0edf09 100644 --- a/plugins/inputs/mongodb/mongostat.go +++ b/plugins/inputs/mongodb/mongostat.go @@ -1,7 +1,7 @@ /*** The code contained here came from https://github.com/mongodb/mongo-tools/blob/master/mongostat/stat_types.go and contains modifications so that no other dependency from that project is needed. Other modifications included -removing uneccessary code specific to formatting the output and determine the current state of the database. It +removing unnecessary code specific to formatting the output and determine the current state of the database. 
It is licensed under Apache Version 2.0, http://www.apache.org/licenses/LICENSE-2.0.html ***/ @@ -34,6 +34,7 @@ type MongoStatus struct { ReplSetStatus *ReplSetStatus ClusterStatus *ClusterStatus DbStats *DbStats + ColStats *ColStats ShardStats *ShardStats OplogStats *OplogStats } @@ -47,7 +48,7 @@ type ServerStatus struct { UptimeMillis int64 `bson:"uptimeMillis"` UptimeEstimate int64 `bson:"uptimeEstimate"` LocalTime time.Time `bson:"localTime"` - Asserts map[string]int64 `bson:"asserts"` + Asserts *AssertsStats `bson:"asserts"` BackgroundFlushing *FlushStats `bson:"backgroundFlushing"` ExtraInfo *ExtraInfo `bson:"extra_info"` Connections *ConnectionStats `bson:"connections"` @@ -57,6 +58,7 @@ type ServerStatus struct { Network *NetworkStats `bson:"network"` Opcounters *OpcountStats `bson:"opcounters"` OpcountersRepl *OpcountStats `bson:"opcountersRepl"` + OpLatencies *OpLatenciesStats `bson:"opLatencies"` RecordStats *DBRecordStats `bson:"recordStats"` Mem *MemStats `bson:"mem"` Repl *ReplStatus `bson:"repl"` @@ -64,6 +66,7 @@ type ServerStatus struct { StorageEngine map[string]string `bson:"storageEngine"` WiredTiger *WiredTiger `bson:"wiredTiger"` Metrics *MetricsStats `bson:"metrics"` + TCMallocStats *TCMallocStats `bson:"tcmalloc"` } // DbStats stores stats from all dbs @@ -92,6 +95,26 @@ type DbStatsData struct { GleStats interface{} `bson:"gleStats"` } +type ColStats struct { + Collections []Collection +} + +type Collection struct { + Name string + DbName string + ColStatsData *ColStatsData +} + +type ColStatsData struct { + Collection string `bson:"ns"` + Count int64 `bson:"count"` + Size int64 `bson:"size"` + AvgObjSize float64 `bson:"avgObjSize"` + StorageSize int64 `bson:"storageSize"` + TotalIndexSize int64 `bson:"totalIndexSize"` + Ok int64 `bson:"ok"` +} + // ClusterStatus stores information related to the whole cluster type ClusterStatus struct { JumboChunksCount int64 @@ -152,7 +175,18 @@ type ConcurrentTransactions struct { } type ConcurrentTransStats struct { - Out int64 `bson:"out"` + Out int64 `bson:"out"` + Available int64 `bson:"available"` + TotalTickets int64 `bson:"totalTickets"` +} + +// AssertsStats stores information related to assertions raised since the MongoDB process started +type AssertsStats struct { + Regular int64 `bson:"regular"` + Warning int64 `bson:"warning"` + Msg int64 `bson:"msg"` + User int64 `bson:"user"` + Rollovers int64 `bson:"rollovers"` } // CacheStats stores cache statistics for WiredTiger. @@ -168,13 +202,19 @@ type CacheStats struct { BytesReadInto int64 `bson:"bytes read into cache"` PagesEvictedByAppThread int64 `bson:"pages evicted by application threads"` PagesQueuedForEviction int64 `bson:"pages queued for eviction"` + PagesReadIntoCache int64 `bson:"pages read into cache"` + PagesRequestedFromCache int64 `bson:"pages requested from the cache"` ServerEvictingPages int64 `bson:"eviction server evicting pages"` WorkerThreadEvictingPages int64 `bson:"eviction worker thread evicting pages"` + InternalPagesEvicted int64 `bson:"internal pages evicted"` + ModifiedPagesEvicted int64 `bson:"modified pages evicted"` + UnmodifiedPagesEvicted int64 `bson:"unmodified pages evicted"` } // TransactionStats stores transaction checkpoints in WiredTiger. type TransactionStats struct { - TransCheckpoints int64 `bson:"transaction checkpoints"` + TransCheckpointsTotalTimeMsecs int64 `bson:"transaction checkpoint total time (msecs)"` + TransCheckpoints int64 `bson:"transaction checkpoints"` } // ReplStatus stores data related to replica sets. 
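The per-collection path added in this patch ties the pieces above together: `gatherCollectionStats` runs the `collStats` command for every collection, the reply decodes into `ColStatsData` through its bson tags, and `AddColStats`/`flush` emit the result as the `mongodb_col_stats` measurement tagged with `collection` and `db_name`. The sketch below shows the equivalent raw command issued with the mgo driver the plugin builds on; the connection string, database, and collection names are illustrative only, not part of the patch:

```go
package main

import (
	"fmt"
	"log"

	mgo "gopkg.in/mgo.v2"
	"gopkg.in/mgo.v2/bson"
)

// colStats mirrors the plugin's ColStatsData: the bson tags map the keys
// returned by the collStats command onto the struct fields.
type colStats struct {
	Collection     string  `bson:"ns"`
	Count          int64   `bson:"count"`
	Size           int64   `bson:"size"`
	AvgObjSize     float64 `bson:"avgObjSize"`
	StorageSize    int64   `bson:"storageSize"`
	TotalIndexSize int64   `bson:"totalIndexSize"`
	Ok             int64   `bson:"ok"`
}

func main() {
	// Illustrative address; the plugin reuses its existing sessions instead.
	session, err := mgo.Dial("mongodb://127.0.0.1:27017")
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	// Equivalent of what gatherCollectionStats does for each collection:
	// db.runCommand({collStats: "<collection>"}).
	stats := &colStats{}
	if err := session.DB("telegraf").Run(bson.D{{Name: "collStats", Value: "metrics"}}, stats); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", stats)
}
```

Run against a live mongod, this prints the same values that end up as the `mongodb_col_stats` fields (count, size, avg_obj_size, storage_size, total_index_size, ok).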
@@ -225,7 +265,7 @@ type FlushStats struct { type ConnectionStats struct { Current int64 `bson:"current"` Available int64 `bson:"available"` - TotalCreated int64 `bson:"total_created"` + TotalCreated int64 `bson:"totalCreated"` } // DurTiming stores information related to journaling. @@ -277,7 +317,7 @@ type NetworkStats struct { NumRequests int64 `bson:"numRequests"` } -// OpcountStats stores information related to comamnds and basic CRUD operations. +// OpcountStats stores information related to commands and basic CRUD operations. type OpcountStats struct { Insert int64 `bson:"insert"` Query int64 `bson:"query"` @@ -287,11 +327,29 @@ type OpcountStats struct { Command int64 `bson:"command"` } +// OpLatenciesStats stores information related to operation latencies for the database as a whole +type OpLatenciesStats struct { + Reads *LatencyStats `bson:"reads"` + Writes *LatencyStats `bson:"writes"` + Commands *LatencyStats `bson:"commands"` +} + +// LatencyStats lists total latency in microseconds and count of operations, enabling you to obtain an average +type LatencyStats struct { + Latency int64 `bson:"latency"` + Ops int64 `bson:"ops"` +} + // MetricsStats stores information related to metrics type MetricsStats struct { - TTL *TTLStats `bson:"ttl"` - Cursor *CursorStats `bson:"cursor"` - Document *DocumentStats `bson:"document"` + TTL *TTLStats `bson:"ttl"` + Cursor *CursorStats `bson:"cursor"` + Document *DocumentStats `bson:"document"` + Commands *CommandsStats `bson:"commands"` + Operation *OperationStats `bson:"operation"` + QueryExecutor *QueryExecutorStats `bson:"queryExecutor"` + Repl *ReplStats `bson:"repl"` + Storage *StorageStats `bson:"storage"` } // TTLStats stores information related to documents with a ttl index. @@ -314,6 +372,24 @@ type DocumentStats struct { Updated int64 `bson:"updated"` } +// CommandsStats stores information related to document metrics. 
+type CommandsStats struct { + Aggregate *CommandsStatsValue `bson:"aggregate"` + Count *CommandsStatsValue `bson:"count"` + Delete *CommandsStatsValue `bson:"delete"` + Distinct *CommandsStatsValue `bson:"distinct"` + Find *CommandsStatsValue `bson:"find"` + FindAndModify *CommandsStatsValue `bson:"findAndModify"` + GetMore *CommandsStatsValue `bson:"getMore"` + Insert *CommandsStatsValue `bson:"insert"` + Update *CommandsStatsValue `bson:"update"` +} + +type CommandsStatsValue struct { + Failed int64 `bson:"failed"` + Total int64 `bson:"total"` +} + // OpenCursorStats stores information related to open cursor metrics type OpenCursorStats struct { NoTimeout int64 `bson:"noTimeout"` @@ -321,6 +397,59 @@ type OpenCursorStats struct { Total int64 `bson:"total"` } +// OperationStats stores information related to query operations +// using special operation types +type OperationStats struct { + ScanAndOrder int64 `bson:"scanAndOrder"` + WriteConflicts int64 `bson:"writeConflicts"` +} + +// QueryExecutorStats stores information related to query execution +type QueryExecutorStats struct { + Scanned int64 `bson:"scanned"` + ScannedObjects int64 `bson:"scannedObjects"` +} + +// ReplStats stores information related to replication process +type ReplStats struct { + Apply *ReplApplyStats `bson:"apply"` + Buffer *ReplBufferStats `bson:"buffer"` + Executor *ReplExecutorStats `bson:"executor,omitempty"` + Network *ReplNetworkStats `bson:"network"` +} + +// ReplApplyStats stores information related to oplog application process +type ReplApplyStats struct { + Batches *BasicStats `bson:"batches"` + Ops int64 `bson:"ops"` +} + +// ReplBufferStats stores information related to oplog buffer +type ReplBufferStats struct { + Count int64 `bson:"count"` + SizeBytes int64 `bson:"sizeBytes"` +} + +// ReplExecutorStats stores information related to replication executor +type ReplExecutorStats struct { + Pool map[string]int64 `bson:"pool"` + Queues map[string]int64 `bson:"queues"` + UnsignaledEvents int64 `bson:"unsignaledEvents"` +} + +// ReplNetworkStats stores information related to network usage by replication process +type ReplNetworkStats struct { + Bytes int64 `bson:"bytes"` + GetMores *BasicStats `bson:"getmores"` + Ops int64 `bson:"ops"` +} + +// BasicStats stores information about an operation +type BasicStats struct { + Num int64 `bson:"num"` + TotalMillis int64 `bson:"totalMillis"` +} + // ReadWriteLockTimes stores time spent holding read/write locks. 
type ReadWriteLockTimes struct { Read int64 `bson:"R"` @@ -347,6 +476,46 @@ type ExtraInfo struct { PageFaults *int64 `bson:"page_faults"` } +// TCMallocStats stores information related to TCMalloc memory allocator metrics +type TCMallocStats struct { + Generic *GenericTCMAllocStats `bson:"generic"` + TCMalloc *DetailedTCMallocStats `bson:"tcmalloc"` +} + +// GenericTCMAllocStats stores generic TCMalloc memory allocator metrics +type GenericTCMAllocStats struct { + CurrentAllocatedBytes int64 `bson:"current_allocated_bytes"` + HeapSize int64 `bson:"heap_size"` +} + +// DetailedTCMallocStats stores detailed TCMalloc memory allocator metrics +type DetailedTCMallocStats struct { + PageheapFreeBytes int64 `bson:"pageheap_free_bytes"` + PageheapUnmappedBytes int64 `bson:"pageheap_unmapped_bytes"` + MaxTotalThreadCacheBytes int64 `bson:"max_total_thread_cache_bytes"` + CurrentTotalThreadCacheBytes int64 `bson:"current_total_thread_cache_bytes"` + TotalFreeBytes int64 `bson:"total_free_bytes"` + CentralCacheFreeBytes int64 `bson:"central_cache_free_bytes"` + TransferCacheFreeBytes int64 `bson:"transfer_cache_free_bytes"` + ThreadCacheFreeBytes int64 `bson:"thread_cache_free_bytes"` + PageheapComittedBytes int64 `bson:"pageheap_committed_bytes"` + PageheapScavengeCount int64 `bson:"pageheap_scavenge_count"` + PageheapCommitCount int64 `bson:"pageheap_commit_count"` + PageheapTotalCommitBytes int64 `bson:"pageheap_total_commit_bytes"` + PageheapDecommitCount int64 `bson:"pageheap_decommit_count"` + PageheapTotalDecommitBytes int64 `bson:"pageheap_total_decommit_bytes"` + PageheapReserveCount int64 `bson:"pageheap_reserve_count"` + PageheapTotalReserveBytes int64 `bson:"pageheap_total_reserve_bytes"` + SpinLockTotalDelayNanos int64 `bson:"spinlock_total_delay_ns"` +} + +// StorageStats stores information related to record allocations +type StorageStats struct { + FreelistSearchBucketExhausted int64 `bson:"freelist.search.bucketExhausted"` + FreelistSearchRequests int64 `bson:"freelist.search.requests"` + FreelistSearchScanned int64 `bson:"freelist.search.scanned"` +} + // StatHeader describes a single column for mongostat's terminal output, // its formatting, and in which modes it should be displayed. type StatHeader struct { @@ -449,6 +618,9 @@ type StatLine struct { Error error IsMongos bool Host string + Version string + + UptimeNanos int64 // The time at which this StatLine was generated. 
Time time.Time @@ -457,18 +629,58 @@ type StatLine struct { LastPrinted time.Time // Opcounter fields - Insert, Query, Update, Delete, GetMore, Command int64 + Insert, InsertCnt int64 + Query, QueryCnt int64 + Update, UpdateCnt int64 + Delete, DeleteCnt int64 + GetMore, GetMoreCnt int64 + Command, CommandCnt int64 + + // Asserts fields + Regular int64 + Warning int64 + Msg int64 + User int64 + Rollovers int64 + + // OpLatency fields + WriteOpsCnt int64 + WriteLatency int64 + ReadOpsCnt int64 + ReadLatency int64 + CommandOpsCnt int64 + CommandLatency int64 // TTL fields - Passes, DeletedDocuments int64 + Passes, PassesCnt int64 + DeletedDocuments, DeletedDocumentsCnt int64 // Cursor fields - TimedOutC int64 - NoTimeoutC, PinnedC, TotalC int64 + TimedOutC, TimedOutCCnt int64 + NoTimeoutC, NoTimeoutCCnt int64 + PinnedC, PinnedCCnt int64 + TotalC, TotalCCnt int64 // Document fields DeletedD, InsertedD, ReturnedD, UpdatedD int64 + //Commands fields + AggregateCommandTotal, AggregateCommandFailed int64 + CountCommandTotal, CountCommandFailed int64 + DeleteCommandTotal, DeleteCommandFailed int64 + DistinctCommandTotal, DistinctCommandFailed int64 + FindCommandTotal, FindCommandFailed int64 + FindAndModifyCommandTotal, FindAndModifyCommandFailed int64 + GetMoreCommandTotal, GetMoreCommandFailed int64 + InsertCommandTotal, InsertCommandFailed int64 + UpdateCommandTotal, UpdateCommandFailed int64 + + // Operation fields + ScanAndOrderOp, WriteConflictsOp int64 + + // Query Executor fields + TotalKeysScanned, TotalObjectsScanned int64 + // Connection fields CurrentC, AvailableC, TotalCreatedC int64 @@ -479,7 +691,7 @@ type StatLine struct { CacheDirtyPercent float64 CacheUsedPercent float64 - // Cache ultilization extended (wiredtiger only) + // Cache utilization extended (wiredtiger only) TrackedDirtyBytes int64 CurrentCachedBytes int64 MaxBytesConfigured int64 @@ -490,24 +702,54 @@ type StatLine struct { BytesReadInto int64 PagesEvictedByAppThread int64 PagesQueuedForEviction int64 + PagesReadIntoCache int64 + PagesRequestedFromCache int64 ServerEvictingPages int64 WorkerThreadEvictingPages int64 + InternalPagesEvicted int64 + ModifiedPagesEvicted int64 + UnmodifiedPagesEvicted int64 // Replicated Opcounter fields - InsertR, QueryR, UpdateR, DeleteR, GetMoreR, CommandR int64 - ReplLag int64 - OplogTimeDiff int64 - Flushes int64 - Mapped, Virtual, Resident, NonMapped int64 - Faults int64 - HighestLocked *LockStatus - QueuedReaders, QueuedWriters int64 - ActiveReaders, ActiveWriters int64 - NetIn, NetOut int64 - NumConnections int64 - ReplSetName string - NodeType string - NodeState string + InsertR, InsertRCnt int64 + QueryR, QueryRCnt int64 + UpdateR, UpdateRCnt int64 + DeleteR, DeleteRCnt int64 + GetMoreR, GetMoreRCnt int64 + CommandR, CommandRCnt int64 + ReplLag int64 + OplogStats *OplogStats + Flushes, FlushesCnt int64 + FlushesTotalTime int64 + Mapped, Virtual, Resident, NonMapped int64 + Faults, FaultsCnt int64 + HighestLocked *LockStatus + QueuedReaders, QueuedWriters int64 + ActiveReaders, ActiveWriters int64 + AvailableReaders, AvailableWriters int64 + TotalTicketsReaders, TotalTicketsWriters int64 + NetIn, NetInCnt int64 + NetOut, NetOutCnt int64 + NumConnections int64 + ReplSetName string + NodeType string + NodeState string + NodeStateInt int64 + + // Replicated Metrics fields + ReplNetworkBytes int64 + ReplNetworkGetmoresNum int64 + ReplNetworkGetmoresTotalMillis int64 + ReplNetworkOps int64 + ReplBufferCount int64 + ReplBufferSizeBytes int64 + ReplApplyBatchesNum int64 + 
ReplApplyBatchesTotalMillis int64 + ReplApplyOps int64 + ReplExecutorPoolInProgressCount int64 + ReplExecutorQueuesNetworkInProgress int64 + ReplExecutorQueuesSleepers int64 + ReplExecutorUnsignaledEvents int64 // Cluster fields JumboChunksCount int64 @@ -515,11 +757,40 @@ type StatLine struct { // DB stats field DbStatsLines []DbStatLine + // Col Stats field + ColStatsLines []ColStatLine + // Shard stats TotalInUse, TotalAvailable, TotalCreated, TotalRefreshing int64 // Shard Hosts stats field ShardHostStatsLines map[string]ShardHostStatLine + + // TCMalloc stats field + TCMallocCurrentAllocatedBytes int64 + TCMallocHeapSize int64 + TCMallocCentralCacheFreeBytes int64 + TCMallocCurrentTotalThreadCacheBytes int64 + TCMallocMaxTotalThreadCacheBytes int64 + TCMallocTotalFreeBytes int64 + TCMallocTransferCacheFreeBytes int64 + TCMallocThreadCacheFreeBytes int64 + TCMallocSpinLockTotalDelayNanos int64 + TCMallocPageheapFreeBytes int64 + TCMallocPageheapUnmappedBytes int64 + TCMallocPageheapComittedBytes int64 + TCMallocPageheapScavengeCount int64 + TCMallocPageheapCommitCount int64 + TCMallocPageheapTotalCommitBytes int64 + TCMallocPageheapDecommitCount int64 + TCMallocPageheapTotalDecommitBytes int64 + TCMallocPageheapReserveCount int64 + TCMallocPageheapTotalReserveBytes int64 + + // Storage stats field + StorageFreelistSearchBucketExhausted int64 + StorageFreelistSearchRequests int64 + StorageFreelistSearchScanned int64 } type DbStatLine struct { @@ -534,6 +805,16 @@ type DbStatLine struct { IndexSize int64 Ok int64 } +type ColStatLine struct { + Name string + DbName string + Count int64 + Size int64 + AvgObjSize float64 + StorageSize int64 + TotalIndexSize int64 + Ok int64 +} type ShardHostStatLine struct { InUse int64 @@ -576,12 +857,12 @@ func computeLockDiffs(prevLocks, curLocks map[string]LockUsage) []LockUsage { return lockUsages } -func diff(newVal, oldVal, sampleTime int64) int64 { +func diff(newVal, oldVal, sampleTime int64) (int64, int64) { d := newVal - oldVal if d < 0 { d = newVal } - return d / sampleTime + return d / sampleTime, newVal } // NewStatLine constructs a StatLine object from two MongoStatus objects. 
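A note on the reworked `diff` helper just above: it now returns a pair, the per-second rate computed from two cumulative samples plus the raw counter taken from the newer sample, which is how the plugin can report both the existing `*_per_sec` rates and the new raw counter fields seen in the test fixture earlier in this patch. A minimal, runnable sketch of the intended call pattern (the sample values and the 10 s interval are illustrative):

```go
package main

import "fmt"

// diff mirrors the reworked helper: it returns the per-second rate between
// two cumulative samples and the newer raw counter value. If the counter
// went backwards (newVal < oldVal), the rate is based on newVal alone.
func diff(newVal, oldVal, sampleTime int64) (int64, int64) {
	d := newVal - oldVal
	if d < 0 {
		d = newVal
	}
	return d / sampleTime, newVal
}

func main() {
	// Two serverStatus insert counters sampled 10 seconds apart.
	oldInserts, newInserts := int64(1000), int64(1600)

	perSec, total := diff(newInserts, oldInserts, 10)
	fmt.Println(perSec, total) // 60 1600 -> inserts_per_sec=60, inserts=1600
}
```

When a counter resets between samples, for example after a mongod restart, the helper falls back to the new value alone, so the reported rate is an approximation for that interval rather than a negative number.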
@@ -592,6 +873,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal := &StatLine{ Key: key, Host: newStat.Host, + Version: newStat.Version, Mapped: -1, Virtual: -1, Resident: -1, @@ -599,6 +881,13 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec Faults: -1, } + returnVal.UptimeNanos = 1000 * 1000 * newStat.UptimeMillis + + // set connection info + returnVal.CurrentC = newStat.Connections.Current + returnVal.AvailableC = newStat.Connections.Available + returnVal.TotalCreatedC = newStat.Connections.TotalCreated + // set the storage engine appropriately if newStat.StorageEngine != nil && newStat.StorageEngine["name"] != "" { returnVal.StorageEngine = newStat.StorageEngine["name"] @@ -607,42 +896,176 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if newStat.Opcounters != nil && oldStat.Opcounters != nil { - returnVal.Insert = diff(newStat.Opcounters.Insert, oldStat.Opcounters.Insert, sampleSecs) - returnVal.Query = diff(newStat.Opcounters.Query, oldStat.Opcounters.Query, sampleSecs) - returnVal.Update = diff(newStat.Opcounters.Update, oldStat.Opcounters.Update, sampleSecs) - returnVal.Delete = diff(newStat.Opcounters.Delete, oldStat.Opcounters.Delete, sampleSecs) - returnVal.GetMore = diff(newStat.Opcounters.GetMore, oldStat.Opcounters.GetMore, sampleSecs) - returnVal.Command = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs) + returnVal.Insert, returnVal.InsertCnt = diff(newStat.Opcounters.Insert, oldStat.Opcounters.Insert, sampleSecs) + returnVal.Query, returnVal.QueryCnt = diff(newStat.Opcounters.Query, oldStat.Opcounters.Query, sampleSecs) + returnVal.Update, returnVal.UpdateCnt = diff(newStat.Opcounters.Update, oldStat.Opcounters.Update, sampleSecs) + returnVal.Delete, returnVal.DeleteCnt = diff(newStat.Opcounters.Delete, oldStat.Opcounters.Delete, sampleSecs) + returnVal.GetMore, returnVal.GetMoreCnt = diff(newStat.Opcounters.GetMore, oldStat.Opcounters.GetMore, sampleSecs) + returnVal.Command, returnVal.CommandCnt = diff(newStat.Opcounters.Command, oldStat.Opcounters.Command, sampleSecs) + } + + if newStat.OpLatencies != nil { + if newStat.OpLatencies.Reads != nil { + returnVal.ReadOpsCnt = newStat.OpLatencies.Reads.Ops + returnVal.ReadLatency = newStat.OpLatencies.Reads.Latency + } + if newStat.OpLatencies.Writes != nil { + returnVal.WriteOpsCnt = newStat.OpLatencies.Writes.Ops + returnVal.WriteLatency = newStat.OpLatencies.Writes.Latency + } + if newStat.OpLatencies.Commands != nil { + returnVal.CommandOpsCnt = newStat.OpLatencies.Commands.Ops + returnVal.CommandLatency = newStat.OpLatencies.Commands.Latency + } + } + + if newStat.Asserts != nil { + returnVal.Regular = newStat.Asserts.Regular + returnVal.Warning = newStat.Asserts.Warning + returnVal.Msg = newStat.Asserts.Msg + returnVal.User = newStat.Asserts.User + returnVal.Rollovers = newStat.Asserts.Rollovers + } + + if newStat.TCMallocStats != nil { + if newStat.TCMallocStats.Generic != nil { + returnVal.TCMallocCurrentAllocatedBytes = newStat.TCMallocStats.Generic.CurrentAllocatedBytes + returnVal.TCMallocHeapSize = newStat.TCMallocStats.Generic.HeapSize + } + if newStat.TCMallocStats.TCMalloc != nil { + returnVal.TCMallocCentralCacheFreeBytes = newStat.TCMallocStats.TCMalloc.CentralCacheFreeBytes + returnVal.TCMallocCurrentTotalThreadCacheBytes = newStat.TCMallocStats.TCMalloc.CurrentTotalThreadCacheBytes + returnVal.TCMallocMaxTotalThreadCacheBytes = 
newStat.TCMallocStats.TCMalloc.MaxTotalThreadCacheBytes + returnVal.TCMallocTransferCacheFreeBytes = newStat.TCMallocStats.TCMalloc.TransferCacheFreeBytes + returnVal.TCMallocThreadCacheFreeBytes = newStat.TCMallocStats.TCMalloc.ThreadCacheFreeBytes + returnVal.TCMallocTotalFreeBytes = newStat.TCMallocStats.TCMalloc.TotalFreeBytes + returnVal.TCMallocSpinLockTotalDelayNanos = newStat.TCMallocStats.TCMalloc.SpinLockTotalDelayNanos + + returnVal.TCMallocPageheapFreeBytes = newStat.TCMallocStats.TCMalloc.PageheapFreeBytes + returnVal.TCMallocPageheapUnmappedBytes = newStat.TCMallocStats.TCMalloc.PageheapUnmappedBytes + returnVal.TCMallocPageheapComittedBytes = newStat.TCMallocStats.TCMalloc.PageheapComittedBytes + returnVal.TCMallocPageheapScavengeCount = newStat.TCMallocStats.TCMalloc.PageheapScavengeCount + returnVal.TCMallocPageheapCommitCount = newStat.TCMallocStats.TCMalloc.PageheapCommitCount + returnVal.TCMallocPageheapTotalCommitBytes = newStat.TCMallocStats.TCMalloc.PageheapTotalCommitBytes + returnVal.TCMallocPageheapDecommitCount = newStat.TCMallocStats.TCMalloc.PageheapDecommitCount + returnVal.TCMallocPageheapTotalDecommitBytes = newStat.TCMallocStats.TCMalloc.PageheapTotalDecommitBytes + returnVal.TCMallocPageheapReserveCount = newStat.TCMallocStats.TCMalloc.PageheapReserveCount + returnVal.TCMallocPageheapTotalReserveBytes = newStat.TCMallocStats.TCMalloc.PageheapTotalReserveBytes + } } if newStat.Metrics != nil && oldStat.Metrics != nil { if newStat.Metrics.TTL != nil && oldStat.Metrics.TTL != nil { - returnVal.Passes = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs) - returnVal.DeletedDocuments = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs) + returnVal.Passes, returnVal.PassesCnt = diff(newStat.Metrics.TTL.Passes, oldStat.Metrics.TTL.Passes, sampleSecs) + returnVal.DeletedDocuments, returnVal.DeletedDocumentsCnt = diff(newStat.Metrics.TTL.DeletedDocuments, oldStat.Metrics.TTL.DeletedDocuments, sampleSecs) } if newStat.Metrics.Cursor != nil && oldStat.Metrics.Cursor != nil { - returnVal.TimedOutC = diff(newStat.Metrics.Cursor.TimedOut, oldStat.Metrics.Cursor.TimedOut, sampleSecs) + returnVal.TimedOutC, returnVal.TimedOutCCnt = diff(newStat.Metrics.Cursor.TimedOut, oldStat.Metrics.Cursor.TimedOut, sampleSecs) if newStat.Metrics.Cursor.Open != nil && oldStat.Metrics.Cursor.Open != nil { - returnVal.NoTimeoutC = diff(newStat.Metrics.Cursor.Open.NoTimeout, oldStat.Metrics.Cursor.Open.NoTimeout, sampleSecs) - returnVal.PinnedC = diff(newStat.Metrics.Cursor.Open.Pinned, oldStat.Metrics.Cursor.Open.Pinned, sampleSecs) - returnVal.TotalC = diff(newStat.Metrics.Cursor.Open.Total, oldStat.Metrics.Cursor.Open.Total, sampleSecs) + returnVal.NoTimeoutC, returnVal.NoTimeoutCCnt = diff(newStat.Metrics.Cursor.Open.NoTimeout, oldStat.Metrics.Cursor.Open.NoTimeout, sampleSecs) + returnVal.PinnedC, returnVal.PinnedCCnt = diff(newStat.Metrics.Cursor.Open.Pinned, oldStat.Metrics.Cursor.Open.Pinned, sampleSecs) + returnVal.TotalC, returnVal.TotalCCnt = diff(newStat.Metrics.Cursor.Open.Total, oldStat.Metrics.Cursor.Open.Total, sampleSecs) } } + if newStat.Metrics.Document != nil { + returnVal.DeletedD = newStat.Metrics.Document.Deleted + returnVal.InsertedD = newStat.Metrics.Document.Inserted + returnVal.ReturnedD = newStat.Metrics.Document.Returned + returnVal.UpdatedD = newStat.Metrics.Document.Updated + } + + if newStat.Metrics.Commands != nil { + if newStat.Metrics.Commands.Aggregate != nil { + 
returnVal.AggregateCommandTotal = newStat.Metrics.Commands.Aggregate.Total + returnVal.AggregateCommandFailed = newStat.Metrics.Commands.Aggregate.Failed + } + if newStat.Metrics.Commands.Count != nil { + returnVal.CountCommandTotal = newStat.Metrics.Commands.Count.Total + returnVal.CountCommandFailed = newStat.Metrics.Commands.Count.Failed + } + if newStat.Metrics.Commands.Delete != nil { + returnVal.DeleteCommandTotal = newStat.Metrics.Commands.Delete.Total + returnVal.DeleteCommandFailed = newStat.Metrics.Commands.Delete.Failed + } + if newStat.Metrics.Commands.Distinct != nil { + returnVal.DistinctCommandTotal = newStat.Metrics.Commands.Distinct.Total + returnVal.DistinctCommandFailed = newStat.Metrics.Commands.Distinct.Failed + } + if newStat.Metrics.Commands.Find != nil { + returnVal.FindCommandTotal = newStat.Metrics.Commands.Find.Total + returnVal.FindCommandFailed = newStat.Metrics.Commands.Find.Failed + } + if newStat.Metrics.Commands.FindAndModify != nil { + returnVal.FindAndModifyCommandTotal = newStat.Metrics.Commands.FindAndModify.Total + returnVal.FindAndModifyCommandFailed = newStat.Metrics.Commands.FindAndModify.Failed + } + if newStat.Metrics.Commands.GetMore != nil { + returnVal.GetMoreCommandTotal = newStat.Metrics.Commands.GetMore.Total + returnVal.GetMoreCommandFailed = newStat.Metrics.Commands.GetMore.Failed + } + if newStat.Metrics.Commands.Insert != nil { + returnVal.InsertCommandTotal = newStat.Metrics.Commands.Insert.Total + returnVal.InsertCommandFailed = newStat.Metrics.Commands.Insert.Failed + } + if newStat.Metrics.Commands.Update != nil { + returnVal.UpdateCommandTotal = newStat.Metrics.Commands.Update.Total + returnVal.UpdateCommandFailed = newStat.Metrics.Commands.Update.Failed + } + } + + if newStat.Metrics.Operation != nil { + returnVal.ScanAndOrderOp = newStat.Metrics.Operation.ScanAndOrder + returnVal.WriteConflictsOp = newStat.Metrics.Operation.WriteConflicts + } + + if newStat.Metrics.QueryExecutor != nil { + returnVal.TotalKeysScanned = newStat.Metrics.QueryExecutor.Scanned + returnVal.TotalObjectsScanned = newStat.Metrics.QueryExecutor.ScannedObjects + } + + if newStat.Metrics.Repl != nil { + if newStat.Metrics.Repl.Apply != nil { + returnVal.ReplApplyBatchesNum = newStat.Metrics.Repl.Apply.Batches.Num + returnVal.ReplApplyBatchesTotalMillis = newStat.Metrics.Repl.Apply.Batches.TotalMillis + returnVal.ReplApplyOps = newStat.Metrics.Repl.Apply.Ops + } + if newStat.Metrics.Repl.Buffer != nil { + returnVal.ReplBufferCount = newStat.Metrics.Repl.Buffer.Count + returnVal.ReplBufferSizeBytes = newStat.Metrics.Repl.Buffer.SizeBytes + } + if newStat.Metrics.Repl.Executor != nil { + returnVal.ReplExecutorPoolInProgressCount = newStat.Metrics.Repl.Executor.Pool["inProgressCount"] + returnVal.ReplExecutorQueuesNetworkInProgress = newStat.Metrics.Repl.Executor.Queues["networkInProgress"] + returnVal.ReplExecutorQueuesSleepers = newStat.Metrics.Repl.Executor.Queues["sleepers"] + returnVal.ReplExecutorUnsignaledEvents = newStat.Metrics.Repl.Executor.UnsignaledEvents + } + if newStat.Metrics.Repl.Network != nil { + returnVal.ReplNetworkBytes = newStat.Metrics.Repl.Network.Bytes + returnVal.ReplNetworkGetmoresNum = newStat.Metrics.Repl.Network.GetMores.Num + returnVal.ReplNetworkGetmoresTotalMillis = newStat.Metrics.Repl.Network.GetMores.TotalMillis + returnVal.ReplNetworkOps = newStat.Metrics.Repl.Network.Ops + } + } + + if newStat.Metrics.Storage != nil { + returnVal.StorageFreelistSearchBucketExhausted = newStat.Metrics.Storage.FreelistSearchBucketExhausted + 
returnVal.StorageFreelistSearchRequests = newStat.Metrics.Storage.FreelistSearchRequests + returnVal.StorageFreelistSearchScanned = newStat.Metrics.Storage.FreelistSearchScanned + } } if newStat.OpcountersRepl != nil && oldStat.OpcountersRepl != nil { - returnVal.InsertR = diff(newStat.OpcountersRepl.Insert, oldStat.OpcountersRepl.Insert, sampleSecs) - returnVal.QueryR = diff(newStat.OpcountersRepl.Query, oldStat.OpcountersRepl.Query, sampleSecs) - returnVal.UpdateR = diff(newStat.OpcountersRepl.Update, oldStat.OpcountersRepl.Update, sampleSecs) - returnVal.DeleteR = diff(newStat.OpcountersRepl.Delete, oldStat.OpcountersRepl.Delete, sampleSecs) - returnVal.GetMoreR = diff(newStat.OpcountersRepl.GetMore, oldStat.OpcountersRepl.GetMore, sampleSecs) - returnVal.CommandR = diff(newStat.OpcountersRepl.Command, oldStat.OpcountersRepl.Command, sampleSecs) + returnVal.InsertR, returnVal.InsertRCnt = diff(newStat.OpcountersRepl.Insert, oldStat.OpcountersRepl.Insert, sampleSecs) + returnVal.QueryR, returnVal.QueryRCnt = diff(newStat.OpcountersRepl.Query, oldStat.OpcountersRepl.Query, sampleSecs) + returnVal.UpdateR, returnVal.UpdateRCnt = diff(newStat.OpcountersRepl.Update, oldStat.OpcountersRepl.Update, sampleSecs) + returnVal.DeleteR, returnVal.DeleteRCnt = diff(newStat.OpcountersRepl.Delete, oldStat.OpcountersRepl.Delete, sampleSecs) + returnVal.GetMoreR, returnVal.GetMoreRCnt = diff(newStat.OpcountersRepl.GetMore, oldStat.OpcountersRepl.GetMore, sampleSecs) + returnVal.CommandR, returnVal.CommandRCnt = diff(newStat.OpcountersRepl.Command, oldStat.OpcountersRepl.Command, sampleSecs) } returnVal.CacheDirtyPercent = -1 returnVal.CacheUsedPercent = -1 - if newStat.WiredTiger != nil && oldStat.WiredTiger != nil { - returnVal.Flushes = newStat.WiredTiger.Transaction.TransCheckpoints - oldStat.WiredTiger.Transaction.TransCheckpoints + if newStat.WiredTiger != nil { returnVal.CacheDirtyPercent = float64(newStat.WiredTiger.Cache.TrackedDirtyBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured) returnVal.CacheUsedPercent = float64(newStat.WiredTiger.Cache.CurrentCachedBytes) / float64(newStat.WiredTiger.Cache.MaxBytesConfigured) @@ -656,10 +1079,21 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec returnVal.BytesReadInto = newStat.WiredTiger.Cache.BytesReadInto returnVal.PagesEvictedByAppThread = newStat.WiredTiger.Cache.PagesEvictedByAppThread returnVal.PagesQueuedForEviction = newStat.WiredTiger.Cache.PagesQueuedForEviction + returnVal.PagesReadIntoCache = newStat.WiredTiger.Cache.PagesReadIntoCache + returnVal.PagesRequestedFromCache = newStat.WiredTiger.Cache.PagesRequestedFromCache returnVal.ServerEvictingPages = newStat.WiredTiger.Cache.ServerEvictingPages returnVal.WorkerThreadEvictingPages = newStat.WiredTiger.Cache.WorkerThreadEvictingPages + + returnVal.InternalPagesEvicted = newStat.WiredTiger.Cache.InternalPagesEvicted + returnVal.ModifiedPagesEvicted = newStat.WiredTiger.Cache.ModifiedPagesEvicted + returnVal.UnmodifiedPagesEvicted = newStat.WiredTiger.Cache.UnmodifiedPagesEvicted + + returnVal.FlushesTotalTime = newStat.WiredTiger.Transaction.TransCheckpointsTotalTimeMsecs * int64(time.Millisecond) + } + if newStat.WiredTiger != nil && oldStat.WiredTiger != nil { + returnVal.Flushes, returnVal.FlushesCnt = diff(newStat.WiredTiger.Transaction.TransCheckpoints, oldStat.WiredTiger.Transaction.TransCheckpoints, sampleSecs) } else if newStat.BackgroundFlushing != nil && oldStat.BackgroundFlushing != nil { - returnVal.Flushes = 
newStat.BackgroundFlushing.Flushes - oldStat.BackgroundFlushing.Flushes + returnVal.Flushes, returnVal.FlushesCnt = diff(newStat.BackgroundFlushing.Flushes, oldStat.BackgroundFlushing.Flushes, sampleSecs) } returnVal.Time = newMongo.SampleTime @@ -702,7 +1136,7 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec if oldStat.ExtraInfo != nil && newStat.ExtraInfo != nil && oldStat.ExtraInfo.PageFaults != nil && newStat.ExtraInfo.PageFaults != nil { - returnVal.Faults = diff(*(newStat.ExtraInfo.PageFaults), *(oldStat.ExtraInfo.PageFaults), sampleSecs) + returnVal.Faults, returnVal.FaultsCnt = diff(*(newStat.ExtraInfo.PageFaults), *(oldStat.ExtraInfo.PageFaults), sampleSecs) } if !returnVal.IsMongos && oldStat.Locks != nil { globalCheck, hasGlobal := oldStat.Locks["Global"] @@ -794,6 +1228,10 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec if hasWT { returnVal.ActiveReaders = newStat.WiredTiger.Concurrent.Read.Out returnVal.ActiveWriters = newStat.WiredTiger.Concurrent.Write.Out + returnVal.AvailableReaders = newStat.WiredTiger.Concurrent.Read.Available + returnVal.AvailableWriters = newStat.WiredTiger.Concurrent.Write.Available + returnVal.TotalTicketsReaders = newStat.WiredTiger.Concurrent.Read.TotalTickets + returnVal.TotalTicketsWriters = newStat.WiredTiger.Concurrent.Write.TotalTickets } else if newStat.GlobalLock.ActiveClients != nil { returnVal.ActiveReaders = newStat.GlobalLock.ActiveClients.Readers returnVal.ActiveWriters = newStat.GlobalLock.ActiveClients.Writers @@ -801,92 +1239,127 @@ func NewStatLine(oldMongo, newMongo MongoStatus, key string, all bool, sampleSec } if oldStat.Network != nil && newStat.Network != nil { - returnVal.NetIn = diff(newStat.Network.BytesIn, oldStat.Network.BytesIn, sampleSecs) - returnVal.NetOut = diff(newStat.Network.BytesOut, oldStat.Network.BytesOut, sampleSecs) + returnVal.NetIn, returnVal.NetInCnt = diff(newStat.Network.BytesIn, oldStat.Network.BytesIn, sampleSecs) + returnVal.NetOut, returnVal.NetOutCnt = diff(newStat.Network.BytesOut, oldStat.Network.BytesOut, sampleSecs) } if newStat.Connections != nil { returnVal.NumConnections = newStat.Connections.Current } - newReplStat := *newMongo.ReplSetStatus + if newMongo.ReplSetStatus != nil { + newReplStat := *newMongo.ReplSetStatus - if newReplStat.Members != nil { - myName := newStat.Repl.Me - // Find the master and myself - master := ReplSetMember{} - me := ReplSetMember{} - for _, member := range newReplStat.Members { - if member.Name == myName { - // Store my state string - returnVal.NodeState = member.StateStr - if member.State == 1 { - // I'm the master - returnVal.ReplLag = 0 - break - } else { - // I'm secondary - me = member + if newReplStat.Members != nil { + myName := newStat.Repl.Me + // Find the master and myself + master := ReplSetMember{} + me := ReplSetMember{} + for _, member := range newReplStat.Members { + if member.Name == myName { + // Store my state string + returnVal.NodeState = member.StateStr + // Store my state integer + returnVal.NodeStateInt = member.State + + if member.State == 1 { + // I'm the master + returnVal.ReplLag = 0 + break + } else { + // I'm secondary + me = member + } + } else if member.State == 1 { + // Master found + master = member } - } else if member.State == 1 { - // Master found - master = member } - } - if me.State == 2 { - // OptimeDate.Unix() type is int64 - lag := master.OptimeDate.Unix() - me.OptimeDate.Unix() - if lag < 0 { - returnVal.ReplLag = 0 - } else { - returnVal.ReplLag = 
lag + if me.State == 2 { + // OptimeDate.Unix() type is int64 + lag := master.OptimeDate.Unix() - me.OptimeDate.Unix() + if lag < 0 { + returnVal.ReplLag = 0 + } else { + returnVal.ReplLag = lag + } } } } - newClusterStat := *newMongo.ClusterStatus - returnVal.JumboChunksCount = newClusterStat.JumboChunksCount - returnVal.OplogTimeDiff = newMongo.OplogStats.TimeDiff + if newMongo.ClusterStatus != nil { + newClusterStat := *newMongo.ClusterStatus + returnVal.JumboChunksCount = newClusterStat.JumboChunksCount + } - newDbStats := *newMongo.DbStats - for _, db := range newDbStats.Dbs { - dbStatsData := db.DbStatsData - // mongos doesn't have the db key, so setting the db name - if dbStatsData.Db == "" { - dbStatsData.Db = db.Name + if newMongo.OplogStats != nil { + returnVal.OplogStats = newMongo.OplogStats + } + + if newMongo.DbStats != nil { + newDbStats := *newMongo.DbStats + for _, db := range newDbStats.Dbs { + dbStatsData := db.DbStatsData + // mongos doesn't have the db key, so setting the db name + if dbStatsData.Db == "" { + dbStatsData.Db = db.Name + } + dbStatLine := &DbStatLine{ + Name: dbStatsData.Db, + Collections: dbStatsData.Collections, + Objects: dbStatsData.Objects, + AvgObjSize: dbStatsData.AvgObjSize, + DataSize: dbStatsData.DataSize, + StorageSize: dbStatsData.StorageSize, + NumExtents: dbStatsData.NumExtents, + Indexes: dbStatsData.Indexes, + IndexSize: dbStatsData.IndexSize, + Ok: dbStatsData.Ok, + } + returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine) } - dbStatLine := &DbStatLine{ - Name: dbStatsData.Db, - Collections: dbStatsData.Collections, - Objects: dbStatsData.Objects, - AvgObjSize: dbStatsData.AvgObjSize, - DataSize: dbStatsData.DataSize, - StorageSize: dbStatsData.StorageSize, - NumExtents: dbStatsData.NumExtents, - Indexes: dbStatsData.Indexes, - IndexSize: dbStatsData.IndexSize, - Ok: dbStatsData.Ok, + } + + if newMongo.ColStats != nil { + for _, col := range newMongo.ColStats.Collections { + colStatsData := col.ColStatsData + // mongos doesn't have the db key, so setting the db name + if colStatsData.Collection == "" { + colStatsData.Collection = col.Name + } + colStatLine := &ColStatLine{ + Name: colStatsData.Collection, + DbName: col.DbName, + Count: colStatsData.Count, + Size: colStatsData.Size, + AvgObjSize: colStatsData.AvgObjSize, + StorageSize: colStatsData.StorageSize, + TotalIndexSize: colStatsData.TotalIndexSize, + Ok: colStatsData.Ok, + } + returnVal.ColStatsLines = append(returnVal.ColStatsLines, *colStatLine) } - returnVal.DbStatsLines = append(returnVal.DbStatsLines, *dbStatLine) } // Set shard stats - newShardStats := *newMongo.ShardStats - returnVal.TotalInUse = newShardStats.TotalInUse - returnVal.TotalAvailable = newShardStats.TotalAvailable - returnVal.TotalCreated = newShardStats.TotalCreated - returnVal.TotalRefreshing = newShardStats.TotalRefreshing - returnVal.ShardHostStatsLines = map[string]ShardHostStatLine{} - for host, stats := range newShardStats.Hosts { - shardStatLine := &ShardHostStatLine{ - InUse: stats.InUse, - Available: stats.Available, - Created: stats.Created, - Refreshing: stats.Refreshing, - } + if newMongo.ShardStats != nil { + newShardStats := *newMongo.ShardStats + returnVal.TotalInUse = newShardStats.TotalInUse + returnVal.TotalAvailable = newShardStats.TotalAvailable + returnVal.TotalCreated = newShardStats.TotalCreated + returnVal.TotalRefreshing = newShardStats.TotalRefreshing + returnVal.ShardHostStatsLines = map[string]ShardHostStatLine{} + for host, stats := range newShardStats.Hosts { + 
shardStatLine := &ShardHostStatLine{ + InUse: stats.InUse, + Available: stats.Available, + Created: stats.Created, + Refreshing: stats.Refreshing, + } - returnVal.ShardHostStatsLines[host] = *shardStatLine + returnVal.ShardHostStatsLines[host] = *shardStatLine + } } return returnVal diff --git a/plugins/inputs/mongodb/mongostat_test.go b/plugins/inputs/mongodb/mongostat_test.go new file mode 100644 index 000000000..5506602a9 --- /dev/null +++ b/plugins/inputs/mongodb/mongostat_test.go @@ -0,0 +1,205 @@ +package mongodb + +import ( + "testing" + //"time" + + //"github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestLatencyStats(t *testing.T) { + + sl := NewStatLine( + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + }, + }, + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Writes: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Commands: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + }, + }, + }, + "foo", + true, + 60, + ) + + assert.Equal(t, sl.CommandLatency, int64(0)) + assert.Equal(t, sl.ReadLatency, int64(0)) + assert.Equal(t, sl.WriteLatency, int64(0)) + assert.Equal(t, sl.CommandOpsCnt, int64(0)) + assert.Equal(t, sl.ReadOpsCnt, int64(0)) + assert.Equal(t, sl.WriteOpsCnt, int64(0)) +} + +func TestLatencyStatsDiffZero(t *testing.T) { + + sl := NewStatLine( + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Writes: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Commands: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + }, + }, + }, + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Writes: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + Commands: &LatencyStats{ + Ops: 0, + Latency: 0, + }, + }, + }, + }, + "foo", + true, + 60, + ) + + assert.Equal(t, sl.CommandLatency, int64(0)) + assert.Equal(t, sl.ReadLatency, int64(0)) + assert.Equal(t, sl.WriteLatency, int64(0)) + assert.Equal(t, sl.CommandOpsCnt, int64(0)) + assert.Equal(t, sl.ReadOpsCnt, int64(0)) + assert.Equal(t, sl.WriteOpsCnt, int64(0)) +} + +func TestLatencyStatsDiff(t *testing.T) { + + sl := NewStatLine( + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 4189041956, + Latency: 2255922322753, + }, + Writes: &LatencyStats{ + Ops: 1691019457, + Latency: 494478256915, + }, + Commands: &LatencyStats{ + Ops: 1019150402, + Latency: 59177710371, + }, + }, + }, + }, + MongoStatus{ + ServerStatus: &ServerStatus{ + Connections: &ConnectionStats{}, + Mem: &MemStats{ + Bits: 0, + Resident: 0, + Virtual: 0, + Supported: false, + Mapped: 0, + 
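+					// The memory gauges in this fixture are left at zero; the test only asserts the latency and op-count fields derived from OpLatencies.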
MappedWithJournal: 0, + }, + OpLatencies: &OpLatenciesStats{ + Reads: &LatencyStats{ + Ops: 4189049884, + Latency: 2255946760057, + }, + Writes: &LatencyStats{ + Ops: 1691021287, + Latency: 494479456987, + }, + Commands: &LatencyStats{ + Ops: 1019152861, + Latency: 59177981552, + }, + }, + }, + }, + "foo", + true, + 60, + ) + + assert.Equal(t, sl.CommandLatency, int64(59177981552)) + assert.Equal(t, sl.ReadLatency, int64(2255946760057)) + assert.Equal(t, sl.WriteLatency, int64(494479456987)) + assert.Equal(t, sl.CommandOpsCnt, int64(1019152861)) + assert.Equal(t, sl.ReadOpsCnt, int64(4189049884)) + assert.Equal(t, sl.WriteOpsCnt, int64(1691021287)) +} diff --git a/plugins/inputs/monit/README.md b/plugins/inputs/monit/README.md new file mode 100644 index 000000000..be116394d --- /dev/null +++ b/plugins/inputs/monit/README.md @@ -0,0 +1,235 @@ +# Monit Input Plugin + +The `monit` plugin gathers metrics and status information about local processes, +remote hosts, file, file systems, directories and network interfaces managed +and watched over by [Monit][monit]. + +The use this plugin you should first enable the [HTTPD TCP port][httpd] in +Monit. + +Minimum Version of Monit tested with is 5.16. + +[monit]: https://mmonit.com/ +[httpd]: https://mmonit.com/monit/documentation/monit.html#TCP-PORT + +### Configuration + +```toml +[[inputs.monit]] + ## Monit HTTPD address + address = "http://127.0.0.1:2812" + + ## Username and Password for Monit + # username = "" + # password = "" + + ## Amount of time allowed to complete the HTTP request + # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Metrics + +- monit_filesystem + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + - mode + - block_percent + - block_usage + - block_total + - inode_percent + - inode_usage + - inode_total + ++ monit_directory + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + - permissions + +- monit_file + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + - size + - permissions + ++ monit_process + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + - cpu_percent + - cpu_percent_total + - mem_kb + - mem_kb_total + - mem_percent + - mem_percent_total + - pid + - parent_pid + - threads + - children + +- monit_remote_host + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + - hostname + - port_number + - request + - protocol + - type + ++ monit_system + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + - cpu_system + - cpu_user + - cpu_wait + - cpu_load_avg_1m + - 
cpu_load_avg_5m + - cpu_load_avg_15m + - mem_kb + - mem_percent + - swap_kb + - swap_percent + +- monit_fifo + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + - permissions + ++ monit_program + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + +- monit_network + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + ++ monit_program + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + +- monit_network + - tags: + - address + - version + - service + - platform_name + - status + - monitoring_status + - monitoring_mode + - fields: + - status_code + - monitoring_status_code + - monitoring_mode_code + +### Example Output +``` +monit_file,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog_pid,source=xyzzy.local,status=running,version=5.20.0 mode=644i,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,size=3i,status_code=0i 1579735047000000000 +monit_process,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=rsyslog,source=xyzzy.local,status=running,version=5.20.0 children=0i,cpu_percent=0,cpu_percent_total=0,mem_kb=3148i,mem_kb_total=3148i,mem_percent=0.2,mem_percent_total=0.2,monitoring_mode_code=0i,monitoring_status_code=1i,parent_pid=1i,pending_action_code=0i,pid=318i,status_code=0i,threads=4i 1579735047000000000 +monit_program,monitoring_mode=active,monitoring_status=initializing,pending_action=none,platform_name=Linux,service=echo,source=xyzzy.local,status=running,version=5.20.0 monitoring_mode_code=0i,monitoring_status_code=2i,pending_action_code=0i,program_started=0i,program_status=0i,status_code=0i 1579735047000000000 +monit_system,monitoring_mode=active,monitoring_status=monitored,pending_action=none,platform_name=Linux,service=debian-stretch-monit.virt,source=xyzzy.local,status=running,version=5.20.0 cpu_load_avg_15m=0,cpu_load_avg_1m=0,cpu_load_avg_5m=0,cpu_system=0,cpu_user=0,cpu_wait=0,mem_kb=42852i,mem_percent=2.1,monitoring_mode_code=0i,monitoring_status_code=1i,pending_action_code=0i,status_code=0i,swap_kb=0,swap_percent=0 1579735047000000000 +``` diff --git a/plugins/inputs/monit/monit.go b/plugins/inputs/monit/monit.go new file mode 100644 index 000000000..be17762a1 --- /dev/null +++ b/plugins/inputs/monit/monit.go @@ -0,0 +1,409 @@ +package monit + +import ( + "encoding/xml" + "fmt" + "net/http" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "golang.org/x/net/html/charset" +) + +const ( + fileSystem string = "0" + directory = "1" + file = "2" + process = "3" + remoteHost = "4" + system = "5" + fifo = "6" + program = "7" + network = "8" +) + +var pendingActions = []string{"ignore", "alert", "restart", "stop", "exec", "unmonitor", "start", "monitor"} + +type Status struct { + Server Server `xml:"server"` + Platform Platform `xml:"platform"` + Services 
[]Service `xml:"service"` +} + +type Server struct { + ID string `xml:"id"` + Version string `xml:"version"` + Uptime int64 `xml:"uptime"` + Poll int `xml:"poll"` + LocalHostname string `xml:"localhostname"` + StartDelay int `xml:"startdelay"` + ControlFile string `xml:"controlfile"` +} + +type Platform struct { + Name string `xml:"name"` + Release string `xml:"release"` + Version string `xml:"version"` + Machine string `xml:"machine"` + CPU int `xml:"cpu"` + Memory int `xml:"memory"` + Swap int `xml:"swap"` +} + +type Service struct { + Type string `xml:"type,attr"` + Name string `xml:"name"` + Status int `xml:"status"` + MonitoringStatus int `xml:"monitor"` + MonitorMode int `xml:"monitormode"` + PendingAction int `xml:"pendingaction"` + Memory Memory `xml:"memory"` + CPU CPU `xml:"cpu"` + System System `xml:"system"` + Size int64 `xml:"size"` + Mode int `xml:"mode"` + Program Program `xml:"program"` + Block Block `xml:"block"` + Inode Inode `xml:"inode"` + Pid int64 `xml:"pid"` + ParentPid int64 `xml:"ppid"` + Threads int `xml:"threads"` + Children int `xml:"children"` + Port Port `xml:"port"` + Link Link `xml:"link"` +} + +type Link struct { + State int `xml:"state"` + Speed int64 `xml:"speed"` + Duplex int `xml:"duplex"` + Download Download `xml:"download"` + Upload Upload `xml:"upload"` +} + +type Download struct { + Packets struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"packets"` + Bytes struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"bytes"` + Errors struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"errors"` +} + +type Upload struct { + Packets struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"packets"` + Bytes struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"bytes"` + Errors struct { + Now int64 `xml:"now"` + Total int64 `xml:"total"` + } `xml:"errors"` +} + +type Port struct { + Hostname string `xml:"hostname"` + PortNumber int64 `xml:"portnumber"` + Request string `xml:"request"` + Protocol string `xml:"protocol"` + Type string `xml:"type"` +} + +type Block struct { + Percent float64 `xml:"percent"` + Usage float64 `xml:"usage"` + Total float64 `xml:"total"` +} + +type Inode struct { + Percent float64 `xml:"percent"` + Usage float64 `xml:"usage"` + Total float64 `xml:"total"` +} + +type Program struct { + Started int64 `xml:"started"` + Status int `xml:"status"` +} + +type Memory struct { + Percent float64 `xml:"percent"` + PercentTotal float64 `xml:"percenttotal"` + Kilobyte int64 `xml:"kilobyte"` + KilobyteTotal int64 `xml:"kilobytetotal"` +} + +type CPU struct { + Percent float64 `xml:"percent"` + PercentTotal float64 `xml:"percenttotal"` +} + +type System struct { + Load struct { + Avg01 float64 `xml:"avg01"` + Avg05 float64 `xml:"avg05"` + Avg15 float64 `xml:"avg15"` + } `xml:"load"` + CPU struct { + User float64 `xml:"user"` + System float64 `xml:"system"` + Wait float64 `xml:"wait"` + } `xml:"cpu"` + Memory struct { + Percent float64 `xml:"percent"` + Kilobyte int64 `xml:"kilobyte"` + } `xml:"memory"` + Swap struct { + Percent float64 `xml:"percent"` + Kilobyte float64 `xml:"kilobyte"` + } `xml:"swap"` +} + +type Monit struct { + Address string `toml:"address"` + Username string `toml:"username"` + Password string `toml:"password"` + client http.Client + tls.ClientConfig + Timeout internal.Duration `toml:"timeout"` +} + +type Messagebody struct { + Metrics []string `json:"metrics"` +} + +func (m *Monit) Description() string { + return "Read metrics and status 
information about processes managed by Monit" +} + +var sampleConfig = ` + ## Monit HTTPD address + address = "http://127.0.0.1:2812" + + ## Username and Password for Monit + # username = "" + # password = "" + + ## Amount of time allowed to complete the HTTP request + # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +func (m *Monit) SampleConfig() string { + return sampleConfig +} + +func (m *Monit) Init() error { + tlsCfg, err := m.ClientConfig.TLSConfig() + if err != nil { + return err + } + + m.client = http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: m.Timeout.Duration, + } + return nil +} + +func (m *Monit) Gather(acc telegraf.Accumulator) error { + + req, err := http.NewRequest("GET", fmt.Sprintf("%s/_status?format=xml", m.Address), nil) + if err != nil { + return err + } + if len(m.Username) > 0 || len(m.Password) > 0 { + req.SetBasicAuth(m.Username, m.Password) + } + + resp, err := m.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == 200 { + + var status Status + decoder := xml.NewDecoder(resp.Body) + decoder.CharsetReader = charset.NewReaderLabel + if err := decoder.Decode(&status); err != nil { + return fmt.Errorf("error parsing input: %v", err) + } + + tags := map[string]string{ + "version": status.Server.Version, + "source": status.Server.LocalHostname, + "platform_name": status.Platform.Name, + } + + for _, service := range status.Services { + fields := make(map[string]interface{}) + tags["status"] = serviceStatus(service) + fields["status_code"] = service.Status + tags["pending_action"] = pendingAction(service) + fields["pending_action_code"] = service.PendingAction + tags["monitoring_status"] = monitoringStatus(service) + fields["monitoring_status_code"] = service.MonitoringStatus + tags["monitoring_mode"] = monitoringMode(service) + fields["monitoring_mode_code"] = service.MonitorMode + tags["service"] = service.Name + if service.Type == fileSystem { + fields["mode"] = service.Mode + fields["block_percent"] = service.Block.Percent + fields["block_usage"] = service.Block.Usage + fields["block_total"] = service.Block.Total + fields["inode_percent"] = service.Inode.Percent + fields["inode_usage"] = service.Inode.Usage + fields["inode_total"] = service.Inode.Total + acc.AddFields("monit_filesystem", fields, tags) + } else if service.Type == directory { + fields["mode"] = service.Mode + acc.AddFields("monit_directory", fields, tags) + } else if service.Type == file { + fields["size"] = service.Size + fields["mode"] = service.Mode + acc.AddFields("monit_file", fields, tags) + } else if service.Type == process { + fields["cpu_percent"] = service.CPU.Percent + fields["cpu_percent_total"] = service.CPU.PercentTotal + fields["mem_kb"] = service.Memory.Kilobyte + fields["mem_kb_total"] = service.Memory.KilobyteTotal + fields["mem_percent"] = service.Memory.Percent + fields["mem_percent_total"] = service.Memory.PercentTotal + fields["pid"] = service.Pid + fields["parent_pid"] = service.ParentPid + fields["threads"] = service.Threads + fields["children"] = service.Children + acc.AddFields("monit_process", fields, tags) + } else if service.Type == remoteHost { + fields["remote_hostname"] = service.Port.Hostname + fields["port_number"] = service.Port.PortNumber + 
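+				// The remaining fields describe the remote check target taken from Monit's <port> element: request path, protocol, and socket type.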
fields["request"] = service.Port.Request + fields["protocol"] = service.Port.Protocol + fields["type"] = service.Port.Type + acc.AddFields("monit_remote_host", fields, tags) + } else if service.Type == system { + fields["cpu_system"] = service.System.CPU.System + fields["cpu_user"] = service.System.CPU.User + fields["cpu_wait"] = service.System.CPU.Wait + fields["cpu_load_avg_1m"] = service.System.Load.Avg01 + fields["cpu_load_avg_5m"] = service.System.Load.Avg05 + fields["cpu_load_avg_15m"] = service.System.Load.Avg15 + fields["mem_kb"] = service.System.Memory.Kilobyte + fields["mem_percent"] = service.System.Memory.Percent + fields["swap_kb"] = service.System.Swap.Kilobyte + fields["swap_percent"] = service.System.Swap.Percent + acc.AddFields("monit_system", fields, tags) + } else if service.Type == fifo { + fields["mode"] = service.Mode + acc.AddFields("monit_fifo", fields, tags) + } else if service.Type == program { + fields["program_started"] = service.Program.Started * 10000000 + fields["program_status"] = service.Program.Status + acc.AddFields("monit_program", fields, tags) + } else if service.Type == network { + fields["link_state"] = service.Link.State + fields["link_speed"] = service.Link.Speed + fields["link_mode"] = linkMode(service) + fields["download_packets_now"] = service.Link.Download.Packets.Now + fields["download_packets_total"] = service.Link.Download.Packets.Total + fields["download_bytes_now"] = service.Link.Download.Bytes.Now + fields["download_bytes_total"] = service.Link.Download.Bytes.Total + fields["download_errors_now"] = service.Link.Download.Errors.Now + fields["download_errors_total"] = service.Link.Download.Errors.Total + fields["upload_packets_now"] = service.Link.Upload.Packets.Now + fields["upload_packets_total"] = service.Link.Upload.Packets.Total + fields["upload_bytes_now"] = service.Link.Upload.Bytes.Now + fields["upload_bytes_total"] = service.Link.Upload.Bytes.Total + fields["upload_errors_now"] = service.Link.Upload.Errors.Now + fields["upload_errors_total"] = service.Link.Upload.Errors.Total + acc.AddFields("monit_network", fields, tags) + } + } + } else { + return fmt.Errorf("received status code %d (%s), expected 200", + resp.StatusCode, + http.StatusText(resp.StatusCode)) + + } + return nil +} + +func linkMode(s Service) string { + if s.Link.Duplex == 1 { + return "duplex" + } else if s.Link.Duplex == 0 { + return "simplex" + } else { + return "unknown" + } +} + +func serviceStatus(s Service) string { + if s.Status == 0 { + return "running" + } else { + return "failure" + } +} + +func pendingAction(s Service) string { + if s.PendingAction > 0 { + if s.PendingAction >= len(pendingActions) { + return "unknown" + } + return pendingActions[s.PendingAction-1] + } else { + return "none" + } +} + +func monitoringMode(s Service) string { + switch s.MonitorMode { + case 0: + return "active" + case 1: + return "passive" + } + return "unknown" +} + +func monitoringStatus(s Service) string { + switch s.MonitoringStatus { + case 1: + return "monitored" + case 2: + return "initializing" + case 4: + return "waiting" + } + return "not_monitored" +} + +func init() { + inputs.Add("monit", func() telegraf.Input { + return &Monit{} + }) +} diff --git a/plugins/inputs/monit/monit_test.go b/plugins/inputs/monit/monit_test.go new file mode 100644 index 000000000..1d95b45a5 --- /dev/null +++ b/plugins/inputs/monit/monit_test.go @@ -0,0 +1,704 @@ +package monit + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + "time" + + 
"github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type transportMock struct { +} + +func (t *transportMock) RoundTrip(r *http.Request) (*http.Response, error) { + errorString := "Get http://127.0.0.1:2812/_status?format=xml: " + + "read tcp 192.168.10.2:55610->127.0.0.1:2812: " + + "read: connection reset by peer" + return nil, errors.New(errorString) +} + +func TestServiceType(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "check filesystem service type", + filename: "testdata/response_servicetype_0.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_filesystem", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "mode": 555, + "block_percent": 29.5, + "block_usage": 4424.0, + "block_total": 14990.0, + "inode_percent": 0.8, + "inode_usage": 59674.0, + "inode_total": 7680000.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check directory service type", + filename: "testdata/response_servicetype_1.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_directory", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "mode": 755, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check file service type", + filename: "testdata/response_servicetype_2.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_file", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "mode": 644, + "size": 1565, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check process service type", + filename: "testdata/response_servicetype_3.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_process", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "cpu_percent": 0.0, + "cpu_percent_total": 0.0, + "mem_kb": 22892, + "mem_kb_total": 22892, + "mem_percent": 0.1, + "mem_percent_total": 0.1, + "pid": 5959, + "parent_pid": 1, + "threads": 31, + "children": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check remote host service type", + filename: "testdata/response_servicetype_4.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_remote_host", + map[string]string{ + 
"version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "remote_hostname": "192.168.1.10", + "port_number": 2812, + "request": "", + "protocol": "DEFAULT", + "type": "TCP", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check system service type", + filename: "testdata/response_servicetype_5.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_system", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "cpu_system": 0.1, + "cpu_user": 0.0, + "cpu_wait": 0.0, + "cpu_load_avg_1m": 0.00, + "cpu_load_avg_5m": 0.00, + "cpu_load_avg_15m": 0.00, + "mem_kb": 259668, + "mem_percent": 1.5, + "swap_kb": 0.0, + "swap_percent": 0.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check fifo service type", + filename: "testdata/response_servicetype_6.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_fifo", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "mode": 664, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check program service type", + filename: "testdata/response_servicetype_7.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_program", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "program_status": 0, + "program_started": int64(15728504980000000), + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check network service type", + filename: "testdata/response_servicetype_8.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "link_speed": 1000000000, + "link_mode": "duplex", + "link_state": 1, + "download_packets_now": 0, + "download_packets_total": 15243, + "download_bytes_now": 0, + "download_bytes_total": 5506778, + "download_errors_now": 0, + "download_errors_total": 0, + "upload_packets_now": 0, + "upload_packets_total": 8822, + "upload_bytes_now": 0, + "upload_bytes_total": 1287240, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + 
}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, tt.filename) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer ts.Close() + + plugin := &Monit{ + Address: ts.URL, + } + + plugin.Init() + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) + }) + } +} + +func TestMonitFailure(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "check monit failure status", + filename: "testdata/response_servicetype_8_failure.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "failure", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 8388608, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "link_speed": -1, + "link_mode": "unknown", + "link_state": 0, + "download_packets_now": 0, + "download_packets_total": 0, + "download_bytes_now": 0, + "download_bytes_total": 0, + "download_errors_now": 0, + "download_errors_total": 0, + "upload_packets_now": 0, + "upload_packets_total": 0, + "upload_bytes_now": 0, + "upload_bytes_total": 0, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check passive mode", + filename: "testdata/response_servicetype_8_passivemode.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "passive", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 1, + "pending_action_code": 0, + "link_speed": 1000000000, + "link_mode": "duplex", + "link_state": 1, + "download_packets_now": 0, + "download_packets_total": 15243, + "download_bytes_now": 0, + "download_bytes_total": 5506778, + "download_errors_now": 0, + "download_errors_total": 0, + "upload_packets_now": 0, + "upload_packets_total": 8822, + "upload_bytes_now": 0, + "upload_bytes_total": 1287240, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check initializing status", + filename: "testdata/response_servicetype_8_initializingmode.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "initializing", + "monitoring_mode": "active", + "pending_action": "none", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 2, + "monitoring_mode_code": 0, + "pending_action_code": 0, + "link_speed": 1000000000, + "link_mode": "duplex", + "link_state": 1, + "download_packets_now": 0, + "download_packets_total": 15243, + "download_bytes_now": 0, + "download_bytes_total": 5506778, + "download_errors_now": 0, + "download_errors_total": 0, + 
"upload_packets_now": 0, + "upload_packets_total": 8822, + "upload_bytes_now": 0, + "upload_bytes_total": 1287240, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "check pending action", + filename: "testdata/response_servicetype_8_pendingaction.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "monit_network", + map[string]string{ + "version": "5.17.1", + "source": "localhost", + "platform_name": "Linux", + "service": "test", + "status": "running", + "monitoring_status": "monitored", + "monitoring_mode": "active", + "pending_action": "exec", + }, + map[string]interface{}{ + "status_code": 0, + "monitoring_status_code": 1, + "monitoring_mode_code": 0, + "pending_action_code": 5, + "link_speed": 1000000000, + "link_mode": "duplex", + "link_state": 1, + "download_packets_now": 0, + "download_packets_total": 15243, + "download_bytes_now": 0, + "download_bytes_total": 5506778, + "download_errors_now": 0, + "download_errors_total": 0, + "upload_packets_now": 0, + "upload_packets_total": 8822, + "upload_bytes_now": 0, + "upload_bytes_total": 1287240, + "upload_errors_now": 0, + "upload_errors_total": 0, + }, + time.Unix(0, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, tt.filename) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer ts.Close() + + plugin := &Monit{ + Address: ts.URL, + } + + plugin.Init() + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) + }) + } +} + +func checkAuth(r *http.Request, username, password string) bool { + user, pass, ok := r.BasicAuth() + if !ok { + return false + } + return user == username && pass == password +} + +func TestAllowHosts(t *testing.T) { + + r := &Monit{ + Address: "http://127.0.0.1:2812", + Username: "test", + Password: "test", + } + + var acc testutil.Accumulator + + r.client.Transport = &transportMock{} + + err := r.Gather(&acc) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "read: connection reset by peer") + } +} + +func TestConnection(t *testing.T) { + + r := &Monit{ + Address: "http://127.0.0.1:2812", + Username: "test", + Password: "test", + } + + var acc testutil.Accumulator + + r.Init() + + err := r.Gather(&acc) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "connect: connection refused") + } +} + +func TestInvalidUsernameOrPassword(t *testing.T) { + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + if !checkAuth(r, "testing", "testing") { + http.Error(w, "Unauthorized.", 401) + return + } + + switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") + default: + panic("Cannot handle request") + } + })) + + defer ts.Close() + + r := &Monit{ + Address: ts.URL, + Username: "test", + Password: "test", + } + + var acc testutil.Accumulator + + r.Init() + + err := r.Gather(&acc) + + assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") +} + +func TestNoUsernameOrPasswordConfiguration(t *testing.T) { + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + if !checkAuth(r, "testing", "testing") { + http.Error(w, "Unauthorized.", 401) + return + } + 
+ switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, "testdata/response_servicetype_0.xml") + default: + panic("Cannot handle request") + } + })) + + defer ts.Close() + + r := &Monit{ + Address: ts.URL, + } + + var acc testutil.Accumulator + + r.Init() + + err := r.Gather(&acc) + + assert.EqualError(t, err, "received status code 401 (Unauthorized), expected 200") +} + +func TestInvalidXMLAndInvalidTypes(t *testing.T) { + + tests := []struct { + name string + filename string + }{ + { + name: "check filesystem service type", + filename: "testdata/response_invalidxml_1.xml", + }, + { + name: "check filesystem service type", + filename: "testdata/response_invalidxml_2.xml", + }, + { + name: "check filesystem service type", + filename: "testdata/response_invalidxml_3.xml", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/_status": + http.ServeFile(w, r, tt.filename) + default: + w.WriteHeader(http.StatusNotFound) + } + })) + defer ts.Close() + + plugin := &Monit{ + Address: ts.URL, + } + + plugin.Init() + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + + if assert.Error(t, err) { + assert.Contains(t, err.Error(), "error parsing input:") + } + }) + } +} diff --git a/plugins/inputs/monit/testdata/response_invalidxml_1.xml b/plugins/inputs/monit/testdata/response_invalidxml_1.xml new file mode 100644 index 000000000..8f1dcbaa0 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_invalidxml_1.xml @@ -0,0 +1,51 @@ + + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 709694 + 0 + 0 + 1 + 0 + 0 + 555 + 0 + 0 + 4096 + + 29.5 + 4424.0 + 14990.0 + + 0.8 + 59674 + 7680000 + + +
diff --git a/plugins/inputs/monit/testdata/response_invalidxml_2.xml b/plugins/inputs/monit/testdata/response_invalidxml_2.xml new file mode 100644 index 000000000..aab7bc87c --- /dev/null +++ b/plugins/inputs/monit/testdata/response_invalidxml_2.xml @@ -0,0 +1,52 @@ + + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 709694 + 0.0 + 0 + 1 + 0 + 0 + 555 + 0 + 0 + 4096 + + 29.5 + 4424.0 + 14990.0 + + + 0.8 + 59674 + 7680000 + + +
diff --git a/plugins/inputs/monit/testdata/response_invalidxml_3.xml b/plugins/inputs/monit/testdata/response_invalidxml_3.xml new file mode 100644 index 000000000..9fd7ed31d --- /dev/null +++ b/plugins/inputs/monit/testdata/response_invalidxml_3.xml @@ -0,0 +1,52 @@ + + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 709694 + 0 + 0 + 1 + 0 + 0 + 555 + 0 + 0 + 4096 + + 29.5 + 4424.0 + 14990.0 + + + 0.8 + 59674 + 7680000 + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_0.xml b/plugins/inputs/monit/testdata/response_servicetype_0.xml new file mode 100644 index 000000000..beaeb2003 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_0.xml @@ -0,0 +1,51 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 709694 + 0 + 0 + 1 + 0 + 0 + 555 + 0 + 0 + 4096 + + 29.5 + 4424.0 + 14990.0 + + + 0.8 + 59674 + 7680000 + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_1.xml b/plugins/inputs/monit/testdata/response_servicetype_1.xml new file mode 100644 index 000000000..86f02f142 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_1.xml @@ -0,0 +1,41 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850342 + 546082 + 0 + 0 + 1 + 0 + 0 + 755 + 0 + 0 + 1572272434 + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_2.xml b/plugins/inputs/monit/testdata/response_servicetype_2.xml new file mode 100644 index 000000000..709368007 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_2.xml @@ -0,0 +1,42 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1476628305 + 302669 + 0 + 0 + 1 + 0 + 0 + 644 + 1000 + 1000 + 1476518441 + 1565 + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_3.xml b/plugins/inputs/monit/testdata/response_servicetype_3.xml new file mode 100644 index 000000000..14a603dc3 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_3.xml @@ -0,0 +1,52 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1476628305 + 302552 + 0 + 0 + 1 + 0 + 0 + 5959 + 1 + 109870 + 0 + 31 + + 0.1 + 0.1 + 22892 + 22892 + + + 0.0 + 0.0 + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_4.xml b/plugins/inputs/monit/testdata/response_servicetype_4.xml new file mode 100644 index 000000000..d7064e2f7 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_4.xml @@ -0,0 +1,45 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572862451 + 947671 + 0 + 0 + 1 + 0 + 0 + + 192.168.1.10 + 2812 + + DEFAULT + TCP + 0.000145 + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_5.xml b/plugins/inputs/monit/testdata/response_servicetype_5.xml new file mode 100644 index 000000000..d0ee2cfca --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_5.xml @@ -0,0 +1,57 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1476628305 + 302682 + 0 + 0 + 1 + 0 + 0 + + + 0.00 + 0.00 + 0.00 + + + 0.0 + 0.1 + 0.0 + + + 1.5 + 259668 + + + 0.0 + 0 + + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_6.xml b/plugins/inputs/monit/testdata/response_servicetype_6.xml new file mode 100644 index 000000000..5acabe2da --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_6.xml @@ -0,0 +1,41 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572862451 + 947495 + 0 + 0 + 1 + 0 + 0 + 664 + 1000 + 1000 + 1572271731 + +
\ No newline at end of file diff --git a/plugins/inputs/monit/testdata/response_servicetype_7.xml b/plugins/inputs/monit/testdata/response_servicetype_7.xml new file mode 100644 index 000000000..fbda56c5c --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_7.xml @@ -0,0 +1,42 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572850498 + 710675 + 0 + 0 + 1 + 0 + 0 + + 1572850498 + 0 + Stats health check successful. + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8.xml b/plugins/inputs/monit/testdata/response_servicetype_8.xml new file mode 100644 index 000000000..12623a9d4 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 0 + 0 + 1 + 0 + 0 + + 1 + 1000000000 + 1 + + + 0 + 15243 + + + 0 + 5506778 + + + 0 + 0 + + + + + 0 + 8822 + + + 0 + 1287240 + + + 0 + 0 + + + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_failure.xml b/plugins/inputs/monit/testdata/response_servicetype_8_failure.xml new file mode 100644 index 000000000..d68419d59 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8_failure.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 8388608 + 0 + 1 + 0 + 0 + + 0 + -1 + -1 + + + 0 + 0 + + + 0 + 0 + + + 0 + 0 + + + + + 0 + 0 + + + 0 + 0 + + + 0 + 0 + + + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml b/plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml new file mode 100644 index 000000000..357f66f3b --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8_initializingmode.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 0 + 0 + 2 + 0 + 0 + + 1 + 1000000000 + 1 + + + 0 + 15243 + + + 0 + 5506778 + + + 0 + 0 + + + + + 0 + 8822 + + + 0 + 1287240 + + + 0 + 0 + + + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml b/plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml new file mode 100644 index 000000000..a4d9595ae --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8_passivemode.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 0 + 0 + 1 + 1 + 0 + + 1 + 1000000000 + 1 + + + 0 + 15243 + + + 0 + 5506778 + + + 0 + 0 + + + + + 0 + 8822 + + + 0 + 1287240 + + + 0 + 0 + + + + +
diff --git a/plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml b/plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml new file mode 100644 index 000000000..df19a6428 --- /dev/null +++ b/plugins/inputs/monit/testdata/response_servicetype_8_pendingaction.xml @@ -0,0 +1,70 @@ + + + + 0ed39c522be4c3971541412c43141613 + 1476518435 + 5.17.1 + 109878 + 10 + 0 + localhost + /var/vcap/bosh/etc/monitrc + +
127.0.0.1
+ 2822 + 0 +
+
+ + Linux + 4.15.0-65-generic + #74~16.04.1-Ubuntu SMP Wed Sep 18 09:51:44 UTC 2019 + x86_64 + 8 + 16432272 + 16432268 + + + test + 1572869770 + 807562 + 0 + 0 + 1 + 0 + 5 + + 1 + 1000000000 + 1 + + + 0 + 15243 + + + 0 + 5506778 + + + 0 + 0 + + + + + 0 + 8822 + + + 0 + 1287240 + + + 0 + 0 + + + + +
diff --git a/plugins/inputs/mqtt_consumer/README.md b/plugins/inputs/mqtt_consumer/README.md index df7869a86..a9e8236ee 100644 --- a/plugins/inputs/mqtt_consumer/README.md +++ b/plugins/inputs/mqtt_consumer/README.md @@ -1,38 +1,63 @@ # MQTT Consumer Input Plugin -The [MQTT](http://mqtt.org/) consumer plugin reads from -specified MQTT topics and adds messages to InfluxDB. -The plugin expects messages in the -[Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). +The [MQTT][mqtt] consumer plugin reads from the specified MQTT topics +and creates metrics using one of the supported [input data formats][]. -### Configuration: +### Configuration ```toml -# Read metrics from MQTT topic(s) [[inputs.mqtt_consumer]] - ## MQTT broker URLs to be used. The format should be scheme://host:port, - ## schema can be tcp, ssl, or ws. - servers = ["tcp://localhost:1883"] - ## MQTT QoS, must be 0, 1, or 2 - qos = 0 - ## Connection timeout for initial connection in seconds - connection_timeout = "30s" + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a seperate plugin instance. + ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] + servers = ["tcp://127.0.0.1:1883"] - ## Topics to subscribe to + ## Topics that will be subscribed to. topics = [ "telegraf/host01/cpu", "telegraf/+/mem", "sensors/#", ] - # if true, messages that can't be delivered while the subscriber is offline - # will be delivered when it comes back (such as on service restart). - # NOTE: if true, client_id MUST be set - persistent_session = false - # If empty, a random client ID will be generated. - client_id = "" + ## The message topic will be stored in a tag specified by this value. If set + ## to the empty string no topic tag will be created. + # topic_tag = "topic" - ## username and password to connect MQTT server. + ## QoS policy for messages + ## 0 = at most once + ## 1 = at least once + ## 2 = exactly once + ## + ## When using a QoS of 1 or 2, you should enable persistent_session to allow + ## resuming unacknowledged messages. + # qos = 0 + + ## Connection timeout for initial connection in seconds + # connection_timeout = "30s" + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Persistent session disables clearing of the client session on connection. + ## In order for this option to work you must also set client_id to identify + ## the client. To receive messages that arrived while the client is offline, + ## also set the qos option to 1 or 2 and don't forget to also set the QoS when + ## publishing. + # persistent_session = false + + ## If unset, a random client ID will be generated. + # client_id = "" + + ## Username and password to connect MQTT server. 
# username = "telegraf" # password = "metricsmetricsmetricsmetrics" @@ -50,7 +75,10 @@ The plugin expects messages in the data_format = "influx" ``` -### Tags: +### Metrics - All measurements are tagged with the incoming topic, ie `topic=telegraf/host01/cpu` + +[mqtt]: https://mqtt.org +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer.go b/plugins/inputs/mqtt_consumer/mqtt_consumer.go index 58074af79..9ceeb1389 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer.go @@ -1,78 +1,132 @@ package mqtt_consumer import ( + "context" + "errors" "fmt" - "log" "strings" - "sync" "time" + "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - - "github.com/eclipse/paho.mqtt.golang" ) -// 30 Seconds is the default used by paho.mqtt.golang -var defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second} +var ( + // 30 Seconds is the default used by paho.mqtt.golang + defaultConnectionTimeout = internal.Duration{Duration: 30 * time.Second} + + defaultMaxUndeliveredMessages = 1000 +) + +type ConnectionState int +type empty struct{} +type semaphore chan empty + +const ( + Disconnected ConnectionState = iota + Connecting + Connected +) + +type Client interface { + Connect() mqtt.Token + SubscribeMultiple(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token + AddRoute(topic string, callback mqtt.MessageHandler) + Disconnect(quiesce uint) +} + +type ClientFactory func(o *mqtt.ClientOptions) Client type MQTTConsumer struct { - Servers []string - Topics []string - Username string - Password string - QoS int `toml:"qos"` - ConnectionTimeout internal.Duration `toml:"connection_timeout"` + Servers []string `toml:"servers"` + Topics []string `toml:"topics"` + TopicTag *string `toml:"topic_tag"` + Username string `toml:"username"` + Password string `toml:"password"` + QoS int `toml:"qos"` + ConnectionTimeout internal.Duration `toml:"connection_timeout"` + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` parser parsers.Parser - // Legacy metric buffer support + // Legacy metric buffer support; deprecated in v0.10.3 MetricBuffer int PersistentSession bool ClientID string `toml:"client_id"` tls.ClientConfig - sync.Mutex - client mqtt.Client - // channel of all incoming raw mqtt messages - in chan mqtt.Message - done chan struct{} + Log telegraf.Logger - // keep the accumulator internally: - acc telegraf.Accumulator + clientFactory ClientFactory + client Client + opts *mqtt.ClientOptions + acc telegraf.TrackingAccumulator + state ConnectionState + sem semaphore + messages map[telegraf.TrackingID]bool + topicTag string - connected bool + ctx context.Context + cancel context.CancelFunc } var sampleConfig = ` - ## MQTT broker URLs to be used. The format should be scheme://host:port, - ## schema can be tcp, ssl, or ws. - servers = ["tcp://localhost:1883"] + ## Broker URLs for the MQTT server or cluster. To connect to multiple + ## clusters or standalone servers, use a seperate plugin instance. 
+ ## example: servers = ["tcp://localhost:1883"] + ## servers = ["ssl://localhost:1883"] + ## servers = ["ws://localhost:1883"] + servers = ["tcp://127.0.0.1:1883"] - ## MQTT QoS, must be 0, 1, or 2 - qos = 0 - ## Connection timeout for initial connection in seconds - connection_timeout = "30s" - - ## Topics to subscribe to + ## Topics that will be subscribed to. topics = [ "telegraf/host01/cpu", "telegraf/+/mem", "sensors/#", ] - # if true, messages that can't be delivered while the subscriber is offline - # will be delivered when it comes back (such as on service restart). - # NOTE: if true, client_id MUST be set - persistent_session = false - # If empty, a random client ID will be generated. - client_id = "" + ## The message topic will be stored in a tag specified by this value. If set + ## to the empty string no topic tag will be created. + # topic_tag = "topic" - ## username and password to connect MQTT server. + ## QoS policy for messages + ## 0 = at most once + ## 1 = at least once + ## 2 = exactly once + ## + ## When using a QoS of 1 or 2, you should enable persistent_session to allow + ## resuming unacknowledged messages. + # qos = 0 + + ## Connection timeout for initial connection in seconds + # connection_timeout = "30s" + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Persistent session disables clearing of the client session on connection. + ## In order for this option to work you must also set client_id to identify + ## the client. To receive messages that arrived while the client is offline, + ## also set the qos option to 1 or 2 and don't forget to also set the QoS when + ## publishing. + # persistent_session = false + + ## If unset, a random client ID will be generated. + # client_id = "" + + ## Username and password to connect MQTT server. 
# username = "telegraf" # password = "metricsmetricsmetricsmetrics" @@ -102,23 +156,24 @@ func (m *MQTTConsumer) SetParser(parser parsers.Parser) { m.parser = parser } -func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { - m.Lock() - defer m.Unlock() - m.connected = false +func (m *MQTTConsumer) Init() error { + m.state = Disconnected if m.PersistentSession && m.ClientID == "" { - return fmt.Errorf("ERROR MQTT Consumer: When using persistent_session" + - " = true, you MUST also set client_id") + return errors.New("persistent_session requires client_id") } - m.acc = acc if m.QoS > 2 || m.QoS < 0 { - return fmt.Errorf("MQTT Consumer, invalid QoS value: %d", m.QoS) + return fmt.Errorf("qos value must be 0, 1, or 2: %d", m.QoS) } if m.ConnectionTimeout.Duration < 1*time.Second { - return fmt.Errorf("MQTT Consumer, invalid connection_timeout value: %s", m.ConnectionTimeout.Duration) + return fmt.Errorf("connection_timeout must be greater than 1s: %s", m.ConnectionTimeout.Duration) + } + + m.topicTag = "topic" + if m.TopicTag != nil { + m.topicTag = *m.TopicTag } opts, err := m.createOpts() @@ -126,92 +181,133 @@ func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { return err } - m.client = mqtt.NewClient(opts) - m.in = make(chan mqtt.Message, 1000) - m.done = make(chan struct{}) + m.opts = opts + return nil +} + +func (m *MQTTConsumer) Start(acc telegraf.Accumulator) error { + m.state = Disconnected + + m.acc = acc.WithTracking(m.MaxUndeliveredMessages) + m.sem = make(semaphore, m.MaxUndeliveredMessages) + m.ctx, m.cancel = context.WithCancel(context.Background()) + + m.client = m.clientFactory(m.opts) + + // AddRoute sets up the function for handling messages. These need to be + // added in case we find a persistent session containing subscriptions so we + // know where to dispatch persisted and new messages to. In the alternate + // case that we need to create the subscriptions these will be replaced. + for _, topic := range m.Topics { + m.client.AddRoute(topic, m.recvMessage) + } + + m.state = Connecting m.connect() return nil } func (m *MQTTConsumer) connect() error { - if token := m.client.Connect(); token.Wait() && token.Error() != nil { + token := m.client.Connect() + if token.Wait() && token.Error() != nil { err := token.Error() - log.Printf("D! MQTT Consumer, connection error - %v", err) - + m.state = Disconnected return err } - go m.receiver() + m.Log.Infof("Connected %v", m.Servers) + m.state = Connected + m.messages = make(map[telegraf.TrackingID]bool) + + // Persistent sessions should skip subscription if a session is present, as + // the subscriptions are stored by the server. + type sessionPresent interface { + SessionPresent() bool + } + if t, ok := token.(sessionPresent); ok && t.SessionPresent() { + m.Log.Debugf("Session found %v", m.Servers) + return nil + } + + topics := make(map[string]byte) + for _, topic := range m.Topics { + topics[topic] = byte(m.QoS) + } + + subscribeToken := m.client.SubscribeMultiple(topics, m.recvMessage) + subscribeToken.Wait() + if subscribeToken.Error() != nil { + m.acc.AddError(fmt.Errorf("subscription error: topics: %s: %v", + strings.Join(m.Topics[:], ","), subscribeToken.Error())) + } return nil } -func (m *MQTTConsumer) onConnect(c mqtt.Client) { - log.Printf("I! 
MQTT Client Connected") - if !m.PersistentSession || !m.connected { - topics := make(map[string]byte) - for _, topic := range m.Topics { - topics[topic] = byte(m.QoS) - } - subscribeToken := c.SubscribeMultiple(topics, m.recvMessage) - subscribeToken.Wait() - if subscribeToken.Error() != nil { - m.acc.AddError(fmt.Errorf("E! MQTT Subscribe Error\ntopics: %s\nerror: %s", - strings.Join(m.Topics[:], ","), subscribeToken.Error())) - } - m.connected = true - } - return -} - func (m *MQTTConsumer) onConnectionLost(c mqtt.Client, err error) { - m.acc.AddError(fmt.Errorf("E! MQTT Connection lost\nerror: %s\nMQTT Client will try to reconnect", err.Error())) + m.acc.AddError(fmt.Errorf("connection lost: %v", err)) + m.Log.Debugf("Disconnected %v", m.Servers) + m.state = Disconnected return } -// receiver() reads all incoming messages from the consumer, and parses them into -// influxdb metric points. -func (m *MQTTConsumer) receiver() { +func (m *MQTTConsumer) recvMessage(c mqtt.Client, msg mqtt.Message) { for { select { - case <-m.done: - return - case msg := <-m.in: - topic := msg.Topic() - metrics, err := m.parser.Parse(msg.Payload()) + case track := <-m.acc.Delivered(): + <-m.sem + _, ok := m.messages[track.ID()] + if !ok { + // Added by a previous connection + continue + } + // No ack, MQTT does not support durable handling + delete(m.messages, track.ID()) + case m.sem <- empty{}: + err := m.onMessage(m.acc, msg) if err != nil { - m.acc.AddError(fmt.Errorf("E! MQTT Parse Error\nmessage: %s\nerror: %s", - string(msg.Payload()), err.Error())) - } - - for _, metric := range metrics { - tags := metric.Tags() - tags["topic"] = topic - m.acc.AddFields(metric.Name(), metric.Fields(), tags, metric.Time()) + m.acc.AddError(err) + <-m.sem } + return } } } -func (m *MQTTConsumer) recvMessage(_ mqtt.Client, msg mqtt.Message) { - m.in <- msg +func (m *MQTTConsumer) onMessage(acc telegraf.TrackingAccumulator, msg mqtt.Message) error { + metrics, err := m.parser.Parse(msg.Payload()) + if err != nil { + return err + } + + if m.topicTag != "" { + topic := msg.Topic() + for _, metric := range metrics { + metric.AddTag(m.topicTag, topic) + } + } + + id := acc.AddTrackingMetricGroup(metrics) + m.messages[id] = true + return nil } func (m *MQTTConsumer) Stop() { - m.Lock() - defer m.Unlock() - - if m.connected { - close(m.done) + if m.state == Connected { + m.Log.Debugf("Disconnecting %v", m.Servers) m.client.Disconnect(200) - m.connected = false + m.Log.Debugf("Disconnected %v", m.Servers) + m.state = Disconnected } + m.cancel() } func (m *MQTTConsumer) Gather(acc telegraf.Accumulator) error { - if !m.connected { + if m.state == Disconnected { + m.state = Connecting + m.Log.Debugf("Connecting %v", m.Servers) m.connect() } @@ -248,13 +344,13 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { } if len(m.Servers) == 0 { - return opts, fmt.Errorf("could not get host infomations") + return opts, fmt.Errorf("could not get host informations") } for _, server := range m.Servers { // Preserve support for host:port style servers; deprecated in Telegraf 1.4.4 if !strings.Contains(server, "://") { - log.Printf("W! 
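The rewritten consumer above hands parsed metrics to a tracking accumulator and bounds the number of unacknowledged messages with a semaphore: a slot is taken for each message passed to the accumulator and freed when the output reports delivery. The following is a minimal, self-contained sketch of that backpressure idea using plain channels only; it is an illustration, not the plugin's actual types or code.

```go
package main

import "fmt"

type empty struct{}

func main() {
	const maxUndelivered = 3    // stands in for max_undelivered_messages
	sem := make(chan empty, maxUndelivered)
	delivered := make(chan int) // stands in for the accumulator's Delivered() channel

	// Fake output that eventually acknowledges every message it was handed.
	go func() {
		for i := 0; i < 10; i++ {
			delivered <- i
		}
	}()

	for msg := 0; msg < 10; msg++ {
		sem <- empty{} // blocks once maxUndelivered messages are unacknowledged
		fmt.Println("handed message to the accumulator:", msg)
		go func() {
			<-delivered // delivery notification for some in-flight message
			<-sem       // free a slot so the reader can continue
		}()
	}
}
```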
mqtt_consumer server %q should be updated to use `scheme://host:port` format", server) + m.Log.Warnf("Server %q should be updated to use `scheme://host:port` format", server) if tlsCfg == nil { server = "tcp://" + server } else { @@ -264,19 +360,28 @@ func (m *MQTTConsumer) createOpts() (*mqtt.ClientOptions, error) { opts.AddBroker(server) } - opts.SetAutoReconnect(true) + opts.SetAutoReconnect(false) opts.SetKeepAlive(time.Second * 60) opts.SetCleanSession(!m.PersistentSession) - opts.SetOnConnectHandler(m.onConnect) opts.SetConnectionLostHandler(m.onConnectionLost) return opts, nil } +func New(factory ClientFactory) *MQTTConsumer { + return &MQTTConsumer{ + Servers: []string{"tcp://127.0.0.1:1883"}, + ConnectionTimeout: defaultConnectionTimeout, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + clientFactory: factory, + state: Disconnected, + } +} + func init() { inputs.Add("mqtt_consumer", func() telegraf.Input { - return &MQTTConsumer{ - ConnectionTimeout: defaultConnectionTimeout, - } + return New(func(o *mqtt.ClientOptions) Client { + return mqtt.NewClient(o) + }) }) } diff --git a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go index eb5e3048c..4884fc050 100644 --- a/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go +++ b/plugins/inputs/mqtt_consumer/mqtt_consumer_test.go @@ -2,229 +2,377 @@ package mqtt_consumer import ( "testing" - - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" + "time" "github.com/eclipse/paho.mqtt.golang" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) -const ( - testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" - testMsgNeg = "cpu_load_short,host=server01 value=-23422.0 1422568543702900257\n" - testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" - testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" - invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" -) +type FakeClient struct { + ConnectF func() mqtt.Token + SubscribeMultipleF func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token + AddRouteF func(topic string, callback mqtt.MessageHandler) + DisconnectF func(quiesce uint) -func newTestMQTTConsumer() (*MQTTConsumer, chan mqtt.Message) { - in := make(chan mqtt.Message, 100) - n := &MQTTConsumer{ - Topics: []string{"telegraf"}, - Servers: []string{"localhost:1883"}, - in: in, - done: make(chan struct{}), - connected: true, - } + connectCallCount int + subscribeCallCount int + addRouteCallCount int + disconnectCallCount int +} - return n, in +func (c *FakeClient) Connect() mqtt.Token { + c.connectCallCount++ + return c.ConnectF() +} + +func (c *FakeClient) SubscribeMultiple(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + c.subscribeCallCount++ + return c.SubscribeMultipleF(filters, callback) +} + +func (c *FakeClient) AddRoute(topic string, callback mqtt.MessageHandler) { + c.addRouteCallCount++ + c.AddRouteF(topic, callback) +} + +func (c *FakeClient) Disconnect(quiesce uint) { + c.disconnectCallCount++ + c.DisconnectF(quiesce) +} + +type FakeParser struct { +} + +// FakeParser satisfies parsers.Parser +var _ parsers.Parser = &FakeParser{} + +func (p *FakeParser) Parse(buf []byte) ([]telegraf.Metric, error) { + panic("not implemented") +} + +func (p *FakeParser) ParseLine(line string) (telegraf.Metric, 
error) { + panic("not implemented") +} + +func (p *FakeParser) SetDefaultTags(tags map[string]string) { + panic("not implemented") +} + +type FakeToken struct { + sessionPresent bool +} + +// FakeToken satisfies mqtt.Token +var _ mqtt.Token = &FakeToken{} + +func (t *FakeToken) Wait() bool { + return true +} + +func (t *FakeToken) WaitTimeout(time.Duration) bool { + return true +} + +func (t *FakeToken) Error() error { + return nil +} + +func (t *FakeToken) SessionPresent() bool { + return t.sessionPresent +} + +// Test the basic lifecycle transitions of the plugin. +func TestLifecycleSanity(t *testing.T) { + var acc testutil.Accumulator + + plugin := New(func(o *mqtt.ClientOptions) Client { + return &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return &FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, + } + }) + plugin.Log = testutil.Logger{} + plugin.Servers = []string{"tcp://127.0.0.1"} + + parser := &FakeParser{} + plugin.SetParser(parser) + + err := plugin.Init() + require.NoError(t, err) + + err = plugin.Start(&acc) + require.NoError(t, err) + + err = plugin.Gather(&acc) + require.NoError(t, err) + + plugin.Stop() } // Test that default client has random ID func TestRandomClientID(t *testing.T) { - m1 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}} - opts, err := m1.createOpts() - assert.NoError(t, err) + var err error - m2 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}} - opts2, err2 := m2.createOpts() - assert.NoError(t, err2) + m1 := New(nil) + m1.Log = testutil.Logger{} + err = m1.Init() + require.NoError(t, err) - assert.NotEqual(t, opts.ClientID, opts2.ClientID) + m2 := New(nil) + m2.Log = testutil.Logger{} + err = m2.Init() + require.NoError(t, err) + + require.NotEqual(t, m1.opts.ClientID, m2.opts.ClientID) } -// Test that default client has random ID -func TestClientID(t *testing.T) { - m1 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}, - ClientID: "telegraf-test", - } - opts, err := m1.createOpts() - assert.NoError(t, err) - - m2 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}, - ClientID: "telegraf-test", - } - opts2, err2 := m2.createOpts() - assert.NoError(t, err2) - - assert.Equal(t, "telegraf-test", opts2.ClientID) - assert.Equal(t, "telegraf-test", opts.ClientID) -} - -// Test that Start() fails if client ID is not set but persistent is +// PersistentSession requires ClientID func TestPersistentClientIDFail(t *testing.T) { - m1 := &MQTTConsumer{ - Servers: []string{"localhost:1883"}, - PersistentSession: true, + plugin := New(nil) + plugin.Log = testutil.Logger{} + plugin.PersistentSession = true + + err := plugin.Init() + require.Error(t, err) +} + +type Message struct { +} + +func (m *Message) Duplicate() bool { + panic("not implemented") +} + +func (m *Message) Qos() byte { + panic("not implemented") +} + +func (m *Message) Retained() bool { + panic("not implemented") +} + +func (m *Message) Topic() string { + return "telegraf" +} + +func (m *Message) MessageID() uint16 { + panic("not implemented") +} + +func (m *Message) Payload() []byte { + return []byte("cpu time_idle=42i") +} + +func (m *Message) Ack() { + panic("not implemented") +} + +func TestTopicTag(t *testing.T) { + tests := []struct { + name string + topicTag func() *string + expected []telegraf.Metric + }{ + { + name: "default topic when topic tag is unset for backwards 
compatibility", + topicTag: func() *string { + return nil + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "topic": "telegraf", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "use topic tag when set", + topicTag: func() *string { + tag := "topic_tag" + return &tag + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "topic_tag": "telegraf", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "no topic tag is added when topic tag is set to the empty string", + topicTag: func() *string { + tag := "" + return &tag + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + }, + }, } - acc := testutil.Accumulator{} - err := m1.Start(&acc) - assert.Error(t, err) -} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var handler mqtt.MessageHandler + client := &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + handler = callback + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return &FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, + } -func TestRunParser(t *testing.T) { - n, in := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) + plugin := New(func(o *mqtt.ClientOptions) Client { + return client + }) + plugin.Log = testutil.Logger{} + plugin.Topics = []string{"telegraf"} + plugin.TopicTag = tt.topicTag() - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - in <- mqttMsg(testMsgNeg) - acc.Wait(1) + parser, err := parsers.NewInfluxParser() + require.NoError(t, err) + plugin.SetParser(parser) - if a := acc.NFields(); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } -} + err = plugin.Init() + require.NoError(t, err) -func TestRunParserNegativeNumber(t *testing.T) { - n, in := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - in <- mqttMsg(testMsg) - acc.Wait(1) + handler(nil, &Message{}) - if a := acc.NFields(); a != 1 { - t.Errorf("got %v, expected %v", a, 1) - } -} + plugin.Stop() -// Test that the parser ignores invalid messages -func TestRunParserInvalidMsg(t *testing.T) { - n, in := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - in <- mqttMsg(invalidMsg) - acc.WaitError(1) - - if a := acc.NFields(); a != 0 { - t.Errorf("got %v, expected %v", a, 0) - } - assert.Contains(t, acc.Errors[0].Error(), "MQTT Parse Error") -} - -// Test that the parser parses line format messages into metrics -func TestRunParserAndGather(t *testing.T) { - n, in := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - go n.receiver() - in <- mqttMsg(testMsg) - acc.Wait(1) - - n.Gather(&acc) - - acc.AssertContainsFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses graphite format messages into metrics -func TestRunParserAndGatherGraphite(t *testing.T) { - n, in := 
newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) - go n.receiver() - in <- mqttMsg(testMsgGraphite) - - n.Gather(&acc) - acc.Wait(1) - - acc.AssertContainsFields(t, "cpu_load_short_graphite", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses json format messages into metrics -func TestRunParserAndGatherJSON(t *testing.T) { - n, in := newTestMQTTConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) - go n.receiver() - in <- mqttMsg(testMsgJSON) - - n.Gather(&acc) - - acc.Wait(1) - - acc.AssertContainsFields(t, "nats_json_test", - map[string]interface{}{ - "a": float64(5), - "b_c": float64(6), + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) }) -} - -func mqttMsg(val string) mqtt.Message { - return &message{ - topic: "telegraf/unit_test", - payload: []byte(val), } } -// Take the message struct from the paho mqtt client library for returning -// a test message interface. -type message struct { - duplicate bool - qos byte - retained bool - topic string - messageID uint16 - payload []byte +func TestAddRouteCalledForEachTopic(t *testing.T) { + client := &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return &FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, + } + plugin := New(func(o *mqtt.ClientOptions) Client { + return client + }) + plugin.Log = testutil.Logger{} + plugin.Topics = []string{"a", "b"} + + err := plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + plugin.Stop() + + require.Equal(t, client.addRouteCallCount, 2) } -func (m *message) Duplicate() bool { - return m.duplicate +func TestSubscribeCalledIfNoSession(t *testing.T) { + client := &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return &FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, + } + plugin := New(func(o *mqtt.ClientOptions) Client { + return client + }) + plugin.Log = testutil.Logger{} + plugin.Topics = []string{"b"} + + err := plugin.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) + + plugin.Stop() + + require.Equal(t, client.subscribeCallCount, 1) } -func (m *message) Qos() byte { - return m.qos -} +func TestSubscribeNotCalledIfSession(t *testing.T) { + client := &FakeClient{ + ConnectF: func() mqtt.Token { + return &FakeToken{sessionPresent: true} + }, + AddRouteF: func(topic string, callback mqtt.MessageHandler) { + }, + SubscribeMultipleF: func(filters map[string]byte, callback mqtt.MessageHandler) mqtt.Token { + return &FakeToken{} + }, + DisconnectF: func(quiesce uint) { + }, + } + plugin := New(func(o *mqtt.ClientOptions) Client { + return client + }) + plugin.Log = testutil.Logger{} + plugin.Topics = []string{"b"} -func (m *message) Retained() bool { - return m.retained -} + err := plugin.Init() + require.NoError(t, err) -func (m *message) Topic() string { - return m.topic -} + var 
acc testutil.Accumulator + err = plugin.Start(&acc) + require.NoError(t, err) -func (m *message) MessageID() uint16 { - return m.messageID -} + plugin.Stop() -func (m *message) Payload() []byte { - return m.payload + require.Equal(t, client.subscribeCallCount, 0) } diff --git a/plugins/inputs/multifile/README.md b/plugins/inputs/multifile/README.md new file mode 100644 index 000000000..2d71ac159 --- /dev/null +++ b/plugins/inputs/multifile/README.md @@ -0,0 +1,68 @@ +# Multifile Input Plugin + +The multifile input plugin allows Telegraf to combine data from multiple files +into a single metric, creating one field or tag per file. This is often +useful creating custom metrics from the `/sys` or `/proc` filesystems. + +> Note: If you wish to parse metrics from a single file formatted in one of the supported +> [input data formats][], you should use the [file][] input plugin instead. + +### Configuration +```toml +[[inputs.multifile]] + ## Base directory where telegraf will look for files. + ## Omit this option to use absolute paths. + base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" + + ## If true discard all data when a single file can't be read. + ## Else, Telegraf omits the field generated from this file. + # fail_early = true + + ## Files to parse each interval. + [[inputs.multifile.file]] + file = "in_pressure_input" + dest = "pressure" + conversion = "float" + [[inputs.multifile.file]] + file = "in_temp_input" + dest = "temperature" + conversion = "float(3)" + [[inputs.multifile.file]] + file = "in_humidityrelative_input" + dest = "humidityrelative" + conversion = "float(3)" +``` + +Each file table can contain the following options: +* `file`: +Path of the file to be parsed, relative to the `base_dir`. +* `dest`: +Name of the field/tag key, defaults to `$(basename file)`. +* `conversion`: +Data format used to parse the file contents: + * `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Effectively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. + * `float`: Converts the value into a float with no adjustment. Same as `float(0)`. + * `int`: Converts the value into an integer. + * `string`, `""`: No conversion. + * `bool`: Converts the value into a boolean. + * `tag`: File content is used as a tag. + +### Example Output +This example shows a BME280 connected to a Raspberry Pi, using the sample config. 
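+As a hedged aside (plain Go, not part of the plugin itself), this is how the `float(X)` rule described above yields the temperature in the output that follows: `in_temp_input` contains `20400`, and `conversion = "float(3)"` shifts the decimal point three places to give `20.4`.
+
+```go
+package main
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+)
+
+// floatX applies the documented float(X) conversion: parse the file contents
+// as a number and shift the decimal point X places to the left.
+func floatX(raw string, x int) (float64, error) {
+	v, err := strconv.ParseFloat(raw, 64)
+	if err != nil {
+		return 0, err
+	}
+	return v / math.Pow10(x), nil
+}
+
+func main() {
+	temperature, _ := floatX("20400", 3) // in_temp_input with conversion = "float(3)"
+	fmt.Println(temperature)             // 20.4
+}
+```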
+``` +multifile pressure=101.343285156,temperature=20.4,humidityrelative=48.9 1547202076000000000 +``` + +To reproduce this, connect a BMP280 to the board's GPIO pins and register the BME280 device driver +``` +cd /sys/bus/i2c/devices/i2c-1 +echo bme280 0x76 > new_device +``` + +The kernel driver provides the following files in `/sys/bus/i2c/devices/1-0076/iio:device0`: +* `in_humidityrelative_input`: `48900` +* `in_pressure_input`: `101.343285156` +* `in_temp_input`: `20400` + +[input data formats]: /docs/DATA_FORMATS_INPUT.md +[file]: /plugins/inputs/file/README.md diff --git a/plugins/inputs/multifile/multifile.go b/plugins/inputs/multifile/multifile.go new file mode 100644 index 000000000..9c9813d9a --- /dev/null +++ b/plugins/inputs/multifile/multifile.go @@ -0,0 +1,149 @@ +package multifile + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "math" + "path" + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type MultiFile struct { + BaseDir string + FailEarly bool + Files []File `toml:"file"` + + initialized bool +} + +type File struct { + Name string `toml:"file"` + Dest string + Conversion string +} + +const sampleConfig = ` + ## Base directory where telegraf will look for files. + ## Omit this option to use absolute paths. + base_dir = "/sys/bus/i2c/devices/1-0076/iio:device0" + + ## If true, Telegraf discard all data when a single file can't be read. + ## Else, Telegraf omits the field generated from this file. + # fail_early = true + + ## Files to parse each interval. + [[inputs.multifile.file]] + file = "in_pressure_input" + dest = "pressure" + conversion = "float" + [[inputs.multifile.file]] + file = "in_temp_input" + dest = "temperature" + conversion = "float(3)" + [[inputs.multifile.file]] + file = "in_humidityrelative_input" + dest = "humidityrelative" + conversion = "float(3)" +` + +// SampleConfig returns the default configuration of the Input +func (m *MultiFile) SampleConfig() string { + return sampleConfig +} + +func (m *MultiFile) Description() string { + return "Aggregates the contents of multiple files into a single point" +} + +func (m *MultiFile) init() { + if m.initialized { + return + } + + for i, file := range m.Files { + if m.BaseDir != "" { + m.Files[i].Name = path.Join(m.BaseDir, file.Name) + } + if file.Dest == "" { + m.Files[i].Dest = path.Base(file.Name) + } + } + + m.initialized = true +} + +func (m *MultiFile) Gather(acc telegraf.Accumulator) error { + m.init() + now := time.Now() + fields := make(map[string]interface{}) + tags := make(map[string]string) + + for _, file := range m.Files { + fileContents, err := ioutil.ReadFile(file.Name) + + if err != nil { + if m.FailEarly { + return err + } + continue + } + + vStr := string(bytes.TrimSpace(bytes.Trim(fileContents, "\x00"))) + + if file.Conversion == "tag" { + tags[file.Dest] = vStr + continue + } + + var value interface{} + + var d int = 0 + if _, errfmt := fmt.Sscanf(file.Conversion, "float(%d)", &d); errfmt == nil || file.Conversion == "float" { + var v float64 + v, err = strconv.ParseFloat(vStr, 64) + value = v / math.Pow10(d) + } + + if file.Conversion == "int" { + value, err = strconv.ParseInt(vStr, 10, 64) + } + + if file.Conversion == "string" || file.Conversion == "" { + value = vStr + } + + if file.Conversion == "bool" { + value, err = strconv.ParseBool(vStr) + } + + if err != nil { + if m.FailEarly { + return err + } + continue + } + + if value == nil { + return errors.New(fmt.Sprintf("invalid conversion %v", 
file.Conversion)) + } + + fields[file.Dest] = value + } + + acc.AddGauge("multifile", fields, tags, now) + return nil +} + +func init() { + inputs.Add("multifile", func() telegraf.Input { + return &MultiFile{ + FailEarly: true, + } + }) +} diff --git a/plugins/inputs/multifile/multifile_test.go b/plugins/inputs/multifile/multifile_test.go new file mode 100644 index 000000000..b12f29f35 --- /dev/null +++ b/plugins/inputs/multifile/multifile_test.go @@ -0,0 +1,76 @@ +package multifile + +import ( + "os" + "path" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFileTypes(t *testing.T) { + wd, _ := os.Getwd() + + m := MultiFile{ + BaseDir: path.Join(wd, `testdata`), + FailEarly: true, + Files: []File{ + {Name: `bool.txt`, Dest: `examplebool`, Conversion: `bool`}, + {Name: `float.txt`, Dest: `examplefloat`, Conversion: `float`}, + {Name: `int.txt`, Dest: `examplefloatX`, Conversion: `float(3)`}, + {Name: `int.txt`, Dest: `exampleint`, Conversion: `int`}, + {Name: `string.txt`, Dest: `examplestring`}, + {Name: `tag.txt`, Dest: `exampletag`, Conversion: `tag`}, + {Name: `int.txt`, Conversion: `int`}, + }, + } + + var acc testutil.Accumulator + + err := m.Gather(&acc) + + require.NoError(t, err) + assert.Equal(t, map[string]string{"exampletag": "test"}, acc.Metrics[0].Tags) + assert.Equal(t, map[string]interface{}{ + "examplebool": true, + "examplestring": "hello world", + "exampleint": int64(123456), + "int.txt": int64(123456), + "examplefloat": 123.456, + "examplefloatX": 123.456, + }, acc.Metrics[0].Fields) +} + +func FailEarly(failEarly bool, t *testing.T) error { + wd, _ := os.Getwd() + + m := MultiFile{ + BaseDir: path.Join(wd, `testdata`), + FailEarly: failEarly, + Files: []File{ + {Name: `int.txt`, Dest: `exampleint`, Conversion: `int`}, + {Name: `int.txt`, Dest: `exampleerror`, Conversion: `bool`}, + }, + } + + var acc testutil.Accumulator + + err := m.Gather(&acc) + + if err == nil { + assert.Equal(t, map[string]interface{}{ + "exampleint": int64(123456), + }, acc.Metrics[0].Fields) + } + + return err +} + +func TestFailEarly(t *testing.T) { + err := FailEarly(false, t) + require.NoError(t, err) + err = FailEarly(true, t) + require.Error(t, err) +} diff --git a/plugins/inputs/multifile/testdata/bool.txt b/plugins/inputs/multifile/testdata/bool.txt new file mode 100644 index 000000000..27ba77dda --- /dev/null +++ b/plugins/inputs/multifile/testdata/bool.txt @@ -0,0 +1 @@ +true diff --git a/plugins/inputs/multifile/testdata/float.txt b/plugins/inputs/multifile/testdata/float.txt new file mode 100644 index 000000000..d5910a0a6 --- /dev/null +++ b/plugins/inputs/multifile/testdata/float.txt @@ -0,0 +1 @@ +123.456 diff --git a/plugins/inputs/multifile/testdata/int.txt b/plugins/inputs/multifile/testdata/int.txt new file mode 100644 index 000000000..9f358a4ad --- /dev/null +++ b/plugins/inputs/multifile/testdata/int.txt @@ -0,0 +1 @@ +123456 diff --git a/plugins/inputs/multifile/testdata/string.txt b/plugins/inputs/multifile/testdata/string.txt new file mode 100644 index 000000000..9409bd50f --- /dev/null +++ b/plugins/inputs/multifile/testdata/string.txt @@ -0,0 +1 @@ + hello world diff --git a/plugins/inputs/multifile/testdata/tag.txt b/plugins/inputs/multifile/testdata/tag.txt new file mode 100644 index 000000000..9daeafb98 --- /dev/null +++ b/plugins/inputs/multifile/testdata/tag.txt @@ -0,0 +1 @@ +test diff --git a/plugins/inputs/mysql/README.md b/plugins/inputs/mysql/README.md index 
564d75e61..8b4717168 100644 --- a/plugins/inputs/mysql/README.md +++ b/plugins/inputs/mysql/README.md @@ -21,10 +21,9 @@ This plugin gathers the statistic data from MySQL server ### Configuration ```toml -# Read metrics from one or many mysql servers [[inputs.mysql]] ## specify servers via a url matching: - ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] + ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name ## e.g. ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] @@ -32,60 +31,80 @@ This plugin gathers the statistic data from MySQL server # ## If no servers are specified, then localhost is used as the host. servers = ["tcp(127.0.0.1:3306)/"] - ## the limits for metrics form perf_events_statements - perf_events_statements_digest_text_limit = 120 - perf_events_statements_limit = 250 - perf_events_statements_time_limit = 86400 - # + + ## Selects the metric output format. + ## + ## This option exists to maintain backwards compatibility, if you have + ## existing metrics do not set or change this value until you are ready to + ## migrate to the new format. + ## + ## If you do not have existing metrics from this plugin set to the latest + ## version. + ## + ## Telegraf >=1.6: metric_version = 2 + ## <1.6: metric_version = 1 (or unset) + metric_version = 2 + ## if the list is empty, then metrics are gathered from all database tables - table_schema_databases = [] - # + # table_schema_databases = [] + ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list - gather_table_schema = false - # + # gather_table_schema = false + ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST - gather_process_list = true - # - ## gather thread state counts from INFORMATION_SCHEMA.USER_STATISTICS - gather_user_statistics = true - # + # gather_process_list = false + + ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS + # gather_user_statistics = false + ## gather auto_increment columns and max values from information schema - gather_info_schema_auto_inc = true - # + # gather_info_schema_auto_inc = false + ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS - gather_innodb_metrics = true - # + # gather_innodb_metrics = false + ## gather metrics from SHOW SLAVE STATUS command output - gather_slave_status = true - # + # gather_slave_status = false + ## gather metrics from SHOW BINARY LOGS command output - gather_binary_logs = false - # + # gather_binary_logs = false + + ## gather metrics from SHOW GLOBAL VARIABLES command output + # gather_global_variables = true + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE - gather_table_io_waits = false - # + # gather_table_io_waits = false + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS - gather_table_lock_waits = false - # + # gather_table_lock_waits = false + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE - gather_index_io_waits = false - # + # gather_index_io_waits = false + ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS - gather_event_waits = false - # + # gather_event_waits = false + ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME - gather_file_events_stats = false - # + # gather_file_events_stats = false + ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST - gather_perf_events_statements = false - # + # gather_perf_events_statements = false + + ## 
the limits for metrics from perf_events_statements + # perf_events_statements_digest_text_limit = 120 + # perf_events_statements_limit = 250 + # perf_events_statements_time_limit = 86400 + ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) - interval_slow = "30m" + ## example: interval_slow = "30m" + # interval_slow = "" ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) - tls_ca = "/etc/telegraf/ca.pem" - tls_cert = "/etc/telegraf/cert.pem" - tls_key = "/etc/telegraf/key.pem" + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` #### Metric Version @@ -134,7 +153,7 @@ If you wish to remove the `name_suffix` you may use Kapacitor to copy the historical data to the default name. Do this only after retiring the old measurement name. -1. Use the techinique described above to write to multiple locations: +1. Use the technique described above to write to multiple locations: ```toml [[inputs.mysql]] servers = ["tcp(127.0.0.1:3306)/"] @@ -264,7 +283,7 @@ The unit of fields varies by the tags. * events_statements_rows_examined_total(float, number) * events_statements_tmp_tables_total(float, number) * events_statements_tmp_disk_tables_total(float, number) - * events_statements_sort_merge_passes_totales(float, number) + * events_statements_sort_merge_passes_totals(float, number) * events_statements_sort_rows_total(float, number) * events_statements_no_index_used_total(float, number) * Table schema - gathers statistics of each schema. It has following measurements diff --git a/plugins/inputs/mysql/mysql.go b/plugins/inputs/mysql/mysql.go index c17de3dcd..81db026ec 100644 --- a/plugins/inputs/mysql/mysql.go +++ b/plugins/inputs/mysql/mysql.go @@ -9,12 +9,12 @@ import ( "sync" "time" + "github.com/go-sql-driver/mysql" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/inputs/mysql/v1" - - "github.com/go-sql-driver/mysql" + "github.com/influxdata/telegraf/plugins/inputs/mysql/v2" ) type Mysql struct { @@ -36,12 +36,18 @@ type Mysql struct { GatherTableSchema bool `toml:"gather_table_schema"` GatherFileEventsStats bool `toml:"gather_file_events_stats"` GatherPerfEventsStatements bool `toml:"gather_perf_events_statements"` + GatherGlobalVars bool `toml:"gather_global_variables"` IntervalSlow string `toml:"interval_slow"` MetricVersion int `toml:"metric_version"` + + Log telegraf.Logger `toml:"-"` tls.ClientConfig + lastT time.Time + initDone bool + scanIntervalSlow uint32 } -var sampleConfig = ` +const sampleConfig = ` ## specify servers via a url matching: ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name @@ -65,55 +71,59 @@ var sampleConfig = ` ## <1.6: metric_version = 1 (or unset) metric_version = 2 - ## the limits for metrics form perf_events_statements - perf_events_statements_digest_text_limit = 120 - perf_events_statements_limit = 250 - perf_events_statements_time_limit = 86400 - # - ## if the list is empty, then metrics are gathered from all databasee tables - table_schema_databases = [] - # + ## if the list is empty, then metrics are gathered from all database tables + # table_schema_databases = [] + ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above 
list - gather_table_schema = false - # + # gather_table_schema = false + ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST - gather_process_list = true - # + # gather_process_list = false + ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS - gather_user_statistics = true - # + # gather_user_statistics = false + ## gather auto_increment columns and max values from information schema - gather_info_schema_auto_inc = true - # + # gather_info_schema_auto_inc = false + ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS - gather_innodb_metrics = true - # + # gather_innodb_metrics = false + ## gather metrics from SHOW SLAVE STATUS command output - gather_slave_status = true - # + # gather_slave_status = false + ## gather metrics from SHOW BINARY LOGS command output - gather_binary_logs = false - # + # gather_binary_logs = false + + ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES + # gather_global_variables = true + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE - gather_table_io_waits = false - # + # gather_table_io_waits = false + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS - gather_table_lock_waits = false - # + # gather_table_lock_waits = false + ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE - gather_index_io_waits = false - # + # gather_index_io_waits = false + ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS - gather_event_waits = false - # + # gather_event_waits = false + ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME - gather_file_events_stats = false - # + # gather_file_events_stats = false + ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST - gather_perf_events_statements = false - # + # gather_perf_events_statements = false + + ## the limits for metrics form perf_events_statements + # perf_events_statements_digest_text_limit = 120 + # perf_events_statements_limit = 250 + # perf_events_statements_time_limit = 86400 + ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) - interval_slow = "30m" + ## example: interval_slow = "30m" + # interval_slow = "" ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) # tls_ca = "/etc/telegraf/ca.pem" @@ -123,7 +133,13 @@ var sampleConfig = ` # insecure_skip_verify = false ` -var defaultTimeout = time.Second * time.Duration(5) +const ( + defaultTimeout = 5 * time.Second + defaultPerfEventsStatementsDigestTextLimit = 120 + defaultPerfEventsStatementsLimit = 250 + defaultPerfEventsStatementsTimeLimit = 86400 + defaultGatherGlobalVars = true +) func (m *Mysql) SampleConfig() string { return sampleConfig @@ -133,21 +149,16 @@ func (m *Mysql) Description() string { return "Read metrics from one or many mysql servers" } -var ( - localhost = "" - lastT time.Time - initDone = false - scanIntervalSlow uint32 -) +const localhost = "" func (m *Mysql) InitMysql() { if len(m.IntervalSlow) > 0 { interval, err := time.ParseDuration(m.IntervalSlow) if err == nil && interval.Seconds() >= 1.0 { - scanIntervalSlow = uint32(interval.Seconds()) + m.scanIntervalSlow = uint32(interval.Seconds()) } } - initDone = true + m.initDone = true } func (m *Mysql) Gather(acc telegraf.Accumulator) error { @@ -156,7 +167,7 @@ func (m *Mysql) Gather(acc telegraf.Accumulator) error { return m.gatherServer(localhost, acc) } // Initialise additional query intervals - if !initDone { + if !m.initDone { m.InitMysql() } @@ -184,6 +195,7 @@ func (m *Mysql) 
Gather(acc telegraf.Accumulator) error { return nil } +// These are const but can't be declared as such because golang doesn't allow const maps var ( // status counter generalThreadStates = map[string]uint32{ @@ -202,10 +214,10 @@ var ( "deleting": uint32(0), "executing": uint32(0), "execution of init_command": uint32(0), - "end": uint32(0), - "freeing items": uint32(0), - "flushing tables": uint32(0), - "fulltext initialization": uint32(0), + "end": uint32(0), + "freeing items": uint32(0), + "flushing tables": uint32(0), + "fulltext initialization": uint32(0), "idle": uint32(0), "init": uint32(0), "killed": uint32(0), @@ -241,8 +253,8 @@ var ( } // plaintext statuses stateStatusMappings = map[string]string{ - "user sleep": "idle", - "creating index": "altering table", + "user sleep": "idle", + "creating index": "altering table", "committing alter table to storage engine": "altering table", "discard or import tablespace": "altering table", "rename": "altering table", @@ -424,14 +436,16 @@ func (m *Mysql) gatherServer(serv string, acc telegraf.Accumulator) error { return err } - // Global Variables may be gathered less often - if len(m.IntervalSlow) > 0 { - if uint32(time.Since(lastT).Seconds()) >= scanIntervalSlow { - err = m.gatherGlobalVariables(db, serv, acc) - if err != nil { - return err + if m.GatherGlobalVars { + // Global Variables may be gathered less often + if len(m.IntervalSlow) > 0 { + if uint32(time.Since(m.lastT).Seconds()) >= m.scanIntervalSlow { + err = m.gatherGlobalVariables(db, serv, acc) + if err != nil { + return err + } + m.lastT = time.Now() } - lastT = time.Now() } } @@ -550,14 +564,20 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu return err } key = strings.ToLower(key) + // parse mysql version and put into field and tag if strings.Contains(key, "version") { fields[key] = string(val) tags[key] = string(val) } - if value, ok := m.parseValue(val); ok { + + value, err := m.parseGlobalVariables(key, val) + if err != nil { + m.Log.Debugf("Error parsing global variable %q: %v", key, err) + } else { fields[key] = value } + // Send 20 fields at a time if len(fields) >= 20 { acc.AddFields("mysql_variables", fields, tags) @@ -571,6 +591,18 @@ func (m *Mysql) gatherGlobalVariables(db *sql.DB, serv string, acc telegraf.Accu return nil } +func (m *Mysql) parseGlobalVariables(key string, value sql.RawBytes) (interface{}, error) { + if m.MetricVersion < 2 { + v, ok := v1.ParseValue(value) + if ok { + return v, nil + } + return v, fmt.Errorf("could not parse value: %q", string(value)) + } else { + return v2.ConvertGlobalVariables(key, value) + } +} + // gatherSlaveStatuses can be used to get replication analytics // When the server is slave, then it returns only one row. 
// If the multi-source replication is set, then everything works differently @@ -744,7 +776,10 @@ func (m *Mysql) gatherGlobalStatuses(db *sql.DB, serv string, acc telegraf.Accum } } else { key = strings.ToLower(key) - if value, ok := m.parseValue(val); ok { + value, err := v2.ConvertGlobalStatus(key, val) + if err != nil { + m.Log.Debugf("Error parsing global status: %v", err) + } else { fields[key] = value } } @@ -995,6 +1030,30 @@ func getColSlice(l int) ([]interface{}, error) { &total_ssl_connections, &max_statement_time_exceeded, }, nil + case 21: // mysql 5.5 + return []interface{}{ + &user, + &total_connections, + &concurrent_connections, + &connected_time, + &busy_time, + &cpu_time, + &bytes_received, + &bytes_sent, + &binlog_bytes_written, + &rows_fetched, + &rows_updated, + &table_rows_read, + &select_commands, + &update_commands, + &other_commands, + &commit_transactions, + &rollback_transactions, + &denied_connections, + &lost_connections, + &access_denied, + &empty_queries, + }, nil case 22: // percona return []interface{}{ &user, @@ -1711,6 +1770,11 @@ func getDSNTag(dsn string) string { func init() { inputs.Add("mysql", func() telegraf.Input { - return &Mysql{} + return &Mysql{ + PerfEventsStatementsDigestTextLimit: defaultPerfEventsStatementsDigestTextLimit, + PerfEventsStatementsLimit: defaultPerfEventsStatementsLimit, + PerfEventsStatementsTimeLimit: defaultPerfEventsStatementsTimeLimit, + GatherGlobalVars: defaultGatherGlobalVars, + } }) } diff --git a/plugins/inputs/mysql/mysql_test.go b/plugins/inputs/mysql/mysql_test.go index b4983ba0e..be9c338bf 100644 --- a/plugins/inputs/mysql/mysql_test.go +++ b/plugins/inputs/mysql/mysql_test.go @@ -26,6 +26,54 @@ func TestMysqlDefaultsToLocal(t *testing.T) { assert.True(t, acc.HasMeasurement("mysql")) } +func TestMysqlMultipleInstances(t *testing.T) { + // Invoke Gather() from two separate configurations and + // confirm they don't interfere with each other + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + testServer := "root@tcp(127.0.0.1:3306)/?tls=false" + m := &Mysql{ + Servers: []string{testServer}, + IntervalSlow: "30s", + } + + var acc, acc2 testutil.Accumulator + err := m.Gather(&acc) + require.NoError(t, err) + assert.True(t, acc.HasMeasurement("mysql")) + // acc should have global variables + assert.True(t, acc.HasMeasurement("mysql_variables")) + + m2 := &Mysql{ + Servers: []string{testServer}, + } + err = m2.Gather(&acc2) + require.NoError(t, err) + assert.True(t, acc2.HasMeasurement("mysql")) + // acc2 should not have global variables + assert.False(t, acc2.HasMeasurement("mysql_variables")) +} + +func TestMysqlMultipleInits(t *testing.T) { + m := &Mysql{ + IntervalSlow: "30s", + } + m2 := &Mysql{} + + m.InitMysql() + assert.True(t, m.initDone) + assert.False(t, m2.initDone) + assert.Equal(t, m.scanIntervalSlow, uint32(30)) + assert.Equal(t, m2.scanIntervalSlow, uint32(0)) + + m2.InitMysql() + assert.True(t, m.initDone) + assert.True(t, m2.initDone) + assert.Equal(t, m.scanIntervalSlow, uint32(30)) + assert.Equal(t, m2.scanIntervalSlow, uint32(0)) +} + func TestMysqlGetDSNTag(t *testing.T) { tests := []struct { input string diff --git a/plugins/inputs/mysql/v2/convert.go b/plugins/inputs/mysql/v2/convert.go new file mode 100644 index 000000000..a3ac3e976 --- /dev/null +++ b/plugins/inputs/mysql/v2/convert.go @@ -0,0 +1,103 @@ +package v2 + +import ( + "bytes" + "database/sql" + "fmt" + "strconv" +) + +type ConversionFunc func(value sql.RawBytes) (interface{}, error) + +func 
ParseInt(value sql.RawBytes) (interface{}, error) { + v, err := strconv.ParseInt(string(value), 10, 64) + + // Ignore ErrRange. When this error is set the returned value is "the + // maximum magnitude integer of the appropriate bitSize and sign." + if err, ok := err.(*strconv.NumError); ok && err.Err == strconv.ErrRange { + return v, nil + } + + return v, err +} + +func ParseBoolAsInteger(value sql.RawBytes) (interface{}, error) { + if bytes.EqualFold(value, []byte("YES")) || bytes.EqualFold(value, []byte("ON")) { + return int64(1), nil + } + + return int64(0), nil +} + +func ParseGTIDMode(value sql.RawBytes) (interface{}, error) { + // https://dev.mysql.com/doc/refman/8.0/en/replication-mode-change-online-concepts.html + v := string(value) + switch v { + case "OFF": + return int64(0), nil + case "ON": + return int64(1), nil + case "OFF_PERMISSIVE": + return int64(0), nil + case "ON_PERMISSIVE": + return int64(1), nil + default: + return nil, fmt.Errorf("unrecognized gtid_mode: %q", v) + } +} + +func ParseValue(value sql.RawBytes) (interface{}, error) { + if bytes.EqualFold(value, []byte("YES")) || bytes.Compare(value, []byte("ON")) == 0 { + return 1, nil + } + + if bytes.EqualFold(value, []byte("NO")) || bytes.Compare(value, []byte("OFF")) == 0 { + return 0, nil + } + + if val, err := strconv.ParseInt(string(value), 10, 64); err == nil { + return val, nil + } + if val, err := strconv.ParseFloat(string(value), 64); err == nil { + return val, nil + } + + if len(string(value)) > 0 { + return string(value), nil + } + + return nil, fmt.Errorf("unconvertible value: %q", string(value)) +} + +var GlobalStatusConversions = map[string]ConversionFunc{ + "ssl_ctx_verify_depth": ParseInt, + "ssl_verify_depth": ParseInt, +} + +var GlobalVariableConversions = map[string]ConversionFunc{ + "gtid_mode": ParseGTIDMode, +} + +func ConvertGlobalStatus(key string, value sql.RawBytes) (interface{}, error) { + if bytes.Equal(value, []byte("")) { + return nil, nil + } + + if conv, ok := GlobalStatusConversions[key]; ok { + return conv(value) + } + + return ParseValue(value) +} + +func ConvertGlobalVariables(key string, value sql.RawBytes) (interface{}, error) { + if bytes.Equal(value, []byte("")) { + return nil, nil + } + + if conv, ok := GlobalVariableConversions[key]; ok { + return conv(value) + } + + return ParseValue(value) +} diff --git a/plugins/inputs/mysql/v2/convert_test.go b/plugins/inputs/mysql/v2/convert_test.go new file mode 100644 index 000000000..47189c18d --- /dev/null +++ b/plugins/inputs/mysql/v2/convert_test.go @@ -0,0 +1,86 @@ +package v2 + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestConvertGlobalStatus(t *testing.T) { + tests := []struct { + name string + key string + value sql.RawBytes + expected interface{} + expectedErr error + }{ + { + name: "default", + key: "ssl_ctx_verify_depth", + value: []byte("0"), + expected: int64(0), + expectedErr: nil, + }, + { + name: "overflow int64", + key: "ssl_ctx_verify_depth", + value: []byte("18446744073709551615"), + expected: int64(9223372036854775807), + expectedErr: nil, + }, + { + name: "defined variable but unset", + key: "ssl_ctx_verify_depth", + value: []byte(""), + expected: nil, + expectedErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual, err := ConvertGlobalStatus(tt.key, tt.value) + require.Equal(t, tt.expectedErr, err) + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestCovertGlobalVariables(t *testing.T) { + tests := []struct { + 
name string + key string + value sql.RawBytes + expected interface{} + expectedErr error + }{ + { + name: "boolean type mysql<=5.6", + key: "gtid_mode", + value: []byte("ON"), + expected: int64(1), + expectedErr: nil, + }, + { + name: "enum type mysql>=5.7", + key: "gtid_mode", + value: []byte("ON_PERMISSIVE"), + expected: int64(1), + expectedErr: nil, + }, + { + name: "defined variable but unset", + key: "ssl_ctx_verify_depth", + value: []byte(""), + expected: nil, + expectedErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual, err := ConvertGlobalVariables(tt.key, tt.value) + require.Equal(t, tt.expectedErr, err) + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/plugins/inputs/nats/nats.go b/plugins/inputs/nats/nats.go index ba1cc803c..1afb0046d 100644 --- a/plugins/inputs/nats/nats.go +++ b/plugins/inputs/nats/nats.go @@ -1,21 +1,19 @@ -// +build !freebsd +// +build !freebsd freebsd,cgo package nats import ( + "encoding/json" "io/ioutil" "net/http" "net/url" "path" "time" - "encoding/json" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" - - gnatsd "github.com/nats-io/gnatsd/server" + gnatsd "github.com/nats-io/nats-server/v2/server" ) type Nats struct { diff --git a/plugins/inputs/nats/nats_freebsd.go b/plugins/inputs/nats/nats_freebsd.go index c23a6eec5..08d08ba76 100644 --- a/plugins/inputs/nats/nats_freebsd.go +++ b/plugins/inputs/nats/nats_freebsd.go @@ -1,3 +1,3 @@ -// +build freebsd +// +build freebsd,!cgo package nats diff --git a/plugins/inputs/nats/nats_test.go b/plugins/inputs/nats/nats_test.go index ef387f7e4..ece22288f 100644 --- a/plugins/inputs/nats/nats_test.go +++ b/plugins/inputs/nats/nats_test.go @@ -1,4 +1,4 @@ -// +build !freebsd +// +build !freebsd freebsd,cgo package nats diff --git a/plugins/inputs/nats_consumer/README.md b/plugins/inputs/nats_consumer/README.md index 18dd57f07..ae40d9185 100644 --- a/plugins/inputs/nats_consumer/README.md +++ b/plugins/inputs/nats_consumer/README.md @@ -1,32 +1,63 @@ # NATS Consumer Input Plugin -The [NATS](http://www.nats.io/about/) consumer plugin reads from -specified NATS subjects and adds messages to InfluxDB. The plugin expects messages -in the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). -A [Queue Group](http://www.nats.io/documentation/concepts/nats-queueing/) -is used when subscribing to subjects so multiple instances of telegraf can read -from a NATS cluster in parallel. +The [NATS][nats] consumer plugin reads from the specified NATS subjects and +creates metrics using one of the supported [input data formats][]. -## Configuration +A [Queue Group][queue group] is used when subscribing to subjects so multiple +instances of telegraf can read from a NATS cluster in parallel. + +### Configuration: ```toml -# Read metrics from NATS subject(s) [[inputs.nats_consumer]] ## urls of NATS servers servers = ["nats://localhost:4222"] - ## Use Transport Layer Security - secure = false + ## subject(s) to consume subjects = ["telegraf"] + ## name a queue group queue_group = "telegraf_consumers" - ## Maximum number of metrics to buffer between collection intervals - metric_buffer = 100000 - ## Data format to consume. 
+ ## Optional credentials + # username = "" + # password = "" + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + + ## Use Transport Layer Security + # secure = false + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Sets the limits for pending msgs and bytes for each subscription + ## These shouldn't need to be adjusted except in very high throughput scenarios + # pending_message_limit = 65536 + # pending_bytes_limit = 67108864 + + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "influx" ``` + +[nats]: https://www.nats.io/about/ +[input data formats]: /docs/DATA_FORMATS_INPUT.md +[queue group]: https://www.nats.io/documentation/concepts/nats-queueing/ diff --git a/plugins/inputs/nats_consumer/nats_consumer.go b/plugins/inputs/nats_consumer/nats_consumer.go index cb3eb3017..6ac19b0a8 100644 --- a/plugins/inputs/nats_consumer/nats_consumer.go +++ b/plugins/inputs/nats_consumer/nats_consumer.go @@ -1,16 +1,25 @@ package natsconsumer import ( + "context" "fmt" - "log" + "strings" "sync" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" - nats "github.com/nats-io/go-nats" + "github.com/nats-io/nats.go" ) +var ( + defaultMaxUndeliveredMessages = 1000 +) + +type empty struct{} +type semaphore chan empty + type natsError struct { conn *nats.Conn sub *nats.Subscription @@ -23,48 +32,82 @@ func (e natsError) Error() string { } type natsConsumer struct { - QueueGroup string - Subjects []string - Servers []string - Secure bool + QueueGroup string `toml:"queue_group"` + Subjects []string `toml:"subjects"` + Servers []string `toml:"servers"` + Secure bool `toml:"secure"` + Username string `toml:"username"` + Password string `toml:"password"` + Credentials string `toml:"credentials"` + + tls.ClientConfig + + Log telegraf.Logger // Client pending limits: - PendingMessageLimit int - PendingBytesLimit int + PendingMessageLimit int `toml:"pending_message_limit"` + PendingBytesLimit int `toml:"pending_bytes_limit"` - // Legacy metric buffer support + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + + // Legacy metric buffer support; deprecated in v0.10.3 MetricBuffer int + conn *nats.Conn + subs []*nats.Subscription + parser parsers.Parser - - sync.Mutex - wg sync.WaitGroup - Conn *nats.Conn - Subs []*nats.Subscription - // channel for all incoming NATS messages in chan *nats.Msg // channel for all NATS read errors - errs chan error - done chan struct{} - acc telegraf.Accumulator + errs chan error + acc telegraf.TrackingAccumulator + wg 
sync.WaitGroup + cancel context.CancelFunc } var sampleConfig = ` ## urls of NATS servers - # servers = ["nats://localhost:4222"] + servers = ["nats://localhost:4222"] + + ## subject(s) to consume + subjects = ["telegraf"] + + ## name a queue group + queue_group = "telegraf_consumers" + + ## Optional credentials + # username = "" + # password = "" + + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + ## Use Transport Layer Security # secure = false - ## subject(s) to consume - # subjects = ["telegraf"] - ## name a queue group - # queue_group = "telegraf_consumers" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ## Sets the limits for pending msgs and bytes for each subscription ## These shouldn't need to be adjusted except in very high throughput scenarios # pending_message_limit = 65536 # pending_bytes_limit = 67108864 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -94,107 +137,131 @@ func (n *natsConsumer) natsErrHandler(c *nats.Conn, s *nats.Subscription, e erro // Start the nats consumer. Caller must call *natsConsumer.Stop() to clean up. func (n *natsConsumer) Start(acc telegraf.Accumulator) error { - n.Lock() - defer n.Unlock() - - n.acc = acc + n.acc = acc.WithTracking(n.MaxUndeliveredMessages) var connectErr error - // set default NATS connection options - opts := nats.DefaultOptions + options := []nats.Option{ + nats.MaxReconnects(-1), + nats.ErrorHandler(n.natsErrHandler), + } - // override max reconnection tries - opts.MaxReconnect = -1 + // override authentication, if any was specified + if n.Username != "" && n.Password != "" { + options = append(options, nats.UserInfo(n.Username, n.Password)) + } - // override servers if any were specified - opts.Servers = n.Servers + if n.Credentials != "" { + options = append(options, nats.UserCredentials(n.Credentials)) + } - opts.Secure = n.Secure + if n.Secure { + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return err + } - if n.Conn == nil || n.Conn.IsClosed() { - n.Conn, connectErr = opts.Connect() + options = append(options, nats.Secure(tlsConfig)) + } + + if n.conn == nil || n.conn.IsClosed() { + n.conn, connectErr = nats.Connect(strings.Join(n.Servers, ","), options...) 
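+		// The call above joins the configured servers into the single comma
+		// separated URL string that nats.Connect expects, and passes the
+		// variadic options (reconnect, error handler, credentials, TLS)
+		// assembled earlier in Start.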
if connectErr != nil { return connectErr } // Setup message and error channels n.errs = make(chan error) - n.Conn.SetErrorHandler(n.natsErrHandler) n.in = make(chan *nats.Msg, 1000) for _, subj := range n.Subjects { - sub, err := n.Conn.QueueSubscribe(subj, n.QueueGroup, func(m *nats.Msg) { + sub, err := n.conn.QueueSubscribe(subj, n.QueueGroup, func(m *nats.Msg) { n.in <- m }) if err != nil { return err } - // ensure that the subscription has been processed by the server - if err = n.Conn.Flush(); err != nil { - return err - } + // set the subscription pending limits - if err = sub.SetPendingLimits(n.PendingMessageLimit, n.PendingBytesLimit); err != nil { + err = sub.SetPendingLimits(n.PendingMessageLimit, n.PendingBytesLimit) + if err != nil { return err } - n.Subs = append(n.Subs, sub) + + n.subs = append(n.subs, sub) } } - n.done = make(chan struct{}) + ctx, cancel := context.WithCancel(context.Background()) + n.cancel = cancel // Start the message reader n.wg.Add(1) - go n.receiver() - log.Printf("I! Started the NATS consumer service, nats: %v, subjects: %v, queue: %v\n", - n.Conn.ConnectedUrl(), n.Subjects, n.QueueGroup) + go func() { + defer n.wg.Done() + go n.receiver(ctx) + }() + + n.Log.Infof("Started the NATS consumer service, nats: %v, subjects: %v, queue: %v", + n.conn.ConnectedUrl(), n.Subjects, n.QueueGroup) return nil } // receiver() reads all incoming messages from NATS, and parses them into // telegraf metrics. -func (n *natsConsumer) receiver() { - defer n.wg.Done() +func (n *natsConsumer) receiver(ctx context.Context) { + sem := make(semaphore, n.MaxUndeliveredMessages) + for { select { - case <-n.done: + case <-ctx.Done(): return + case <-n.acc.Delivered(): + <-sem case err := <-n.errs: - n.acc.AddError(fmt.Errorf("E! error reading from %s\n", err.Error())) - case msg := <-n.in: - metrics, err := n.parser.Parse(msg.Data) - if err != nil { - n.acc.AddError(fmt.Errorf("E! subject: %s, error: %s", msg.Subject, err.Error())) - } + n.Log.Error(err) + case sem <- empty{}: + select { + case <-ctx.Done(): + return + case err := <-n.errs: + <-sem + n.Log.Error(err) + case <-n.acc.Delivered(): + <-sem + <-sem + case msg := <-n.in: + metrics, err := n.parser.Parse(msg.Data) + if err != nil { + n.Log.Errorf("Subject: %s, error: %s", msg.Subject, err.Error()) + <-sem + continue + } - for _, metric := range metrics { - n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) + n.acc.AddTrackingMetricGroup(metrics) } } } } func (n *natsConsumer) clean() { - for _, sub := range n.Subs { + for _, sub := range n.subs { if err := sub.Unsubscribe(); err != nil { - n.acc.AddError(fmt.Errorf("E! 
Error unsubscribing from subject %s in queue %s: %s\n", - sub.Subject, sub.Queue, err.Error())) + n.Log.Errorf("Error unsubscribing from subject %s in queue %s: %s", + sub.Subject, sub.Queue, err.Error()) } } - if n.Conn != nil && !n.Conn.IsClosed() { - n.Conn.Close() + if n.conn != nil && !n.conn.IsClosed() { + n.conn.Close() } } func (n *natsConsumer) Stop() { - n.Lock() - close(n.done) + n.cancel() n.wg.Wait() n.clean() - n.Unlock() } func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { @@ -204,12 +271,13 @@ func (n *natsConsumer) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("nats_consumer", func() telegraf.Input { return &natsConsumer{ - Servers: []string{"nats://localhost:4222"}, - Secure: false, - Subjects: []string{"telegraf"}, - QueueGroup: "telegraf_consumers", - PendingBytesLimit: nats.DefaultSubPendingBytesLimit, - PendingMessageLimit: nats.DefaultSubPendingMsgsLimit, + Servers: []string{"nats://localhost:4222"}, + Secure: false, + Subjects: []string{"telegraf"}, + QueueGroup: "telegraf_consumers", + PendingBytesLimit: nats.DefaultSubPendingBytesLimit, + PendingMessageLimit: nats.DefaultSubPendingMsgsLimit, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, } }) } diff --git a/plugins/inputs/nats_consumer/nats_consumer_test.go b/plugins/inputs/nats_consumer/nats_consumer_test.go deleted file mode 100644 index a0b84ff2e..000000000 --- a/plugins/inputs/nats_consumer/nats_consumer_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package natsconsumer - -import ( - "testing" - - "github.com/influxdata/telegraf/plugins/parsers" - "github.com/influxdata/telegraf/testutil" - nats "github.com/nats-io/go-nats" - "github.com/stretchr/testify/assert" -) - -const ( - testMsg = "cpu_load_short,host=server01 value=23422.0 1422568543702900257\n" - testMsgGraphite = "cpu.load.short.graphite 23422 1454780029" - testMsgJSON = "{\"a\": 5, \"b\": {\"c\": 6}}\n" - invalidMsg = "cpu_load_short,host=server01 1422568543702900257\n" - metricBuffer = 5 -) - -func newTestNatsConsumer() (*natsConsumer, chan *nats.Msg) { - in := make(chan *nats.Msg, metricBuffer) - n := &natsConsumer{ - QueueGroup: "test", - Subjects: []string{"telegraf"}, - Servers: []string{"nats://localhost:4222"}, - Secure: false, - in: in, - errs: make(chan error, metricBuffer), - done: make(chan struct{}), - } - return n, in -} - -// Test that the parser parses NATS messages into metrics -func TestRunParser(t *testing.T) { - n, in := newTestNatsConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - n.wg.Add(1) - go n.receiver() - in <- natsMsg(testMsg) - - acc.Wait(1) -} - -// Test that the parser ignores invalid messages -func TestRunParserInvalidMsg(t *testing.T) { - n, in := newTestNatsConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - n.wg.Add(1) - go n.receiver() - in <- natsMsg(invalidMsg) - - acc.WaitError(1) - assert.Contains(t, acc.Errors[0].Error(), "E! 
subject: telegraf, error: metric parse error") - assert.EqualValues(t, 0, acc.NMetrics()) -} - -// Test that the parser parses line format messages into metrics -func TestRunParserAndGather(t *testing.T) { - n, in := newTestNatsConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewInfluxParser() - n.wg.Add(1) - go n.receiver() - in <- natsMsg(testMsg) - - n.Gather(&acc) - - acc.Wait(1) - acc.AssertContainsFields(t, "cpu_load_short", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses graphite format messages into metrics -func TestRunParserAndGatherGraphite(t *testing.T) { - n, in := newTestNatsConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewGraphiteParser("_", []string{}, nil) - n.wg.Add(1) - go n.receiver() - in <- natsMsg(testMsgGraphite) - - n.Gather(&acc) - - acc.Wait(1) - acc.AssertContainsFields(t, "cpu_load_short_graphite", - map[string]interface{}{"value": float64(23422)}) -} - -// Test that the parser parses json format messages into metrics -func TestRunParserAndGatherJSON(t *testing.T) { - n, in := newTestNatsConsumer() - acc := testutil.Accumulator{} - n.acc = &acc - defer close(n.done) - - n.parser, _ = parsers.NewJSONParser("nats_json_test", []string{}, nil) - n.wg.Add(1) - go n.receiver() - in <- natsMsg(testMsgJSON) - - n.Gather(&acc) - - acc.Wait(1) - acc.AssertContainsFields(t, "nats_json_test", - map[string]interface{}{ - "a": float64(5), - "b_c": float64(6), - }) -} - -func natsMsg(val string) *nats.Msg { - return &nats.Msg{ - Subject: "telegraf", - Data: []byte(val), - } -} diff --git a/plugins/inputs/neptune_apex/README.md b/plugins/inputs/neptune_apex/README.md new file mode 100644 index 000000000..61919a5c6 --- /dev/null +++ b/plugins/inputs/neptune_apex/README.md @@ -0,0 +1,149 @@ +# Neptune Apex Input Plugin + +The Neptune Apex controller family allows an aquarium hobbyist to monitor and control +their tanks based on various probes. The data is taken directly from the `/cgi-bin/status.xml` at the interval specified +in the telegraf.conf configuration file. + +The [Neptune Apex](https://www.neptunesystems.com/) input plugin collects real-time data from the Apex's status.xml page. + + +### Configuration + +```toml +[[inputs.neptune_apex]] + ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. + ## Measurements will be logged under "apex". + + ## The base URL of the local Apex(es). If you specify more than one server, they will + ## be differentiated by the "source" tag. + servers = [ + "http://apex.local", + ] + + ## The response_timeout specifies how long to wait for a reply from the Apex. + #response_timeout = "5s" + +``` + +### Metrics + +The Neptune Apex controller family allows an aquarium hobbyist to monitor and control +their tanks based on various probes. The data is taken directly from the /cgi-bin/status.xml at the interval specified +in the telegraf.conf configuration file. + +No manipulation is done on any of the fields to ensure future changes to the status.xml do not introduce conversion bugs +to this plugin. When reasonable and predictable, some tags are derived to make graphing easier and without front-end +programming. These tags are clearly marked in the list below and should be considered a convenience rather than authoritative. + +- neptune_apex (All metrics have this measurement name) + - tags: + - host (mandatory, string) is the host on which telegraf runs. 
+ - source (mandatory, string) contains the hostname of the apex device. This can be used to differentiate between + different units. By using the source instead of the serial number, replacements units won't disturb graphs. + - type (mandatory, string) maps to the different types of data. Values can be "controller" (The Apex controller + itself), "probe" for the different input probes, or "output" for any physical or virtual outputs. The Watt and Amp + probes attached to the physical 120V outlets are aggregated under the output type. + - hardware (mandatory, string) controller hardware version + - software (mandatory, string) software version + - probe_type (optional, string) contains the probe type as reported by the Apex. + - name (optional, string) contains the name of the probe or output. + - output_id (optional, string) represents the internal unique output ID. This is different from the device_id. + - device_id (optional, string) maps to either the aquabus address or the internal reference. + - output_type (optional, string) categorizes the output into different categories. This tag is DERIVED from the + device_id. Possible values are: "variable" for the 0-10V signal ports, "outlet" for physical 120V sockets, "alert" + for alarms (email, sound), "virtual" for user-defined outputs, and "unknown" for everything else. + - fields: + - value (float, various unit) represents the probe reading. + - state (string) represents the output state as defined by the Apex. Examples include "AOF" for Auto (OFF), "TBL" + for operating according to a table, and "PF*" for different programs. + - amp (float, Ampere) is the amount of current flowing through the 120V outlet. + - watt (float, Watt) represents the amount of energy flowing through the 120V outlet. + - xstatus (string) indicates the xstatus of an outlet. Found on wireless Vortech devices. + - power_failed (int64, Unix epoch in ns) when the controller last lost power. Omitted if the apex reports it as "none" + - power_restored (int64, Unix epoch in ns) when the controller last powered on. Omitted if the apex reports it as "none" + - serial (string, serial number) + - time: + - The time used for the metric is parsed from the status.xml page. This helps when cross-referencing events with + the local system of Apex Fusion. Since the Apex uses NTP, this should not matter in most scenarios. + + +### Sample Queries + + +Get the max, mean, and min for the temperature in the last hour: +``` +SELECT mean("value") FROM "neptune_apex" WHERE ("probe_type" = 'Temp') AND time >= now() - 6h GROUP BY time(20s) +``` + +### Troubleshooting + +#### sendRequest failure +This indicates a problem communicating with the local Apex controller. If on Mac/Linux, try curl: +``` +$ curl apex.local/cgi-bin/status.xml +``` +to isolate the problem. + +#### parseXML errors +Ensure the XML being returned is valid. If you get valid XML back, open a bug request. + +#### Missing fields/data +The neptune_apex plugin is strict on its input to prevent any conversion errors. 
If you have fields in the status.xml +output that are not converted to a metric, open a feature request and paste your whole status.xml + +### Example Output + +``` +neptune_apex,hardware=1.0,host=ubuntu,software=5.04_7A18,source=apex,type=controller power_failed=1544814000000000000i,power_restored=1544833875000000000i,serial="AC5:12345" 1545978278000000000 +neptune_apex,device_id=base_Var1,hardware=1.0,host=ubuntu,name=VarSpd1_I1,output_id=0,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF1" 1545978278000000000 +neptune_apex,device_id=base_Var2,hardware=1.0,host=ubuntu,name=VarSpd2_I2,output_id=1,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF2" 1545978278000000000 +neptune_apex,device_id=base_Var3,hardware=1.0,host=ubuntu,name=VarSpd3_I3,output_id=2,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF3" 1545978278000000000 +neptune_apex,device_id=base_Var4,hardware=1.0,host=ubuntu,name=VarSpd4_I4,output_id=3,output_type=variable,software=5.04_7A18,source=apex,type=output state="PF4" 1545978278000000000 +neptune_apex,device_id=base_Alarm,hardware=1.0,host=ubuntu,name=SndAlm_I6,output_id=4,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=base_Warn,hardware=1.0,host=ubuntu,name=SndWrn_I7,output_id=5,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=base_email,hardware=1.0,host=ubuntu,name=EmailAlm_I5,output_id=6,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=base_email2,hardware=1.0,host=ubuntu,name=Email2Alm_I9,output_id=7,output_type=alert,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=2_1,hardware=1.0,host=ubuntu,name=RETURN_2_1,output_id=8,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.3,state="AON",watt=34 1545978278000000000 +neptune_apex,device_id=2_2,hardware=1.0,host=ubuntu,name=Heater1_2_2,output_id=9,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 +neptune_apex,device_id=2_3,hardware=1.0,host=ubuntu,name=FREE_2_3,output_id=10,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 +neptune_apex,device_id=2_4,hardware=1.0,host=ubuntu,name=LIGHT_2_4,output_id=11,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 +neptune_apex,device_id=2_5,hardware=1.0,host=ubuntu,name=LHead_2_5,output_id=12,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=4 1545978278000000000 +neptune_apex,device_id=2_6,hardware=1.0,host=ubuntu,name=SKIMMER_2_6,output_id=13,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.1,state="AON",watt=12 1545978278000000000 +neptune_apex,device_id=2_7,hardware=1.0,host=ubuntu,name=FREE_2_7,output_id=14,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="OFF",watt=1 1545978278000000000 +neptune_apex,device_id=2_8,hardware=1.0,host=ubuntu,name=CABLIGHT_2_8,output_id=15,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 +neptune_apex,device_id=2_9,hardware=1.0,host=ubuntu,name=LinkA_2_9,output_id=16,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 
+neptune_apex,device_id=2_10,hardware=1.0,host=ubuntu,name=LinkB_2_10,output_id=17,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=3_1,hardware=1.0,host=ubuntu,name=RVortech_3_1,output_id=18,output_type=unknown,software=5.04_7A18,source=apex,type=output state="TBL",xstatus="OK" 1545978278000000000 +neptune_apex,device_id=3_2,hardware=1.0,host=ubuntu,name=LVortech_3_2,output_id=19,output_type=unknown,software=5.04_7A18,source=apex,type=output state="TBL",xstatus="OK" 1545978278000000000 +neptune_apex,device_id=4_1,hardware=1.0,host=ubuntu,name=OSMOLATO_4_1,output_id=20,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 +neptune_apex,device_id=4_2,hardware=1.0,host=ubuntu,name=HEATER2_4_2,output_id=21,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AOF",watt=0 1545978278000000000 +neptune_apex,device_id=4_3,hardware=1.0,host=ubuntu,name=NUC_4_3,output_id=22,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0.1,state="AON",watt=8 1545978278000000000 +neptune_apex,device_id=4_4,hardware=1.0,host=ubuntu,name=CABFAN_4_4,output_id=23,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 +neptune_apex,device_id=4_5,hardware=1.0,host=ubuntu,name=RHEAD_4_5,output_id=24,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=3 1545978278000000000 +neptune_apex,device_id=4_6,hardware=1.0,host=ubuntu,name=FIRE_4_6,output_id=25,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=3 1545978278000000000 +neptune_apex,device_id=4_7,hardware=1.0,host=ubuntu,name=LightGW_4_7,output_id=26,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=1 1545978278000000000 +neptune_apex,device_id=4_8,hardware=1.0,host=ubuntu,name=GBSWITCH_4_8,output_id=27,output_type=outlet,software=5.04_7A18,source=apex,type=output amp=0,state="AON",watt=0 1545978278000000000 +neptune_apex,device_id=4_9,hardware=1.0,host=ubuntu,name=LinkA_4_9,output_id=28,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=4_10,hardware=1.0,host=ubuntu,name=LinkB_4_10,output_id=29,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=5_1,hardware=1.0,host=ubuntu,name=LinkA_5_1,output_id=30,output_type=unknown,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=Cntl_A1,hardware=1.0,host=ubuntu,name=ATO_EMPTY,output_id=31,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=Cntl_A2,hardware=1.0,host=ubuntu,name=LEAK,output_id=32,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,device_id=Cntl_A3,hardware=1.0,host=ubuntu,name=SKMR_NOPWR,output_id=33,output_type=virtual,software=5.04_7A18,source=apex,type=output state="AOF" 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=Tmp,probe_type=Temp,software=5.04_7A18,source=apex,type=probe value=78.1 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=pH,probe_type=pH,software=5.04_7A18,source=apex,type=probe value=7.93 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=ORP,probe_type=ORP,software=5.04_7A18,source=apex,type=probe value=191 1545978278000000000 
+neptune_apex,hardware=1.0,host=ubuntu,name=Salt,probe_type=Cond,software=5.04_7A18,source=apex,type=probe value=29.4 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=Volt_2,software=5.04_7A18,source=apex,type=probe value=117 1545978278000000000 +neptune_apex,hardware=1.0,host=ubuntu,name=Volt_4,software=5.04_7A18,source=apex,type=probe value=118 1545978278000000000 + +``` + +### Contributing + +This plugin is used for mission-critical aquatic life support. A bug could very well result in the death of animals. +Neptune does not publish a schema file and as such, we have made this plugin very strict on input with no provisions for +automatically adding fields. We are also careful to not add default values when none are presented to prevent automation +errors. + +When writing unit tests, use actual Apex output to run tests. It's acceptable to abridge the number of repeated fields +but never inner fields or parameters. diff --git a/plugins/inputs/neptune_apex/neptune_apex.go b/plugins/inputs/neptune_apex/neptune_apex.go new file mode 100644 index 000000000..8161ac7b4 --- /dev/null +++ b/plugins/inputs/neptune_apex/neptune_apex.go @@ -0,0 +1,294 @@ +// Package neptuneapex implements an input plugin for the Neptune Apex +// aquarium controller. +package neptuneapex + +import ( + "encoding/xml" + "fmt" + "io/ioutil" + "math" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Measurement is constant across all metrics. +const Measurement = "neptune_apex" + +type xmlReply struct { + SoftwareVersion string `xml:"software,attr"` + HardwareVersion string `xml:"hardware,attr"` + Hostname string `xml:"hostname"` + Serial string `xml:"serial"` + Timezone float64 `xml:"timezone"` + Date string `xml:"date"` + PowerFailed string `xml:"power>failed"` + PowerRestored string `xml:"power>restored"` + Probe []probe `xml:"probes>probe"` + Outlet []outlet `xml:"outlets>outlet"` +} + +type probe struct { + Name string `xml:"name"` + Value string `xml:"value"` + Type *string `xml:"type"` +} + +type outlet struct { + Name string `xml:"name"` + OutputID string `xml:"outputID"` + State string `xml:"state"` + DeviceID string `xml:"deviceID"` + Xstatus *string `xml:"xstatus"` +} + +// NeptuneApex implements telegraf.Input. +type NeptuneApex struct { + Servers []string + ResponseTimeout internal.Duration + httpClient *http.Client +} + +// Description implements telegraf.Input.Description +func (*NeptuneApex) Description() string { + return "Neptune Apex data collector" +} + +// SampleConfig implements telegraf.Input.SampleConfig +func (*NeptuneApex) SampleConfig() string { + return ` + ## The Neptune Apex plugin reads the publicly available status.xml data from a local Apex. + ## Measurements will be logged under "apex". + + ## The base URL of the local Apex(es). If you specify more than one server, they will + ## be differentiated by the "source" tag. + servers = [ + "http://apex.local", + ] + + ## The response_timeout specifies how long to wait for a reply from the Apex. 
+ #response_timeout = "5s" +` +} + +// Gather implements telegraf.Input.Gather +func (n *NeptuneApex) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + for _, server := range n.Servers { + wg.Add(1) + go func(server string) { + defer wg.Done() + acc.AddError(n.gatherServer(acc, server)) + }(server) + } + wg.Wait() + return nil +} + +func (n *NeptuneApex) gatherServer( + acc telegraf.Accumulator, server string) error { + resp, err := n.sendRequest(server) + if err != nil { + return err + } + return n.parseXML(acc, resp) +} + +// parseXML is strict on the input and does not do best-effort parsing. +//This is because of the life-support nature of the Neptune Apex. +func (n *NeptuneApex) parseXML(acc telegraf.Accumulator, data []byte) error { + r := xmlReply{} + err := xml.Unmarshal(data, &r) + if err != nil { + return fmt.Errorf("unable to unmarshal XML: %v\nXML DATA: %q", + err, data) + } + + mainFields := map[string]interface{}{ + "serial": r.Serial, + } + var reportTime time.Time + + if reportTime, err = parseTime(r.Date, r.Timezone); err != nil { + return err + } + if val, err := parseTime(r.PowerFailed, r.Timezone); err == nil { + mainFields["power_failed"] = val.UnixNano() + } + if val, err := parseTime(r.PowerRestored, r.Timezone); err == nil { + mainFields["power_restored"] = val.UnixNano() + } + + acc.AddFields(Measurement, mainFields, + map[string]string{ + "source": r.Hostname, + "type": "controller", + "software": r.SoftwareVersion, + "hardware": r.HardwareVersion, + }, + reportTime) + + // Outlets. + for _, o := range r.Outlet { + tags := map[string]string{ + "source": r.Hostname, + "output_id": o.OutputID, + "device_id": o.DeviceID, + "name": o.Name, + "type": "output", + "software": r.SoftwareVersion, + "hardware": r.HardwareVersion, + } + fields := map[string]interface{}{ + "state": o.State, + } + // Find Amp and Watt probes and add them as fields. + // Remove the redundant probe. + if pos := findProbe(fmt.Sprintf("%sW", o.Name), r.Probe); pos > -1 { + value, err := strconv.ParseFloat( + strings.TrimSpace(r.Probe[pos].Value), 64) + if err != nil { + acc.AddError( + fmt.Errorf( + "cannot convert string value %q to float64: %v", + r.Probe[pos].Value, err)) + continue // Skip the whole outlet. + } + fields["watt"] = value + r.Probe[pos] = r.Probe[len(r.Probe)-1] + r.Probe = r.Probe[:len(r.Probe)-1] + } + if pos := findProbe(fmt.Sprintf("%sA", o.Name), r.Probe); pos > -1 { + value, err := strconv.ParseFloat( + strings.TrimSpace(r.Probe[pos].Value), 64) + if err != nil { + acc.AddError( + fmt.Errorf( + "cannot convert string value %q to float64: %v", + r.Probe[pos].Value, err)) + break // // Skip the whole outlet. + } + fields["amp"] = value + r.Probe[pos] = r.Probe[len(r.Probe)-1] + r.Probe = r.Probe[:len(r.Probe)-1] + } + if o.Xstatus != nil { + fields["xstatus"] = *o.Xstatus + } + // Try to determine outlet type. Focus on accuracy, leaving the + //outlet_type "unknown" when ambiguous. 24v and vortech cannot be + // determined. 
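+	// For example, a device_id of "base_Var1" is classified as "variable",
+	// "base_Alarm", "base_Warn" and "base_email*" as "alert", an outlet with a
+	// matching watt or amp probe as "outlet", "Cntl_*" as "virtual", and
+	// everything else falls back to "unknown".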
+ switch { + case strings.HasPrefix(o.DeviceID, "base_Var"): + tags["output_type"] = "variable" + case o.DeviceID == "base_Alarm": + fallthrough + case o.DeviceID == "base_Warn": + fallthrough + case strings.HasPrefix(o.DeviceID, "base_email"): + tags["output_type"] = "alert" + case fields["watt"] != nil || fields["amp"] != nil: + tags["output_type"] = "outlet" + case strings.HasPrefix(o.DeviceID, "Cntl_"): + tags["output_type"] = "virtual" + default: + tags["output_type"] = "unknown" + } + + acc.AddFields(Measurement, fields, tags, reportTime) + } + + // Probes. + for _, p := range r.Probe { + value, err := strconv.ParseFloat(strings.TrimSpace(p.Value), 64) + if err != nil { + acc.AddError(fmt.Errorf( + "cannot convert string value %q to float64: %v", + p.Value, err)) + continue + } + fields := map[string]interface{}{ + "value": value, + } + tags := map[string]string{ + "source": r.Hostname, + "type": "probe", + "name": p.Name, + "software": r.SoftwareVersion, + "hardware": r.HardwareVersion, + } + if p.Type != nil { + tags["probe_type"] = *p.Type + } + acc.AddFields(Measurement, fields, tags, reportTime) + } + + return nil +} + +func findProbe(probe string, probes []probe) int { + for i, p := range probes { + if p.Name == probe { + return i + } + } + return -1 +} + +// parseTime takes a Neptune Apex date/time string with a timezone and +// returns a time.Time struct. +func parseTime(val string, tz float64) (time.Time, error) { + // Magic time constant from https://golang.org/pkg/time/#Parse + const TimeLayout = "01/02/2006 15:04:05 -0700" + + // Timezone offset needs to be explicit + sign := '+' + if tz < 0 { + sign = '-' + } + + // Build a time string with the timezone in a format Go can parse. + tzs := fmt.Sprintf("%c%04d", sign, int(math.Abs(tz))*100) + ts := fmt.Sprintf("%s %s", val, tzs) + t, err := time.Parse(TimeLayout, ts) + if err != nil { + return time.Now(), fmt.Errorf("unable to parse %q (%v)", ts, err) + } + return t, nil +} + +func (n *NeptuneApex) sendRequest(server string) ([]byte, error) { + url := fmt.Sprintf("%s/cgi-bin/status.xml", server) + resp, err := n.httpClient.Get(url) + if err != nil { + return nil, fmt.Errorf("http GET failed: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf( + "response from server URL %q returned %d (%s), expected %d (%s)", + url, resp.StatusCode, http.StatusText(resp.StatusCode), + http.StatusOK, http.StatusText(http.StatusOK)) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read output from %q: %v", url, err) + } + return body, nil +} + +func init() { + inputs.Add("neptune_apex", func() telegraf.Input { + return &NeptuneApex{ + httpClient: &http.Client{ + Timeout: 5 * time.Second, + }, + } + }) +} diff --git a/plugins/inputs/neptune_apex/neptune_apex_test.go b/plugins/inputs/neptune_apex/neptune_apex_test.go new file mode 100644 index 000000000..cefa5fad1 --- /dev/null +++ b/plugins/inputs/neptune_apex/neptune_apex_test.go @@ -0,0 +1,621 @@ +package neptuneapex + +import ( + "bytes" + "context" + "net" + "net/http" + "net/http/httptest" + "reflect" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" +) + +func TestGather(t *testing.T) { + h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + w.Write([]byte("data")) + }) + c, destroy := fakeHTTPClient(h) + defer destroy() + n := &NeptuneApex{ + httpClient: c, + } + tests := []struct { + name string + servers []string 
+ }{ + { + name: "Good case, 2 servers", + servers: []string{"http://abc", "https://def"}, + }, + { + name: "Good case, 0 servers", + servers: []string{}, + }, + { + name: "Good case nil", + servers: nil, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + var acc testutil.Accumulator + n.Servers = test.servers + n.Gather(&acc) + if len(acc.Errors) != len(test.servers) { + t.Errorf("Number of servers mismatch. got=%d, want=%d", + len(acc.Errors), len(test.servers)) + } + + }) + } +} + +func TestParseXML(t *testing.T) { + n := &NeptuneApex{} + goodTime := time.Date(2018, 12, 22, 21, 55, 37, 0, + time.FixedZone("PST", 3600*-8)) + tests := []struct { + name string + xmlResponse []byte + wantMetrics []*testutil.Metric + wantAccErr bool + wantErr bool + }{ + { + name: "Good test", + xmlResponse: []byte(APEX2016), + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "type": "controller", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{ + "serial": "AC5:12345", + "power_failed": int64(1544814000000000000), + "power_restored": int64(1544833875000000000), + }, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "0", + "device_id": "base_Var1", + "name": "VarSpd1_I1", + "output_type": "variable", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"state": "PF1"}, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "6", + "device_id": "base_email", + "name": "EmailAlm_I5", + "output_type": "alert", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"state": "AOF"}, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "8", + "device_id": "2_1", + "name": "RETURN_2_1", + "output_type": "outlet", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{ + "state": "AON", + "watt": 35.0, + "amp": 0.3, + }, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "18", + "device_id": "3_1", + "name": "RVortech_3_1", + "output_type": "unknown", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{ + "state": "TBL", + "xstatus": "OK", + }, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "28", + "device_id": "4_9", + "name": "LinkA_4_9", + "output_type": "unknown", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"state": "AOF"}, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "output_id": "32", + "device_id": "Cntl_A2", + "name": "LEAK", + "output_type": "virtual", + "type": "output", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"state": "AOF"}, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "apex", + "name": "Salt", + "type": "probe", + "probe_type": "Cond", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"value": 30.1}, + }, + { + Measurement: Measurement, + Time: goodTime, + Tags: 
map[string]string{ + "source": "apex", + "name": "Volt_2", + "type": "probe", + "software": "5.04_7A18", + "hardware": "1.0", + }, + Fields: map[string]interface{}{"value": 115.0}, + }, + }, + }, + { + name: "Unmarshal error", + xmlResponse: []byte("Invalid"), + wantErr: true, + }, + { + name: "Report time failure", + xmlResponse: []byte(`abc`), + wantErr: true, + }, + { + name: "Power Failed time failure", + xmlResponse: []byte( + `12/22/2018 21:55:37 + -8.0a + 12/22/2018 22:55:37`), + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_restored": int64(1545548137000000000), + }, + }, + }, + }, + { + name: "Power restored time failure", + xmlResponse: []byte( + `12/22/2018 21:55:37 + -8.0a + 12/22/2018 22:55:37`), + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_failed": int64(1545548137000000000), + }, + }, + }, + }, + { + name: "Power failed failure", + xmlResponse: []byte( + `abc`), + wantErr: true, + }, + { + name: "Failed to parse watt to float", + xmlResponse: []byte( + ` + 12/22/2018 21:55:37-8.0 + 12/22/2018 21:55:37 + 12/22/2018 21:55:37 + o1 + o1Wabc + `), + wantAccErr: true, + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_failed": int64(1545544537000000000), + "power_restored": int64(1545544537000000000), + }, + }, + }, + }, + { + name: "Failed to parse amp to float", + xmlResponse: []byte( + ` + 12/22/2018 21:55:37-8.0 + 12/22/2018 21:55:37 + 12/22/2018 21:55:37 + o1 + o1Aabc + `), + wantAccErr: true, + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_failed": int64(1545544537000000000), + "power_restored": int64(1545544537000000000), + }, + }, + }, + }, + { + name: "Failed to parse probe value to float", + xmlResponse: []byte( + ` + 12/22/2018 21:55:37-8.0 + 12/22/2018 21:55:37 + 12/22/2018 21:55:37 + p1abc + `), + wantAccErr: true, + wantMetrics: []*testutil.Metric{ + { + Measurement: Measurement, + Time: goodTime, + Tags: map[string]string{ + "source": "", + "type": "controller", + "hardware": "", + "software": "", + }, + Fields: map[string]interface{}{ + "serial": "", + "power_failed": int64(1545544537000000000), + "power_restored": int64(1545544537000000000), + }, + }, + }, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + var acc testutil.Accumulator + err := n.parseXML(&acc, []byte(test.xmlResponse)) + if (err != nil) != test.wantErr { + t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) + } + if test.wantErr { + return + } + if len(acc.Errors) > 0 != test.wantAccErr { + t.Errorf("Accumulator errors. got=%v, want=none", acc.Errors) + } + if len(acc.Metrics) != len(test.wantMetrics) { + t.Fatalf("Invalid number of metrics received. 
got=%d, want=%d", len(acc.Metrics), len(test.wantMetrics)) + } + for i, m := range acc.Metrics { + if m.Measurement != test.wantMetrics[i].Measurement { + t.Errorf("Metric measurement mismatch at position %d:\ngot=\n%s\nWant=\n%s", i, m.Measurement, test.wantMetrics[i].Measurement) + } + if !reflect.DeepEqual(m.Tags, test.wantMetrics[i].Tags) { + t.Errorf("Metric tags mismatch at position %d:\ngot=\n%v\nwant=\n%v", i, m.Tags, test.wantMetrics[i].Tags) + } + if !reflect.DeepEqual(m.Fields, test.wantMetrics[i].Fields) { + t.Errorf("Metric fields mismatch at position %d:\ngot=\n%#v\nwant=:\n%#v", i, m.Fields, test.wantMetrics[i].Fields) + } + if !m.Time.Equal(test.wantMetrics[i].Time) { + t.Errorf("Metric time mismatch at position %d:\ngot=\n%s\nwant=\n%s", i, m.Time, test.wantMetrics[i].Time) + } + } + }) + } +} + +func TestSendRequest(t *testing.T) { + tests := []struct { + name string + statusCode int + wantErr bool + }{ + { + name: "Good case", + statusCode: http.StatusOK, + }, + { + name: "Get error", + statusCode: http.StatusNotFound, + wantErr: true, + }, + { + name: "Status 301", + statusCode: http.StatusMovedPermanently, + wantErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + h := http.HandlerFunc(func( + w http.ResponseWriter, r *http.Request) { + w.WriteHeader(test.statusCode) + w.Write([]byte("data")) + }) + c, destroy := fakeHTTPClient(h) + defer destroy() + n := &NeptuneApex{ + httpClient: c, + } + resp, err := n.sendRequest("http://abc") + if (err != nil) != test.wantErr { + t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) + } + if test.wantErr { + return + } + if bytes.Compare(resp, []byte("data")) != 0 { + t.Errorf( + "Response data mismatch. got=%q, want=%q", resp, "data") + } + }) + } +} + +func TestParseTime(t *testing.T) { + tests := []struct { + name string + input string + timeZone float64 + wantTime time.Time + wantErr bool + }{ + { + name: "Good case - Timezone positive", + input: "01/01/2023 12:34:56", + timeZone: 5, + wantTime: time.Date(2023, 1, 1, 12, 34, 56, 0, + time.FixedZone("a", 3600*5)), + }, + { + name: "Good case - Timezone negative", + input: "01/01/2023 12:34:56", + timeZone: -8, + wantTime: time.Date(2023, 1, 1, 12, 34, 56, 0, + time.FixedZone("a", 3600*-8)), + }, + { + name: "Cannot parse", + input: "Not a date", + wantErr: true, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + res, err := parseTime(test.input, test.timeZone) + if (err != nil) != test.wantErr { + t.Errorf("err mismatch. got=%v, want=%t", err, test.wantErr) + } + if test.wantErr { + return + } + if !test.wantTime.Equal(res) { + t.Errorf("err mismatch. 
got=%s, want=%s", res, test.wantTime) + } + }) + } +} + +func TestFindProbe(t *testing.T) { + fakeProbes := []probe{ + { + Name: "test1", + }, + { + Name: "good", + }, + } + tests := []struct { + name string + probeName string + wantIndex int + }{ + { + name: "Good case - Found", + probeName: "good", + wantIndex: 1, + }, + { + name: "Not found", + probeName: "bad", + wantIndex: -1, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + index := findProbe(test.probeName, fakeProbes) + if index != test.wantIndex { + t.Errorf("probe index mismatch; got=%d, want %d", index, test.wantIndex) + } + }) + } +} + +func TestDescription(t *testing.T) { + n := &NeptuneApex{} + if n.Description() == "" { + t.Errorf("Empty description") + } +} + +func TestSampleConfig(t *testing.T) { + n := &NeptuneApex{} + if n.SampleConfig() == "" { + t.Errorf("Empty sample config") + } +} + +// This fakeHttpClient creates a server and binds a client to it. +// That way, it is possible to control the http +// output from within the test without changes to the main code. +func fakeHTTPClient(h http.Handler) (*http.Client, func()) { + s := httptest.NewServer(h) + c := &http.Client{ + Transport: &http.Transport{ + DialContext: func( + _ context.Context, network, _ string) (net.Conn, error) { + return net.Dial(network, s.Listener.Addr().String()) + }, + }, + } + return c, s.Close +} + +// Sample configuration from a 2016 version Neptune Apex. +const APEX2016 = ` + +apex +AC5:12345 +-8.00 +12/22/2018 21:55:37 +12/14/2018 11:00:00 +12/14/2018 16:31:15 + + + Salt 30.1 + Cond + RETURN_2_1A 0.3 + + RETURN_2_1W 35 + + Volt_2 115 + + + + VarSpd1_I1 + 0 + PF1 + base_Var1 + + + EmailAlm_I5 + 6 + AOF + base_email + + + RETURN_2_1 + 8 + AON + 2_1 + + + RVortech_3_1 + 18 + TBL + 3_1 +OK + + LinkA_4_9 + 28 + AOF + 4_9 + + + LEAK + 32 + AOF + Cntl_A2 + + +` diff --git a/plugins/inputs/system/NETSTAT_README.md b/plugins/inputs/net/NETSTAT_README.md similarity index 67% rename from plugins/inputs/system/NETSTAT_README.md rename to plugins/inputs/net/NETSTAT_README.md index 636a7e3af..d0f39f5e4 100644 --- a/plugins/inputs/system/NETSTAT_README.md +++ b/plugins/inputs/net/NETSTAT_README.md @@ -1,10 +1,18 @@ -Telegraf plugin: NETSTAT +# Netstat Input Plugin -#### Description +This plugin collects TCP connections state and UDP socket counts by using `lsof`. -The NETSTAT plugin collects TCP connections state and UDP socket counts by using `lsof`. +### Configuration: -Supported TCP Connection states are follows. +``` toml +# Collect TCP connections state and UDP socket counts +[[inputs.netstat]] + # no configuration +``` + +# Measurements: + +Supported TCP Connection states are follows. - established - syn_sent @@ -19,8 +27,6 @@ Supported TCP Connection states are follows. - closing - none - -# Measurements: ### TCP Connection State measurements: Meta: @@ -49,4 +55,3 @@ Meta: Measurement names: - udp_socket - diff --git a/plugins/inputs/system/NET_README.md b/plugins/inputs/net/NET_README.md similarity index 97% rename from plugins/inputs/system/NET_README.md rename to plugins/inputs/net/NET_README.md index f265e2448..d9e747119 100644 --- a/plugins/inputs/system/NET_README.md +++ b/plugins/inputs/net/NET_README.md @@ -51,7 +51,7 @@ Under Linux the system wide protocol metrics have the interface=all tag. ### Sample Queries: -You can use the following query to get the upload/download traffic rate per second for all interfaces in the last hour. 
The query uses the (derivative function)[https://docs.influxdata.com/influxdb/v1.2/query_language/functions#derivative] which calculates the rate of change between subsequent field values. +You can use the following query to get the upload/download traffic rate per second for all interfaces in the last hour. The query uses the [derivative function](https://docs.influxdata.com/influxdb/v1.2/query_language/functions#derivative) which calculates the rate of change between subsequent field values. ``` SELECT derivative(first(bytes_recv), 1s) as "download bytes/sec", derivative(first(bytes_sent), 1s) as "upload bytes/sec" FROM net WHERE time > now() - 1h AND interface != 'all' GROUP BY time(10s), interface fill(0); diff --git a/plugins/inputs/system/net.go b/plugins/inputs/net/net.go similarity index 85% rename from plugins/inputs/system/net.go rename to plugins/inputs/net/net.go index a7ba5c63d..f91501860 100644 --- a/plugins/inputs/system/net.go +++ b/plugins/inputs/net/net.go @@ -1,4 +1,4 @@ -package system +package net import ( "fmt" @@ -8,11 +8,12 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type NetIOStats struct { filter filter.Filter - ps PS + ps system.PS skipChecks bool IgnoreProtocolStats bool @@ -53,6 +54,15 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { } } + interfaces, err := net.Interfaces() + if err != nil { + return fmt.Errorf("error getting list of interfaces: %s", err) + } + interfacesByName := map[string]net.Interface{} + for _, iface := range interfaces { + interfacesByName[iface.Name] = iface + } + for _, io := range netio { if len(s.Interfaces) != 0 { var found bool @@ -65,8 +75,8 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { continue } } else if !s.skipChecks { - iface, err := net.InterfaceByName(io.Name) - if err != nil { + iface, ok := interfacesByName[io.Name] + if !ok { continue } @@ -119,6 +129,6 @@ func (s *NetIOStats) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("net", func() telegraf.Input { - return &NetIOStats{ps: newSystemPS()} + return &NetIOStats{ps: system.NewSystemPS()} }) } diff --git a/plugins/inputs/system/net_test.go b/plugins/inputs/net/net_test.go similarity index 94% rename from plugins/inputs/system/net_test.go rename to plugins/inputs/net/net_test.go index 83b9bd460..3c4c3c7ef 100644 --- a/plugins/inputs/system/net_test.go +++ b/plugins/inputs/net/net_test.go @@ -1,16 +1,17 @@ -package system +package net import ( "syscall" "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/net" "github.com/stretchr/testify/require" ) func TestNetStats(t *testing.T) { - var mps MockPS + var mps system.MockPS var err error defer mps.AssertExpectations(t) var acc testutil.Accumulator @@ -30,7 +31,7 @@ func TestNetStats(t *testing.T) { mps.On("NetIO").Return([]net.IOCountersStat{netio}, nil) netprotos := []net.ProtoCountersStat{ - net.ProtoCountersStat{ + { Protocol: "Udp", Stats: map[string]int64{ "InDatagrams": 4655, @@ -41,16 +42,16 @@ func TestNetStats(t *testing.T) { mps.On("NetProto").Return(netprotos, nil) netstats := []net.ConnectionStat{ - net.ConnectionStat{ + { Type: syscall.SOCK_DGRAM, }, - net.ConnectionStat{ + { Status: "ESTABLISHED", }, - net.ConnectionStat{ + { Status: "ESTABLISHED", }, - net.ConnectionStat{ + { Status: "CLOSE", }, } diff --git 
a/plugins/inputs/system/netstat.go b/plugins/inputs/net/netstat.go similarity index 92% rename from plugins/inputs/system/netstat.go rename to plugins/inputs/net/netstat.go index 1699e0808..555b396af 100644 --- a/plugins/inputs/system/netstat.go +++ b/plugins/inputs/net/netstat.go @@ -1,4 +1,4 @@ -package system +package net import ( "fmt" @@ -6,10 +6,11 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type NetStats struct { - ps PS + ps system.PS } func (_ *NetStats) Description() string { @@ -66,6 +67,6 @@ func (s *NetStats) Gather(acc telegraf.Accumulator) error { func init() { inputs.Add("netstat", func() telegraf.Input { - return &NetStats{ps: newSystemPS()} + return &NetStats{ps: system.NewSystemPS()} }) } diff --git a/plugins/inputs/net_response/README.md b/plugins/inputs/net_response/README.md index 1982ced0c..2c492408b 100644 --- a/plugins/inputs/net_response/README.md +++ b/plugins/inputs/net_response/README.md @@ -30,7 +30,7 @@ verify text in the response. # expect = "ssh" ## Uncomment to remove deprecated fields; recommended for new deploys - # fieldexclude = ["result_type", "string_found"] + # fielddrop = ["result_type", "string_found"] ``` ### Metrics: @@ -43,7 +43,6 @@ verify text in the response. - result - fields: - response_time (float, seconds) - - success (int) # success 0, failure 1 - result_code (int, success = 0, timeout = 1, connection_failed = 2, read_failed = 3, string_mismatch = 4) - result_type (string) **DEPRECATED in 1.7; use result tag** - string_found (boolean) **DEPRECATED in 1.4; use result tag** diff --git a/plugins/inputs/net_response/net_response.go b/plugins/inputs/net_response/net_response.go index 66511a319..3f75a6058 100644 --- a/plugins/inputs/net_response/net_response.go +++ b/plugins/inputs/net_response/net_response.go @@ -63,7 +63,7 @@ var sampleConfig = ` # expect = "ssh" ## Uncomment to remove deprecated fields - # fieldexclude = ["result_type", "string_found"] + # fielddrop = ["result_type", "string_found"] ` // SampleConfig will return a complete configuration example with details about each field. 
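The relocated `net` and `netstat` files above both end with the same registration idiom: an `init()` that calls `inputs.Add` with a factory closure (now wired to `system.NewSystemPS()`). As a rough illustration of that pattern only, here is a minimal sketch of a hypothetical input; the `example` name, struct, and field values are invented for illustration and are not part of this change:

```go
package example

import (
	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/inputs"
)

// Example is a hypothetical input used only to illustrate the pattern.
type Example struct{}

func (*Example) Description() string  { return "A hypothetical example input" }
func (*Example) SampleConfig() string { return "  # no configuration\n" }

// Gather is called on every collection interval; metrics and errors are
// handed to the accumulator rather than returned directly.
func (e *Example) Gather(acc telegraf.Accumulator) error {
	acc.AddFields("example", map[string]interface{}{"value": 42}, nil)
	return nil
}

func init() {
	// Register a factory so the agent can construct the plugin when it
	// appears in telegraf.conf.
	inputs.Add("example", func() telegraf.Input { return &Example{} })
}
```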
@@ -141,9 +141,8 @@ func (n *NetResponse) UDPGather() (tags map[string]string, fields map[string]int start := time.Now() // Resolving udpAddr, err := net.ResolveUDPAddr("udp", n.Address) - LocalAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") // Connecting - conn, err := net.DialUDP("udp", LocalAddr, udpAddr) + conn, err := net.DialUDP("udp", nil, udpAddr) // Handle error if err != nil { setResult(ConnectionFailed, fields, tags, n.Expect) @@ -224,9 +223,6 @@ func (n *NetResponse) Gather(acc telegraf.Accumulator) error { } else { return errors.New("Bad protocol") } - for key, value := range returnTags { - tags[key] = value - } // Merge the tags for k, v := range returnTags { tags[k] = v diff --git a/plugins/inputs/nginx_plus/nginx_plus.go b/plugins/inputs/nginx_plus/nginx_plus.go index 089ba7d93..ea3aeb28b 100644 --- a/plugins/inputs/nginx_plus/nginx_plus.go +++ b/plugins/inputs/nginx_plus/nginx_plus.go @@ -14,15 +14,16 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) type NginxPlus struct { - Urls []string + Urls []string `toml:"urls"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + tls.ClientConfig client *http.Client - - ResponseTimeout internal.Duration } var sampleConfig = ` @@ -31,6 +32,13 @@ var sampleConfig = ` # HTTP response timeout (default: 5s) response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` func (n *NginxPlus) SampleConfig() string { @@ -74,14 +82,20 @@ func (n *NginxPlus) Gather(acc telegraf.Accumulator) error { } func (n *NginxPlus) createHttpClient() (*http.Client, error) { - if n.ResponseTimeout.Duration < time.Second { n.ResponseTimeout.Duration = time.Second * 5 } + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + client := &http.Client{ - Transport: &http.Transport{}, - Timeout: n.ResponseTimeout.Duration, + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: n.ResponseTimeout.Duration, } return client, nil diff --git a/plugins/inputs/nginx_plus_api/README.md b/plugins/inputs/nginx_plus_api/README.md new file mode 100644 index 000000000..4ec63b2e8 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/README.md @@ -0,0 +1,255 @@ +# Telegraf Plugin: nginx_plus_api + +Nginx Plus is a commercial version of the open source web server Nginx. The use this plugin you will need a license. For more information about the differences between Nginx (F/OSS) and Nginx Plus, [click here](https://www.nginx.com/blog/whats-difference-nginx-foss-nginx-plus/). + +### Configuration: + +``` +# Read Nginx Plus API advanced status information +[[inputs.nginx_plus_api]] + ## An array of Nginx API URIs to gather stats. 
+ urls = ["http://localhost/api"] + # Nginx API version, default: 3 + # api_version = 3 +``` + +### Migration from Nginx Plus (Status) input plugin + +| Nginx Plus | Nginx Plus API | +|---------------------------------|--------------------------------------| +| nginx_plus_processes | nginx_plus_api_processes | +| nginx_plus_connections | nginx_plus_api_connections | +| nginx_plus_ssl | nginx_plus_api_ssl | +| nginx_plus_requests | nginx_plus_api_http_requests | +| nginx_plus_zone | nginx_plus_api_http_server_zones | +| nginx_plus_upstream | nginx_plus_api_http_upstreams | +| nginx_plus_upstream_peer | nginx_plus_api_http_upstream_peers | +| nginx_plus_cache | nginx_plus_api_http_caches | +| nginx_plus_stream_upstream | nginx_plus_api_stream_upstreams | +| nginx_plus_stream_upstream_peer | nginx_plus_api_stream_upstream_peers | +| nginx.stream.zone | nginx_plus_api_stream_server_zones | + +### Measurements by API version + +| Measurement | API version (api_version) | +|--------------------------------------|---------------------------| +| nginx_plus_api_processes | >= 3 | +| nginx_plus_api_connections | >= 3 | +| nginx_plus_api_ssl | >= 3 | +| nginx_plus_api_http_requests | >= 3 | +| nginx_plus_api_http_server_zones | >= 3 | +| nginx_plus_api_http_upstreams | >= 3 | +| nginx_plus_api_http_upstream_peers | >= 3 | +| nginx_plus_api_http_caches | >= 3 | +| nginx_plus_api_stream_upstreams | >= 3 | +| nginx_plus_api_stream_upstream_peers | >= 3 | +| nginx_plus_api_stream_server_zones | >= 3 | +| nginx_plus_api_http_location_zones | >= 5 | +| nginx_plus_api_resolver_zones | >= 5 | + +### Measurements & Fields: + +- nginx_plus_api_processes + - respawned +- nginx_plus_api_connections + - accepted + - dropped + - active + - idle +- nginx_plus_api_ssl + - handshakes + - handshakes_failed + - session_reuses +- nginx_plus_api_http_requests + - total + - current +- nginx_plus_api_http_server_zones + - processing + - requests + - responses_1xx + - responses_2xx + - responses_3xx + - responses_4xx + - responses_5xx + - responses_total + - received + - sent + - discarded +- nginx_plus_api_http_upstreams + - keepalive + - zombies +- nginx_plus_api_http_upstream_peers + - requests + - unavail + - healthchecks_checks + - header_time + - state + - response_time + - active + - healthchecks_last_passed + - weight + - responses_1xx + - responses_2xx + - responses_3xx + - responses_4xx + - responses_5xx + - received + - healthchecks_fails + - healthchecks_unhealthy + - backup + - responses_total + - sent + - fails + - downtime +- nginx_plus_api_http_caches + - size + - max_size + - cold + - hit_responses + - hit_bytes + - stale_responses + - stale_bytes + - updating_responses + - updating_bytes + - revalidated_responses + - revalidated_bytes + - miss_responses + - miss_bytes + - miss_responses_written + - miss_bytes_written + - expired_responses + - expired_bytes + - expired_responses_written + - expired_bytes_written + - bypass_responses + - bypass_bytes + - bypass_responses_written + - bypass_bytes_written +- nginx_plus_api_stream_upstreams + - zombies +- nginx_plus_api_stream_upstream_peers + - unavail + - healthchecks_checks + - healthchecks_fails + - healthchecks_unhealthy + - healthchecks_last_passed + - response_time + - state + - active + - weight + - received + - backup + - sent + - fails + - downtime +- nginx_plus_api_stream_server_zones + - processing + - connections + - received + - sent +- nginx_plus_api_location_zones + - requests + - responses_1xx + - responses_2xx + - responses_3xx + - 
responses_4xx + - responses_5xx + - responses_total + - received + - sent + - discarded +- nginx_plus_api_resolver_zones + - name + - srv + - addr + - noerror + - formerr + - servfail + - nxdomain + - notimp + - refused + - timedout + - unknown + +### Tags: + +- nginx_plus_api_processes, nginx_plus_api_connections, nginx_plus_api_ssl, nginx_plus_api_http_requests + - source + - port + +- nginx_plus_api_http_upstreams, nginx_plus_api_stream_upstreams + - upstream + - source + - port + +- nginx_plus_api_http_server_zones, nginx_plus_api_upstream_server_zones, nginx_plus_api_http_location_zones, nginx_plus_api_resolver_zones + - source + - port + - zone + +- nginx_plus_api_upstream_peers, nginx_plus_api_stream_upstream_peers + - id + - upstream + - source + - port + - upstream_address + +- nginx_plus_api_http_caches + - source + - port + +### Example Output: + +Using this configuration: +``` +[[inputs.nginx_plus_api]] + ## An array of Nginx Plus API URIs to gather stats. + urls = ["http://localhost/api"] +``` + +When run with: +``` +./telegraf -config telegraf.conf -input-filter nginx_plus_api -test +``` + +It produces: +``` +> nginx_plus_api_processes,port=80,source=demo.nginx.com respawned=0i 1570696321000000000 +> nginx_plus_api_connections,port=80,source=demo.nginx.com accepted=68998606i,active=7i,dropped=0i,idle=57i 1570696322000000000 +> nginx_plus_api_ssl,port=80,source=demo.nginx.com handshakes=9398978i,handshakes_failed=289353i,session_reuses=1004389i 1570696322000000000 +> nginx_plus_api_http_requests,port=80,source=demo.nginx.com current=51i,total=264649353i 1570696322000000000 +> nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=hg.nginx.org discarded=5i,processing=0i,received=24123604i,requests=60138i,responses_1xx=0i,responses_2xx=59353i,responses_3xx=531i,responses_4xx=249i,responses_5xx=0i,responses_total=60133i,sent=830165221i 1570696322000000000 +> nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=trac.nginx.org discarded=250i,processing=0i,received=2184618i,requests=12404i,responses_1xx=0i,responses_2xx=8579i,responses_3xx=2513i,responses_4xx=583i,responses_5xx=479i,responses_total=12154i,sent=139384159i 1570696322000000000 +> nginx_plus_api_http_server_zones,port=80,source=demo.nginx.com,zone=lxr.nginx.org discarded=1i,processing=0i,received=1011701i,requests=4523i,responses_1xx=0i,responses_2xx=4332i,responses_3xx=28i,responses_4xx=39i,responses_5xx=123i,responses_total=4522i,sent=72631354i 1570696322000000000 +> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=trac-backend keepalive=0i,zombies=0i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=trac-backend,upstream_address=10.0.0.1:8080 active=0i,backup=false,downtime=0i,fails=0i,header_time=235i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=88581178i,requests=3180i,response_time=235i,responses_1xx=0i,responses_2xx=3168i,responses_3xx=5i,responses_4xx=6i,responses_5xx=0i,responses_total=3179i,sent=1321720i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=trac-backend,upstream_address=10.0.0.1:8081 active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000 +> 
nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=hg-backend keepalive=0i,zombies=0i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=hg-backend,upstream_address=10.0.0.1:8088 active=0i,backup=false,downtime=0i,fails=0i,header_time=22i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=909402572i,requests=18514i,response_time=88i,responses_1xx=0i,responses_2xx=17799i,responses_3xx=531i,responses_4xx=179i,responses_5xx=0i,responses_total=18509i,sent=10608107i,state="up",unavail=0i,weight=5i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=hg-backend,upstream_address=10.0.0.1:8089 active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=lxr-backend keepalive=0i,zombies=0i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=lxr-backend,upstream_address=unix:/tmp/cgi.sock active=0i,backup=false,downtime=0i,fails=123i,header_time=91i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=71782888i,requests=4354i,response_time=91i,responses_1xx=0i,responses_2xx=4230i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=4230i,sent=3088656i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=lxr-backend,upstream_address=unix:/tmp/cgib.sock active=0i,backup=true,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,max_conns=42i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_upstreams,port=80,source=demo.nginx.com,upstream=demo-backend keepalive=0i,zombies=0i 1570696322000000000 +> nginx_plus_api_http_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=demo-backend,upstream_address=10.0.0.2:15431 active=0i,backup=false,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,requests=0i,responses_1xx=0i,responses_2xx=0i,responses_3xx=0i,responses_4xx=0i,responses_5xx=0i,responses_total=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696322000000000 +> nginx_plus_api_http_caches,cache=http_cache,port=80,source=demo.nginx.com bypass_bytes=0i,bypass_bytes_written=0i,bypass_responses=0i,bypass_responses_written=0i,cold=false,expired_bytes=381518640i,expired_bytes_written=363449785i,expired_responses=42114i,expired_responses_written=39954i,hit_bytes=6321885979i,hit_responses=596730i,max_size=536870912i,miss_bytes=48512185i,miss_bytes_written=155600i,miss_responses=6052i,miss_responses_written=136i,revalidated_bytes=0i,revalidated_responses=0i,size=765952i,stale_bytes=0i,stale_responses=0i,updating_bytes=0i,updating_responses=0i 1570696323000000000 +> nginx_plus_api_stream_server_zones,port=80,source=demo.nginx.com,zone=postgresql_loadbalancer connections=0i,processing=0i,received=0i,sent=0i 1570696323000000000 +> nginx_plus_api_stream_server_zones,port=80,source=demo.nginx.com,zone=dns_loadbalancer 
connections=0i,processing=0i,received=0i,sent=0i 1570696323000000000 +> nginx_plus_api_stream_upstreams,port=80,source=demo.nginx.com,upstream=postgresql_backends zombies=0i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15432 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15433 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=2,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15434 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=3,port=80,source=demo.nginx.com,upstream=postgresql_backends,upstream_address=10.0.0.2:15435 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstreams,port=80,source=demo.nginx.com,upstream=dns_udp_backends zombies=0i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=0,port=80,source=demo.nginx.com,upstream=dns_udp_backends,upstream_address=10.0.0.5:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=2i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=1,port=80,source=demo.nginx.com,upstream=dns_udp_backends,upstream_address=10.0.0.2:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="up",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstream_peers,id=2,port=80,source=demo.nginx.com,upstream=dns_udp_backends,upstream_address=10.0.0.7:53 active=0i,backup=false,connections=0i,downtime=0i,fails=0i,healthchecks_checks=0i,healthchecks_fails=0i,healthchecks_unhealthy=0i,received=0i,sent=0i,state="down",unavail=0i,weight=1i 1570696323000000000 +> nginx_plus_api_stream_upstreams,port=80,source=demo.nginx.com,upstream=unused_tcp_backends zombies=0i 1570696323000000000 +> nginx_plus_api_http_location_zones,port=80,source=demo.nginx.com,zone=swagger discarded=0i,received=1622i,requests=8i,responses_1xx=0i,responses_2xx=7i,responses_3xx=0i,responses_4xx=1i,responses_5xx=0i,responses_total=8i,sent=638333i 1570696323000000000 +> nginx_plus_api_http_location_zones,port=80,source=demo.nginx.com,zone=api-calls discarded=64i,received=337530181i,requests=1726513i,responses_1xx=0i,responses_2xx=1726428i,responses_3xx=0i,responses_4xx=21i,responses_5xx=0i,responses_total=1726449i,sent=1902577668i 1570696323000000000 +> nginx_plus_api_resolver_zones,port=80,source=demo.nginx.com,zone=resolver1 addr=0i,formerr=0i,name=0i,noerror=0i,notimp=0i,nxdomain=0i,refused=0i,servfail=0i,srv=0i,timedout=0i,unknown=0i 1570696324000000000 
+``` + +### Reference material + +[api documentation](http://demo.nginx.com/swagger-ui/#/) diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api.go b/plugins/inputs/nginx_plus_api/nginx_plus_api.go new file mode 100644 index 000000000..addb813e3 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api.go @@ -0,0 +1,132 @@ +package nginx_plus_api + +import ( + "fmt" + "net/http" + "net/url" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type NginxPlusApi struct { + Urls []string `toml:"urls"` + ApiVersion int64 `toml:"api_version"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + tls.ClientConfig + + client *http.Client +} + +const ( + // Default settings + defaultApiVersion = 3 + + // Paths + processesPath = "processes" + connectionsPath = "connections" + sslPath = "ssl" + + httpRequestsPath = "http/requests" + httpServerZonesPath = "http/server_zones" + httpLocationZonesPath = "http/location_zones" + httpUpstreamsPath = "http/upstreams" + httpCachesPath = "http/caches" + + resolverZonesPath = "resolvers" + + streamServerZonesPath = "stream/server_zones" + streamUpstreamsPath = "stream/upstreams" +) + +var sampleConfig = ` + ## An array of API URI to gather stats. + urls = ["http://localhost/api"] + + # Nginx API version, default: 3 + # api_version = 3 + + # HTTP response timeout (default: 5s) + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +func (n *NginxPlusApi) SampleConfig() string { + return sampleConfig +} + +func (n *NginxPlusApi) Description() string { + return "Read Nginx Plus Api documentation" +} + +func (n *NginxPlusApi) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + // Create an HTTP client that is re-used for each + // collection interval + + if n.ApiVersion == 0 { + n.ApiVersion = defaultApiVersion + } + + if n.client == nil { + client, err := n.createHttpClient() + if err != nil { + return err + } + n.client = client + } + + for _, u := range n.Urls { + addr, err := url.Parse(u) + if err != nil { + acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue + } + + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + n.gatherMetrics(addr, acc) + }(addr) + } + + wg.Wait() + return nil +} + +func (n *NginxPlusApi) createHttpClient() (*http.Client, error) { + if n.ResponseTimeout.Duration < time.Second { + n.ResponseTimeout.Duration = time.Second * 5 + } + + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: n.ResponseTimeout.Duration, + } + + return client, nil +} + +func init() { + inputs.Add("nginx_plus_api", func() telegraf.Input { + return &NginxPlusApi{} + }) +} diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go new file mode 100644 index 000000000..6aaaff2d3 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics.go @@ -0,0 +1,576 @@ +package nginx_plus_api + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/influxdata/telegraf" +) + 
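// The gatherUrl helper below composes every endpoint as
// "<configured url>/<api_version>/<path>". For example, with the sample
// configuration above (urls = ["http://localhost/api"]) and the default
// api_version of 3, the HTTP upstreams endpoint is requested roughly as in
// this illustrative sketch of the call made inside gatherUrl:
//
//	url := fmt.Sprintf("%s/%d/%s", "http://localhost/api", 3, httpUpstreamsPath)
//	// -> "http://localhost/api/3/http/upstreams"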
+var ( + // errNotFound signals that the NGINX API routes does not exist. + errNotFound = errors.New("not found") +) + +func (n *NginxPlusApi) gatherMetrics(addr *url.URL, acc telegraf.Accumulator) { + addError(acc, n.gatherProcessesMetrics(addr, acc)) + addError(acc, n.gatherConnectionsMetrics(addr, acc)) + addError(acc, n.gatherSslMetrics(addr, acc)) + addError(acc, n.gatherHttpRequestsMetrics(addr, acc)) + addError(acc, n.gatherHttpServerZonesMetrics(addr, acc)) + addError(acc, n.gatherHttpUpstreamsMetrics(addr, acc)) + addError(acc, n.gatherHttpCachesMetrics(addr, acc)) + addError(acc, n.gatherStreamServerZonesMetrics(addr, acc)) + addError(acc, n.gatherStreamUpstreamsMetrics(addr, acc)) + + if n.ApiVersion >= 5 { + addError(acc, n.gatherHttpLocationZonesMetrics(addr, acc)) + addError(acc, n.gatherResolverZonesMetrics(addr, acc)) + } +} + +func addError(acc telegraf.Accumulator, err error) { + // This plugin has hardcoded API resource paths it checks that may not + // be in the nginx.conf. Currently, this is to prevent logging of + // paths that are not configured. + // + // The correct solution is to do a GET to /api to get the available paths + // on the server rather than simply ignore. + if err != errNotFound { + acc.AddError(err) + } +} + +func (n *NginxPlusApi) gatherUrl(addr *url.URL, path string) ([]byte, error) { + url := fmt.Sprintf("%s/%d/%s", addr.String(), n.ApiVersion, path) + resp, err := n.client.Get(url) + + if err != nil { + return nil, fmt.Errorf("error making HTTP request to %s: %s", url, err) + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + case http.StatusNotFound: + // format as special error to catch and ignore as some nginx API + // features are either optional, or only available in some versions + return nil, errNotFound + default: + return nil, fmt.Errorf("%s returned HTTP status %s", url, resp.Status) + } + + contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] + switch contentType { + case "application/json": + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return body, nil + default: + return nil, fmt.Errorf("%s returned unexpected content type %s", url, contentType) + } +} + +func (n *NginxPlusApi) gatherProcessesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, processesPath) + if err != nil { + return err + } + + var processes = &Processes{} + + if err := json.Unmarshal(body, processes); err != nil { + return err + } + + acc.AddFields( + "nginx_plus_api_processes", + map[string]interface{}{ + "respawned": processes.Respawned, + }, + getTags(addr), + ) + + return nil +} + +func (n *NginxPlusApi) gatherConnectionsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, connectionsPath) + if err != nil { + return err + } + + var connections = &Connections{} + + if err := json.Unmarshal(body, connections); err != nil { + return err + } + + acc.AddFields( + "nginx_plus_api_connections", + map[string]interface{}{ + "accepted": connections.Accepted, + "dropped": connections.Dropped, + "active": connections.Active, + "idle": connections.Idle, + }, + getTags(addr), + ) + + return nil +} + +func (n *NginxPlusApi) gatherSslMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, sslPath) + if err != nil { + return err + } + + var ssl = &Ssl{} + + if err := json.Unmarshal(body, ssl); err != nil { + return err + } + + acc.AddFields( + "nginx_plus_api_ssl", + 
map[string]interface{}{ + "handshakes": ssl.Handshakes, + "handshakes_failed": ssl.HandshakesFailed, + "session_reuses": ssl.SessionReuses, + }, + getTags(addr), + ) + + return nil +} + +func (n *NginxPlusApi) gatherHttpRequestsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpRequestsPath) + if err != nil { + return err + } + + var httpRequests = &HttpRequests{} + + if err := json.Unmarshal(body, httpRequests); err != nil { + return err + } + + acc.AddFields( + "nginx_plus_api_http_requests", + map[string]interface{}{ + "total": httpRequests.Total, + "current": httpRequests.Current, + }, + getTags(addr), + ) + + return nil +} + +func (n *NginxPlusApi) gatherHttpServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpServerZonesPath) + if err != nil { + return err + } + + var httpServerZones HttpServerZones + + if err := json.Unmarshal(body, &httpServerZones); err != nil { + return err + } + + tags := getTags(addr) + + for zoneName, zone := range httpServerZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + acc.AddFields( + "nginx_plus_api_http_server_zones", + func() map[string]interface{} { + result := map[string]interface{}{ + "processing": zone.Processing, + "requests": zone.Requests, + "responses_1xx": zone.Responses.Responses1xx, + "responses_2xx": zone.Responses.Responses2xx, + "responses_3xx": zone.Responses.Responses3xx, + "responses_4xx": zone.Responses.Responses4xx, + "responses_5xx": zone.Responses.Responses5xx, + "responses_total": zone.Responses.Total, + "received": zone.Received, + "sent": zone.Sent, + } + if zone.Discarded != nil { + result["discarded"] = *zone.Discarded + } + return result + }(), + zoneTags, + ) + } + + return nil +} + +// Added in 5 API version +func (n *NginxPlusApi) gatherHttpLocationZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpLocationZonesPath) + if err != nil { + return err + } + + var httpLocationZones HttpLocationZones + + if err := json.Unmarshal(body, &httpLocationZones); err != nil { + return err + } + + tags := getTags(addr) + + for zoneName, zone := range httpLocationZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + acc.AddFields( + "nginx_plus_api_http_location_zones", + func() map[string]interface{} { + result := map[string]interface{}{ + "requests": zone.Requests, + "responses_1xx": zone.Responses.Responses1xx, + "responses_2xx": zone.Responses.Responses2xx, + "responses_3xx": zone.Responses.Responses3xx, + "responses_4xx": zone.Responses.Responses4xx, + "responses_5xx": zone.Responses.Responses5xx, + "responses_total": zone.Responses.Total, + "received": zone.Received, + "sent": zone.Sent, + } + if zone.Discarded != nil { + result["discarded"] = *zone.Discarded + } + return result + }(), + zoneTags, + ) + } + + return nil +} + +func (n *NginxPlusApi) gatherHttpUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpUpstreamsPath) + if err != nil { + return err + } + + var httpUpstreams HttpUpstreams + + if err := json.Unmarshal(body, &httpUpstreams); err != nil { + return err + } + + tags := getTags(addr) + + for upstreamName, upstream := range httpUpstreams { + upstreamTags := map[string]string{} + for k, v := range tags { + upstreamTags[k] = v + } + upstreamTags["upstream"] = upstreamName + upstreamFields 
:= map[string]interface{}{ + "keepalive": upstream.Keepalive, + "zombies": upstream.Zombies, + } + if upstream.Queue != nil { + upstreamFields["queue_size"] = upstream.Queue.Size + upstreamFields["queue_max_size"] = upstream.Queue.MaxSize + upstreamFields["queue_overflows"] = upstream.Queue.Overflows + } + acc.AddFields( + "nginx_plus_api_http_upstreams", + upstreamFields, + upstreamTags, + ) + for _, peer := range upstream.Peers { + peerFields := map[string]interface{}{ + "backup": peer.Backup, + "weight": peer.Weight, + "state": peer.State, + "active": peer.Active, + "requests": peer.Requests, + "responses_1xx": peer.Responses.Responses1xx, + "responses_2xx": peer.Responses.Responses2xx, + "responses_3xx": peer.Responses.Responses3xx, + "responses_4xx": peer.Responses.Responses4xx, + "responses_5xx": peer.Responses.Responses5xx, + "responses_total": peer.Responses.Total, + "sent": peer.Sent, + "received": peer.Received, + "fails": peer.Fails, + "unavail": peer.Unavail, + "healthchecks_checks": peer.HealthChecks.Checks, + "healthchecks_fails": peer.HealthChecks.Fails, + "healthchecks_unhealthy": peer.HealthChecks.Unhealthy, + "downtime": peer.Downtime, + //"selected": peer.Selected.toInt64, + //"downstart": peer.Downstart.toInt64, + } + if peer.HealthChecks.LastPassed != nil { + peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed + } + if peer.HeaderTime != nil { + peerFields["header_time"] = *peer.HeaderTime + } + if peer.ResponseTime != nil { + peerFields["response_time"] = *peer.ResponseTime + } + if peer.MaxConns != nil { + peerFields["max_conns"] = *peer.MaxConns + } + peerTags := map[string]string{} + for k, v := range upstreamTags { + peerTags[k] = v + } + peerTags["upstream_address"] = peer.Server + if peer.ID != nil { + peerTags["id"] = strconv.Itoa(*peer.ID) + } + acc.AddFields("nginx_plus_api_http_upstream_peers", peerFields, peerTags) + } + } + return nil +} + +func (n *NginxPlusApi) gatherHttpCachesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, httpCachesPath) + if err != nil { + return err + } + + var httpCaches HttpCaches + + if err := json.Unmarshal(body, &httpCaches); err != nil { + return err + } + + tags := getTags(addr) + + for cacheName, cache := range httpCaches { + cacheTags := map[string]string{} + for k, v := range tags { + cacheTags[k] = v + } + cacheTags["cache"] = cacheName + acc.AddFields( + "nginx_plus_api_http_caches", + map[string]interface{}{ + "size": cache.Size, + "max_size": cache.MaxSize, + "cold": cache.Cold, + "hit_responses": cache.Hit.Responses, + "hit_bytes": cache.Hit.Bytes, + "stale_responses": cache.Stale.Responses, + "stale_bytes": cache.Stale.Bytes, + "updating_responses": cache.Updating.Responses, + "updating_bytes": cache.Updating.Bytes, + "revalidated_responses": cache.Revalidated.Responses, + "revalidated_bytes": cache.Revalidated.Bytes, + "miss_responses": cache.Miss.Responses, + "miss_bytes": cache.Miss.Bytes, + "miss_responses_written": cache.Miss.ResponsesWritten, + "miss_bytes_written": cache.Miss.BytesWritten, + "expired_responses": cache.Expired.Responses, + "expired_bytes": cache.Expired.Bytes, + "expired_responses_written": cache.Expired.ResponsesWritten, + "expired_bytes_written": cache.Expired.BytesWritten, + "bypass_responses": cache.Bypass.Responses, + "bypass_bytes": cache.Bypass.Bytes, + "bypass_responses_written": cache.Bypass.ResponsesWritten, + "bypass_bytes_written": cache.Bypass.BytesWritten, + }, + cacheTags, + ) + } + + return nil +} + +func (n 
*NginxPlusApi) gatherStreamServerZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, streamServerZonesPath) + if err != nil { + return err + } + + var streamServerZones StreamServerZones + + if err := json.Unmarshal(body, &streamServerZones); err != nil { + return err + } + + tags := getTags(addr) + + for zoneName, zone := range streamServerZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + acc.AddFields( + "nginx_plus_api_stream_server_zones", + map[string]interface{}{ + "processing": zone.Processing, + "connections": zone.Connections, + "received": zone.Received, + "sent": zone.Sent, + }, + zoneTags, + ) + } + + return nil +} + +// Added in 5 API version +func (n *NginxPlusApi) gatherResolverZonesMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, resolverZonesPath) + if err != nil { + return err + } + + var resolverZones ResolverZones + + if err := json.Unmarshal(body, &resolverZones); err != nil { + return err + } + + tags := getTags(addr) + + for zoneName, resolver := range resolverZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + acc.AddFields( + "nginx_plus_api_resolver_zones", + map[string]interface{}{ + "name": resolver.Requests.Name, + "srv": resolver.Requests.Srv, + "addr": resolver.Requests.Addr, + + "noerror": resolver.Responses.Noerror, + "formerr": resolver.Responses.Formerr, + "servfail": resolver.Responses.Servfail, + "nxdomain": resolver.Responses.Nxdomain, + "notimp": resolver.Responses.Notimp, + "refused": resolver.Responses.Refused, + "timedout": resolver.Responses.Timedout, + "unknown": resolver.Responses.Unknown, + }, + zoneTags, + ) + } + + return nil +} + +func (n *NginxPlusApi) gatherStreamUpstreamsMetrics(addr *url.URL, acc telegraf.Accumulator) error { + body, err := n.gatherUrl(addr, streamUpstreamsPath) + if err != nil { + return err + } + + var streamUpstreams StreamUpstreams + + if err := json.Unmarshal(body, &streamUpstreams); err != nil { + return err + } + + tags := getTags(addr) + + for upstreamName, upstream := range streamUpstreams { + upstreamTags := map[string]string{} + for k, v := range tags { + upstreamTags[k] = v + } + upstreamTags["upstream"] = upstreamName + acc.AddFields( + "nginx_plus_api_stream_upstreams", + map[string]interface{}{ + "zombies": upstream.Zombies, + }, + upstreamTags, + ) + for _, peer := range upstream.Peers { + peerFields := map[string]interface{}{ + "backup": peer.Backup, + "weight": peer.Weight, + "state": peer.State, + "active": peer.Active, + "connections": peer.Connections, + "sent": peer.Sent, + "received": peer.Received, + "fails": peer.Fails, + "unavail": peer.Unavail, + "healthchecks_checks": peer.HealthChecks.Checks, + "healthchecks_fails": peer.HealthChecks.Fails, + "healthchecks_unhealthy": peer.HealthChecks.Unhealthy, + "downtime": peer.Downtime, + } + if peer.HealthChecks.LastPassed != nil { + peerFields["healthchecks_last_passed"] = *peer.HealthChecks.LastPassed + } + if peer.ConnectTime != nil { + peerFields["connect_time"] = *peer.ConnectTime + } + if peer.FirstByteTime != nil { + peerFields["first_byte_time"] = *peer.FirstByteTime + } + if peer.ResponseTime != nil { + peerFields["response_time"] = *peer.ResponseTime + } + peerTags := map[string]string{} + for k, v := range upstreamTags { + peerTags[k] = v + } + peerTags["upstream_address"] = peer.Server + peerTags["id"] = 
strconv.Itoa(peer.ID) + + acc.AddFields("nginx_plus_api_stream_upstream_peers", peerFields, peerTags) + } + } + + return nil +} + +func getTags(addr *url.URL) map[string]string { + h := addr.Host + host, port, err := net.SplitHostPort(h) + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + return map[string]string{"source": host, "port": port} +} diff --git a/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go new file mode 100644 index 000000000..f309886cf --- /dev/null +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_metrics_test.go @@ -0,0 +1,1335 @@ +package nginx_plus_api + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const processesPayload = ` +{ + "respawned": 0 +} +` + +const connectionsPayload = ` +{ + "accepted": 1234567890000, + "dropped": 2345678900000, + "active": 345, + "idle": 567 +} +` + +const sslPayload = ` +{ + "handshakes": 79572, + "handshakes_failed": 21025, + "session_reuses": 15762 +} +` + +const resolverZonesPayload = ` +{ + "resolver_zone1": { + "requests": { + "name": 25460, + "srv": 130, + "addr": 2580 + }, + "responses": { + "noerror": 26499, + "formerr": 0, + "servfail": 3, + "nxdomain": 0, + "notimp": 0, + "refused": 0, + "timedout": 243, + "unknown": 478 + } + }, + "resolver_zone2": { + "requests": { + "name": 325460, + "srv": 1130, + "addr": 12580 + }, + "responses": { + "noerror": 226499, + "formerr": 0, + "servfail": 283, + "nxdomain": 0, + "notimp": 0, + "refused": 0, + "timedout": 743, + "unknown": 1478 + } + } +} +` + +const httpRequestsPayload = ` +{ + "total": 10624511, + "current": 4 +} +` + +const httpServerZonesPayload = ` +{ + "site1": { + "processing": 2, + "requests": 736395, + "responses": { + "1xx": 0, + "2xx": 727290, + "3xx": 4614, + "4xx": 934, + "5xx": 1535, + "total": 734373 + }, + "discarded": 2020, + "received": 180157219, + "sent": 20183175459 + }, + "site2": { + "processing": 1, + "requests": 185307, + "responses": { + "1xx": 0, + "2xx": 112674, + "3xx": 45383, + "4xx": 2504, + "5xx": 4419, + "total": 164980 + }, + "discarded": 20326, + "received": 51575327, + "sent": 2983241510 + } +} +` + +const httpLocationZonesPayload = ` +{ + "site1": { + "requests": 736395, + "responses": { + "1xx": 0, + "2xx": 727290, + "3xx": 4614, + "4xx": 934, + "5xx": 1535, + "total": 734373 + }, + "discarded": 2020, + "received": 180157219, + "sent": 20183175459 + }, + "site2": { + "requests": 185307, + "responses": { + "1xx": 0, + "2xx": 112674, + "3xx": 45383, + "4xx": 2504, + "5xx": 4419, + "total": 164980 + }, + "discarded": 20326, + "received": 51575327, + "sent": 2983241510 + } +} +` + +const httpUpstreamsPayload = ` +{ + "trac-backend": { + "peers": [ + { + "id": 0, + "server": "10.0.0.1:8088", + "name": "10.0.0.1:8088", + "backup": false, + "weight": 5, + "state": "up", + "active": 0, + "requests": 667231, + "header_time": 20, + "response_time": 36, + "responses": { + "1xx": 0, + "2xx": 666310, + "3xx": 0, + "4xx": 915, + "5xx": 6, + "total": 667231 + }, + "sent": 251946292, + "received": 19222475454, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26214, + "fails": 0, + "unhealthy": 0, + "last_passed": true + }, + "downtime": 0, + "downstart": {}, + "selected": {} + }, + { + "id": 1, + "server": "10.0.0.1:8089", + 
"name": "10.0.0.1:8089", + "backup": true, + "weight": 1, + "state": "unhealthy", + "active": 0, + "requests": 0, + "responses": { + "1xx": 0, + "2xx": 0, + "3xx": 0, + "4xx": 0, + "5xx": 0, + "total": 0 + }, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26284, + "fails": 26284, + "unhealthy": 1, + "last_passed": false + }, + "downtime": 262925617, + "downstart": {}, + "selected": {} + } + ], + "keepalive": 0, + "zombies": 0, + "zone": "trac-backend" + }, + "hg-backend": { + "peers": [ + { + "id": 0, + "server": "10.0.0.1:8088", + "name": "10.0.0.1:8088", + "backup": false, + "weight": 5, + "state": "up", + "active": 0, + "requests": 667231, + "header_time": 20, + "response_time": 36, + "responses": { + "1xx": 0, + "2xx": 666310, + "3xx": 0, + "4xx": 915, + "5xx": 6, + "total": 667231 + }, + "sent": 251946292, + "received": 19222475454, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26214, + "fails": 0, + "unhealthy": 0, + "last_passed": true + }, + "downtime": 0, + "downstart": {}, + "selected": {} + }, + { + "id": 1, + "server": "10.0.0.1:8089", + "name": "10.0.0.1:8089", + "backup": true, + "weight": 1, + "state": "unhealthy", + "active": 0, + "requests": 0, + "responses": { + "1xx": 0, + "2xx": 0, + "3xx": 0, + "4xx": 0, + "5xx": 0, + "total": 0 + }, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26284, + "fails": 26284, + "unhealthy": 1, + "last_passed": false + }, + "downtime": 262925617, + "downstart": {}, + "selected": {} + } + ], + "keepalive": 0, + "zombies": 0, + "zone": "hg-backend" + } +} +` + +const httpCachesPayload = ` +{ + "http-cache": { + "size": 530915328, + "max_size": 536870912, + "cold": false, + "hit": { + "responses": 254032, + "bytes": 6685627875 + }, + "stale": { + "responses": 0, + "bytes": 0 + }, + "updating": { + "responses": 0, + "bytes": 0 + }, + "revalidated": { + "responses": 0, + "bytes": 0 + }, + "miss": { + "responses": 1619201, + "bytes": 53841943822 + }, + "expired": { + "responses": 45859, + "bytes": 1656847080, + "responses_written": 44992, + "bytes_written": 1641825173 + }, + "bypass": { + "responses": 200187, + "bytes": 5510647548, + "responses_written": 200173, + "bytes_written": 44992 + } + }, + "frontend-cache": { + "size": 530915328, + "max_size": 536870912, + "cold": false, + "hit": { + "responses": 254032, + "bytes": 6685627875 + }, + "stale": { + "responses": 0, + "bytes": 0 + }, + "updating": { + "responses": 0, + "bytes": 0 + }, + "revalidated": { + "responses": 0, + "bytes": 0 + }, + "miss": { + "responses": 1619201, + "bytes": 53841943822 + }, + "expired": { + "responses": 45859, + "bytes": 1656847080, + "responses_written": 44992, + "bytes_written": 1641825173 + }, + "bypass": { + "responses": 200187, + "bytes": 5510647548, + "responses_written": 200173, + "bytes_written": 44992 + } + } +} +` + +const streamUpstreamsPayload = ` +{ + "mysql_backends": { + "peers": [ + { + "id": 0, + "server": "10.0.0.1:12345", + "name": "10.0.0.1:12345", + "backup": false, + "weight": 5, + "state": "up", + "active": 0, + "max_conns": 30, + "connecions": 1231, + "sent": 251946292, + "received": 19222475454, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26214, + "fails": 0, + "unhealthy": 0, + "last_passed": true + }, + "downtime": 0, + "downstart": {}, + "selected": {} + }, + { + "id": 1, + "server": "10.0.0.1:12346", + "name": "10.0.0.1:12346", + "backup": true, + "weight": 1, + "state": "unhealthy", + "active": 0, + "max_conns": 
30, + "connections": 0, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26284, + "fails": 26284, + "unhealthy": 1, + "last_passed": false + }, + "downtime": 262925617, + "downstart": {}, + "selected": {} + } + ], + "zombies": 0, + "zone": "mysql_backends" + }, + "dns": { + "peers": [ + { + "id": 0, + "server": "10.0.0.1:12347", + "name": "10.0.0.1:12347", + "backup": false, + "weight": 5, + "state": "up", + "active": 0, + "max_conns": 30, + "connections": 667231, + "sent": 251946292, + "received": 19222475454, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26214, + "fails": 0, + "unhealthy": 0, + "last_passed": true + }, + "downtime": 0, + "downstart": {}, + "selected": {} + }, + { + "id": 1, + "server": "10.0.0.1:12348", + "name": "10.0.0.1:12348", + "backup": true, + "weight": 1, + "state": "unhealthy", + "active": 0, + "connections": 0, + "max_conns": 30, + "sent": 0, + "received": 0, + "fails": 0, + "unavail": 0, + "health_checks": { + "checks": 26284, + "fails": 26284, + "unhealthy": 1, + "last_passed": false + }, + "downtime": 262925617, + "downstart": {}, + "selected": {} + } + ], + "zombies": 0, + "zone": "dns" + } +} +` + +const streamServerZonesPayload = ` +{ + "mysql-frontend": { + "processing": 2, + "connections": 270925, + "sessions": { + "2xx": 155564, + "4xx": 0, + "5xx": 0, + "total": 270925 + }, + "discarded": 0, + "received": 28988975, + "sent": 3879346317 + }, + "dns": { + "processing": 1, + "connections": 155569, + "sessions": { + "2xx": 155564, + "4xx": 0, + "5xx": 0, + "total": 155569 + }, + "discarded": 0, + "received": 4200363, + "sent": 20489184 + } +} +` + +func TestGatherProcessesMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, processesPath, defaultApiVersion, processesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherProcessesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_processes", + map[string]interface{}{ + "respawned": int(0), + }, + map[string]string{ + "source": host, + "port": port, + }) +} + +func TestGatherConnectionsMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, connectionsPath, defaultApiVersion, connectionsPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherConnectionsMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_connections", + map[string]interface{}{ + "accepted": int64(1234567890000), + "dropped": int64(2345678900000), + "active": int64(345), + "idle": int64(567), + }, + map[string]string{ + "source": host, + "port": port, + }) +} + +func TestGatherSslMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, sslPath, defaultApiVersion, sslPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherSslMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_ssl", + map[string]interface{}{ + "handshakes": int64(79572), + "handshakes_failed": int64(21025), + "session_reuses": int64(15762), + }, + map[string]string{ + "source": host, + "port": port, + }) +} + +func TestGatherHttpRequestsMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, httpRequestsPath, defaultApiVersion, httpRequestsPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherHttpRequestsMetrics(addr, &acc)) + + 
acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_requests", + map[string]interface{}{ + "total": int64(10624511), + "current": int64(4), + }, + map[string]string{ + "source": host, + "port": port, + }) +} + +func TestGatherHttpServerZonesMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, httpServerZonesPath, defaultApiVersion, httpServerZonesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherHttpServerZonesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_server_zones", + map[string]interface{}{ + "discarded": int64(2020), + "processing": int(2), + "received": int64(180157219), + "requests": int64(736395), + "responses_1xx": int64(0), + "responses_2xx": int64(727290), + "responses_3xx": int64(4614), + "responses_4xx": int64(934), + "responses_5xx": int64(1535), + "responses_total": int64(734373), + "sent": int64(20183175459), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "site1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_server_zones", + map[string]interface{}{ + "discarded": int64(20326), + "processing": int(1), + "received": int64(51575327), + "requests": int64(185307), + "responses_1xx": int64(0), + "responses_2xx": int64(112674), + "responses_3xx": int64(45383), + "responses_4xx": int64(2504), + "responses_5xx": int64(4419), + "responses_total": int64(164980), + "sent": int64(2983241510), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "site2", + }) +} + +func TestGatherHttpLocationZonesMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, httpLocationZonesPath, defaultApiVersion, httpLocationZonesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherHttpLocationZonesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_location_zones", + map[string]interface{}{ + "discarded": int64(2020), + "received": int64(180157219), + "requests": int64(736395), + "responses_1xx": int64(0), + "responses_2xx": int64(727290), + "responses_3xx": int64(4614), + "responses_4xx": int64(934), + "responses_5xx": int64(1535), + "responses_total": int64(734373), + "sent": int64(20183175459), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "site1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_location_zones", + map[string]interface{}{ + "discarded": int64(20326), + "received": int64(51575327), + "requests": int64(185307), + "responses_1xx": int64(0), + "responses_2xx": int64(112674), + "responses_3xx": int64(45383), + "responses_4xx": int64(2504), + "responses_5xx": int64(4419), + "responses_total": int64(164980), + "sent": int64(2983241510), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "site2", + }) +} + +func TestGatherHttpUpstreamsMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, httpUpstreamsPath, defaultApiVersion, httpUpstreamsPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherHttpUpstreamsMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstreams", + map[string]interface{}{ + "keepalive": int(0), + "zombies": int(0), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "trac-backend", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstreams", + 
map[string]interface{}{ + "keepalive": int(0), + "zombies": int(0), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "hg-backend", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": false, + "downtime": int64(0), + "fails": int64(0), + "header_time": int64(20), + "healthchecks_checks": int64(26214), + "healthchecks_fails": int64(0), + "healthchecks_last_passed": true, + "healthchecks_unhealthy": int64(0), + "received": int64(19222475454), + "requests": int64(667231), + "response_time": int64(36), + "responses_1xx": int64(0), + "responses_2xx": int64(666310), + "responses_3xx": int64(0), + "responses_4xx": int64(915), + "responses_5xx": int64(6), + "responses_total": int64(667231), + "sent": int64(251946292), + "state": "up", + "unavail": int64(0), + "weight": int(5), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "trac-backend", + "upstream_address": "10.0.0.1:8088", + "id": "0", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": true, + "downtime": int64(262925617), + "fails": int64(0), + "healthchecks_checks": int64(26284), + "healthchecks_fails": int64(26284), + "healthchecks_last_passed": false, + "healthchecks_unhealthy": int64(1), + "received": int64(0), + "requests": int64(0), + "responses_1xx": int64(0), + "responses_2xx": int64(0), + "responses_3xx": int64(0), + "responses_4xx": int64(0), + "responses_5xx": int64(0), + "responses_total": int64(0), + "sent": int64(0), + "state": "unhealthy", + "unavail": int64(0), + "weight": int(1), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "trac-backend", + "upstream_address": "10.0.0.1:8089", + "id": "1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": false, + "downtime": int64(0), + "fails": int64(0), + "header_time": int64(20), + "healthchecks_checks": int64(26214), + "healthchecks_fails": int64(0), + "healthchecks_last_passed": true, + "healthchecks_unhealthy": int64(0), + "received": int64(19222475454), + "requests": int64(667231), + "response_time": int64(36), + "responses_1xx": int64(0), + "responses_2xx": int64(666310), + "responses_3xx": int64(0), + "responses_4xx": int64(915), + "responses_5xx": int64(6), + "responses_total": int64(667231), + "sent": int64(251946292), + "state": "up", + "unavail": int64(0), + "weight": int(5), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "hg-backend", + "upstream_address": "10.0.0.1:8088", + "id": "0", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": true, + "downtime": int64(262925617), + "fails": int64(0), + "healthchecks_checks": int64(26284), + "healthchecks_fails": int64(26284), + "healthchecks_last_passed": false, + "healthchecks_unhealthy": int64(1), + "received": int64(0), + "requests": int64(0), + "responses_1xx": int64(0), + "responses_2xx": int64(0), + "responses_3xx": int64(0), + "responses_4xx": int64(0), + "responses_5xx": int64(0), + "responses_total": int64(0), + "sent": int64(0), + "state": "unhealthy", + "unavail": int64(0), + "weight": int(1), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "hg-backend", + "upstream_address": "10.0.0.1:8089", + "id": "1", + }) +} + +func 
TestGatherHttpCachesMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, httpCachesPath, defaultApiVersion, httpCachesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherHttpCachesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_caches", + map[string]interface{}{ + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), + "cold": false, + "expired_bytes": int64(1656847080), + "expired_bytes_written": int64(1641825173), + "expired_responses": int64(45859), + "expired_responses_written": int64(44992), + "hit_bytes": int64(6685627875), + "hit_responses": int64(254032), + "max_size": int64(536870912), + "miss_bytes": int64(53841943822), + "miss_bytes_written": int64(0), + "miss_responses": int64(1619201), + "miss_responses_written": int64(0), + "revalidated_bytes": int64(0), + "revalidated_responses": int64(0), + "size": int64(530915328), + "stale_bytes": int64(0), + "stale_responses": int64(0), + "updating_bytes": int64(0), + "updating_responses": int64(0), + }, + map[string]string{ + "source": host, + "port": port, + "cache": "http-cache", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_http_caches", + map[string]interface{}{ + "bypass_bytes": int64(5510647548), + "bypass_bytes_written": int64(44992), + "bypass_responses": int64(200187), + "bypass_responses_written": int64(200173), + "cold": false, + "expired_bytes": int64(1656847080), + "expired_bytes_written": int64(1641825173), + "expired_responses": int64(45859), + "expired_responses_written": int64(44992), + "hit_bytes": int64(6685627875), + "hit_responses": int64(254032), + "max_size": int64(536870912), + "miss_bytes": int64(53841943822), + "miss_bytes_written": int64(0), + "miss_responses": int64(1619201), + "miss_responses_written": int64(0), + "revalidated_bytes": int64(0), + "revalidated_responses": int64(0), + "size": int64(530915328), + "stale_bytes": int64(0), + "stale_responses": int64(0), + "updating_bytes": int64(0), + "updating_responses": int64(0), + }, + map[string]string{ + "source": host, + "port": port, + "cache": "frontend-cache", + }) +} + +func TestGatherResolverZonesMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, resolverZonesPath, defaultApiVersion, resolverZonesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherResolverZonesMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_resolver_zones", + map[string]interface{}{ + "name": int64(25460), + "srv": int64(130), + "addr": int64(2580), + "noerror": int64(26499), + "formerr": int64(0), + "servfail": int64(3), + "nxdomain": int64(0), + "notimp": int64(0), + "refused": int64(0), + "timedout": int64(243), + "unknown": int64(478), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "resolver_zone1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_resolver_zones", + map[string]interface{}{ + "name": int64(325460), + "srv": int64(1130), + "addr": int64(12580), + "noerror": int64(226499), + "formerr": int64(0), + "servfail": int64(283), + "nxdomain": int64(0), + "notimp": int64(0), + "refused": int64(0), + "timedout": int64(743), + "unknown": int64(1478), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "resolver_zone2", + }) +} + +func TestGatherStreamUpstreams(t *testing.T) 
{ + ts, n := prepareEndpoint(t, streamUpstreamsPath, defaultApiVersion, streamUpstreamsPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherStreamUpstreamsMetrics(addr, &acc)) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstreams", + map[string]interface{}{ + "zombies": int(0), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "mysql_backends", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstreams", + map[string]interface{}{ + "zombies": int(0), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "dns", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": false, + "connections": int64(0), + "downtime": int64(0), + "fails": int64(0), + "healthchecks_checks": int64(26214), + "healthchecks_fails": int64(0), + "healthchecks_last_passed": true, + "healthchecks_unhealthy": int64(0), + "received": int64(19222475454), + "sent": int64(251946292), + "state": "up", + "unavail": int64(0), + "weight": int(5), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "mysql_backends", + "upstream_address": "10.0.0.1:12345", + "id": "0", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": true, + "connections": int64(0), + "downtime": int64(262925617), + "fails": int64(0), + "healthchecks_checks": int64(26284), + "healthchecks_fails": int64(26284), + "healthchecks_last_passed": false, + "healthchecks_unhealthy": int64(1), + "received": int64(0), + "sent": int64(0), + "state": "unhealthy", + "unavail": int64(0), + "weight": int(1), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "mysql_backends", + "upstream_address": "10.0.0.1:12346", + "id": "1", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": false, + "connections": int64(667231), + "downtime": int64(0), + "fails": int64(0), + "healthchecks_checks": int64(26214), + "healthchecks_fails": int64(0), + "healthchecks_last_passed": true, + "healthchecks_unhealthy": int64(0), + "received": int64(19222475454), + "sent": int64(251946292), + "state": "up", + "unavail": int64(0), + "weight": int(5), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "dns", + "upstream_address": "10.0.0.1:12347", + "id": "0", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_upstream_peers", + map[string]interface{}{ + "active": int(0), + "backup": true, + "connections": int64(0), + "downtime": int64(262925617), + "fails": int64(0), + "healthchecks_checks": int64(26284), + "healthchecks_fails": int64(26284), + "healthchecks_last_passed": false, + "healthchecks_unhealthy": int64(1), + "received": int64(0), + "sent": int64(0), + "state": "unhealthy", + "unavail": int64(0), + "weight": int(1), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "dns", + "upstream_address": "10.0.0.1:12348", + "id": "1", + }) + +} + +func TestGatherStreamServerZonesMetrics(t *testing.T) { + ts, n := prepareEndpoint(t, streamServerZonesPath, defaultApiVersion, streamServerZonesPayload) + defer ts.Close() + + var acc testutil.Accumulator + addr, host, port := prepareAddr(t, ts) + + require.NoError(t, n.gatherStreamServerZonesMetrics(addr, &acc)) 
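	// The assertions below expect one "nginx_plus_api_stream_server_zones" metric per
	// key of the payload map ("mysql-frontend" and "dns"): the gather code copies the
	// map key into the "zone" tag, while source/port come from the httptest server
	// address via prepareAddr. Field types mirror the StreamServerZones struct:
	// processing/connections are int, received/sent are int64.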
+ + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_server_zones", + map[string]interface{}{ + "connections": int(270925), + "processing": int(2), + "received": int64(28988975), + "sent": int64(3879346317), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "mysql-frontend", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_plus_api_stream_server_zones", + map[string]interface{}{ + "connections": int(155569), + "processing": int(1), + "received": int64(4200363), + "sent": int64(20489184), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "dns", + }) +} + +func TestUnavailableEndpoints(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusNotFound) + })) + defer ts.Close() + + n := &NginxPlusApi{ + client: ts.Client(), + } + + addr, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + + var acc testutil.Accumulator + n.gatherMetrics(addr, &acc) + require.NoError(t, acc.FirstError()) +} + +func TestServerError(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer ts.Close() + + n := &NginxPlusApi{ + client: ts.Client(), + } + + addr, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + + var acc testutil.Accumulator + n.gatherMetrics(addr, &acc) + require.Error(t, acc.FirstError()) +} + +func TestMalformedJSON(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintln(w, "this is not JSON") + })) + defer ts.Close() + + n := &NginxPlusApi{ + client: ts.Client(), + } + + addr, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + + var acc testutil.Accumulator + n.gatherMetrics(addr, &acc) + require.Error(t, acc.FirstError()) +} + +func TestUnknownContentType(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/plain") + })) + defer ts.Close() + + n := &NginxPlusApi{ + client: ts.Client(), + } + + addr, err := url.Parse(ts.URL) + if err != nil { + t.Fatal(err) + } + + var acc testutil.Accumulator + n.gatherMetrics(addr, &acc) + require.Error(t, acc.FirstError()) +} + +func prepareAddr(t *testing.T, ts *httptest.Server) (*url.URL, string, string) { + t.Helper() + addr, err := url.Parse(fmt.Sprintf("%s/api", ts.URL)) + if err != nil { + t.Fatal(err) + } + + host, port, err := net.SplitHostPort(addr.Host) + + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + + return addr, host, port +} + +func prepareEndpoint(t *testing.T, path string, apiVersion int64, payload string) (*httptest.Server, *NginxPlusApi) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + + if r.URL.Path == fmt.Sprintf("/api/%d/%s", apiVersion, path) { + rsp = payload + w.Header()["Content-Type"] = []string{"application/json"} + } else { + t.Errorf("unknown request path") + } + + fmt.Fprintln(w, rsp) + })) + + n := &NginxPlusApi{ + Urls: []string{fmt.Sprintf("%s/api", ts.URL)}, + ApiVersion: apiVersion, + } + + client, err := n.createHttpClient() + if err != nil { + t.Fatal(err) + } + n.client = client + + return ts, n +} diff --git 
a/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go new file mode 100644 index 000000000..868bc04e4 --- /dev/null +++ b/plugins/inputs/nginx_plus_api/nginx_plus_api_types.go @@ -0,0 +1,159 @@ +package nginx_plus_api + +type Processes struct { + Respawned int `json:"respawned"` +} + +type Connections struct { + Accepted int64 `json:"accepted"` + Dropped int64 `json:"dropped"` + Active int64 `json:"active"` + Idle int64 `json:"idle"` +} + +type Ssl struct { // added in version 6 + Handshakes int64 `json:"handshakes"` + HandshakesFailed int64 `json:"handshakes_failed"` + SessionReuses int64 `json:"session_reuses"` +} + +type ResolverZones map[string]struct { + Requests struct { + Name int64 `json:"name"` + Srv int64 `json:"srv"` + Addr int64 `json:"addr"` + } `json:"requests"` + Responses struct { + Noerror int64 `json:"noerror"` + Formerr int64 `json:"formerr"` + Servfail int64 `json:"servfail"` + Nxdomain int64 `json:"nxdomain"` + Notimp int64 `json:"notimp"` + Refused int64 `json:"refused"` + Timedout int64 `json:"timedout"` + Unknown int64 `json:"unknown"` + } `json:"responses"` +} + +type HttpRequests struct { + Total int64 `json:"total"` + Current int64 `json:"current"` +} + +type ResponseStats struct { + Responses1xx int64 `json:"1xx"` + Responses2xx int64 `json:"2xx"` + Responses3xx int64 `json:"3xx"` + Responses4xx int64 `json:"4xx"` + Responses5xx int64 `json:"5xx"` + Total int64 `json:"total"` +} + +type HttpServerZones map[string]struct { + Processing int `json:"processing"` + Requests int64 `json:"requests"` + Responses ResponseStats `json:"responses"` + Discarded *int64 `json:"discarded"` // added in version 6 + Received int64 `json:"received"` + Sent int64 `json:"sent"` +} + +type HttpLocationZones map[string]struct { + Requests int64 `json:"requests"` + Responses ResponseStats `json:"responses"` + Discarded *int64 `json:"discarded"` // added in version 6 + Received int64 `json:"received"` + Sent int64 `json:"sent"` +} + +type HealthCheckStats struct { + Checks int64 `json:"checks"` + Fails int64 `json:"fails"` + Unhealthy int64 `json:"unhealthy"` + LastPassed *bool `json:"last_passed"` +} + +type HttpUpstreams map[string]struct { + Peers []struct { + ID *int `json:"id"` // added in version 3 + Server string `json:"server"` + Backup bool `json:"backup"` + Weight int `json:"weight"` + State string `json:"state"` + Active int `json:"active"` + Keepalive *int `json:"keepalive"` // removed in version 5 + MaxConns *int `json:"max_conns"` // added in version 3 + Requests int64 `json:"requests"` + Responses ResponseStats `json:"responses"` + Sent int64 `json:"sent"` + Received int64 `json:"received"` + Fails int64 `json:"fails"` + Unavail int64 `json:"unavail"` + HealthChecks HealthCheckStats `json:"health_checks"` + Downtime int64 `json:"downtime"` + HeaderTime *int64 `json:"header_time"` // added in version 5 + ResponseTime *int64 `json:"response_time"` // added in version 5 + } `json:"peers"` + Keepalive int `json:"keepalive"` + Zombies int `json:"zombies"` // added in version 6 + Queue *struct { // added in version 6 + Size int `json:"size"` + MaxSize int `json:"max_size"` + Overflows int64 `json:"overflows"` + } `json:"queue"` +} + +type StreamServerZones map[string]struct { + Processing int `json:"processing"` + Connections int `json:"connections"` + Sessions *ResponseStats `json:"sessions"` + Discarded *int64 `json:"discarded"` // added in version 7 + Received int64 `json:"received"` + Sent int64 `json:"sent"` +} + 
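// Version-dependent fields in this file (see the "added in version N" comments)
// are declared as pointers so the gather code can distinguish "not returned by
// this NGINX Plus API version" (nil) from a genuine zero value; such fields are
// only emitted when non-nil, e.g. in gatherHttpServerZonesMetrics:
//
//	if zone.Discarded != nil {
//		result["discarded"] = *zone.Discarded
//	}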
+type StreamUpstreams map[string]struct { + Peers []struct { + ID int `json:"id"` + Server string `json:"server"` + Backup bool `json:"backup"` + Weight int `json:"weight"` + State string `json:"state"` + Active int `json:"active"` + Connections int64 `json:"connections"` + ConnectTime *int `json:"connect_time"` + FirstByteTime *int `json:"first_byte_time"` + ResponseTime *int `json:"response_time"` + Sent int64 `json:"sent"` + Received int64 `json:"received"` + Fails int64 `json:"fails"` + Unavail int64 `json:"unavail"` + HealthChecks HealthCheckStats `json:"health_checks"` + Downtime int64 `json:"downtime"` + } `json:"peers"` + Zombies int `json:"zombies"` +} + +type BasicHitStats struct { + Responses int64 `json:"responses"` + Bytes int64 `json:"bytes"` +} + +type ExtendedHitStats struct { + BasicHitStats + ResponsesWritten int64 `json:"responses_written"` + BytesWritten int64 `json:"bytes_written"` +} + +type HttpCaches map[string]struct { // added in version 2 + Size int64 `json:"size"` + MaxSize int64 `json:"max_size"` + Cold bool `json:"cold"` + Hit BasicHitStats `json:"hit"` + Stale BasicHitStats `json:"stale"` + Updating BasicHitStats `json:"updating"` + Revalidated *BasicHitStats `json:"revalidated"` // added in version 3 + Miss ExtendedHitStats `json:"miss"` + Expired ExtendedHitStats `json:"expired"` + Bypass ExtendedHitStats `json:"bypass"` +} diff --git a/plugins/inputs/nginx_upstream_check/README.md b/plugins/inputs/nginx_upstream_check/README.md new file mode 100644 index 000000000..4ff76889d --- /dev/null +++ b/plugins/inputs/nginx_upstream_check/README.md @@ -0,0 +1,75 @@ +# Telegraf Plugin: Nginx_upstream_check + +Read the status output of the nginx_upstream_check module (https://github.com/yaoweibin/nginx_upstream_check_module). +This module periodically checks the servers in an Nginx upstream with a configured request and interval to determine +whether they are still available. If a check fails, the server is marked as "down" and will not receive any requests +until the check passes and the server is marked as "up" again. + +The status page displays the current status of all upstreams and servers as well as the number of failed and successful +checks. This information can be exported in JSON format and parsed by this input. + +### Configuration: + +``` + ## An URL where Nginx Upstream check module is enabled + ## It should be set to return a JSON formatted response + url = "http://127.0.0.1/status?format=json" + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "check.example.com" + + ## Timeout for HTTP requests + timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Measurements & Fields: + +- Measurement + - fall (The number of failed server check attempts, counter) + - rise (The number of successful server check attempts, counter) + - status (The reported server status as a string) + - status_code (The server status code. 1 - up, 2 - down, 0 - other) + +The "status_code" field will most likely be the most useful one because it allows you to determine the current +state of every server and, possibly, add some monitoring to watch over it. 
InfluxDB can use string values and the +"status" field can be used instead, but for most other monitoring solutions the integer code will be appropriate. + +### Tags: + +- All measurements have the following tags: + - name (The hostname or IP of the upstream server) + - port (The alternative check port, 0 if the default one is used) + - type (The check type, http/tcp) + - upstream (The name of the upstream block in the Nginx configuration) + - url (The status url used by telegraf) + +### Example Output: + +When run with: +``` +./telegraf --config telegraf.conf --input-filter nginx_upstream_check --test +``` + +It produces: +``` +* Plugin: nginx_upstream_check, Collection 1 +> nginx_upstream_check,host=node1,name=192.168.0.1:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=0i,rise=100i,status="up",status_code=1i 1529088524000000000 +> nginx_upstream_check,host=node2,name=192.168.0.2:8080,port=0,type=http,upstream=my_backends,url=http://127.0.0.1:80/status?format\=json fall=100i,rise=0i,status="down",status_code=2i 1529088524000000000 +``` diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go new file mode 100644 index 000000000..8e662849f --- /dev/null +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check.go @@ -0,0 +1,233 @@ +package nginx_upstream_check + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + ## An URL where Nginx Upstream check module is enabled + ## It should be set to return a JSON formatted response + url = "http://127.0.0.1/status?format=json" + + ## HTTP method + # method = "GET" + + ## Optional HTTP headers + # headers = {"X-Special-Header" = "Special-Value"} + + ## Override HTTP "Host" header + # host_header = "check.example.com" + + ## Timeout for HTTP requests + timeout = "5s" + + ## Optional HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +const description = "Read nginx_upstream_check module status information (https://github.com/yaoweibin/nginx_upstream_check_module)" + +type NginxUpstreamCheck struct { + URL string `toml:"url"` + + Username string `toml:"username"` + Password string `toml:"password"` + Method string `toml:"method"` + Headers map[string]string `toml:"headers"` + HostHeader string `toml:"host_header"` + Timeout internal.Duration `toml:"timeout"` + + tls.ClientConfig + client *http.Client +} + +func NewNginxUpstreamCheck() *NginxUpstreamCheck { + return &NginxUpstreamCheck{ + URL: "http://127.0.0.1/status?format=json", + Method: "GET", + Headers: make(map[string]string), + HostHeader: "", + Timeout: internal.Duration{Duration: time.Second * 5}, + } +} + +func init() { + inputs.Add("nginx_upstream_check", func() telegraf.Input { + return NewNginxUpstreamCheck() + }) +} + +func (check *NginxUpstreamCheck) SampleConfig() string { + return sampleConfig +} + +func (check *NginxUpstreamCheck) Description() string { + return description +} + +type NginxUpstreamCheckData struct { + Servers struct { + Total uint64 
`json:"total"` + Generation uint64 `json:"generation"` + Server []NginxUpstreamCheckServer `json:"server"` + } `json:"servers"` +} + +type NginxUpstreamCheckServer struct { + Index uint64 `json:"index"` + Upstream string `json:"upstream"` + Name string `json:"name"` + Status string `json:"status"` + Rise uint64 `json:"rise"` + Fall uint64 `json:"fall"` + Type string `json:"type"` + Port uint16 `json:"port"` +} + +// createHttpClient create a clients to access API +func (check *NginxUpstreamCheck) createHttpClient() (*http.Client, error) { + tlsConfig, err := check.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: check.Timeout.Duration, + } + + return client, nil +} + +// gatherJsonData query the data source and parse the response JSON +func (check *NginxUpstreamCheck) gatherJsonData(url string, value interface{}) error { + + var method string + if check.Method != "" { + method = check.Method + } else { + method = "GET" + } + + request, err := http.NewRequest(method, url, nil) + if err != nil { + return err + } + + if (check.Username != "") || (check.Password != "") { + request.SetBasicAuth(check.Username, check.Password) + } + for header, value := range check.Headers { + request.Header.Add(header, value) + } + if check.HostHeader != "" { + request.Host = check.HostHeader + } + + response, err := check.client.Do(request) + if err != nil { + return err + } + + defer response.Body.Close() + if response.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. + body, _ := ioutil.ReadAll(io.LimitReader(response.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", url, response.Status, body) + } + + err = json.NewDecoder(response.Body).Decode(value) + if err != nil { + return err + } + + return nil +} + +func (check *NginxUpstreamCheck) Gather(accumulator telegraf.Accumulator) error { + if check.client == nil { + client, err := check.createHttpClient() + + if err != nil { + return err + } + check.client = client + } + + statusURL, err := url.Parse(check.URL) + if err != nil { + return err + } + + err = check.gatherStatusData(statusURL.String(), accumulator) + if err != nil { + return err + } + + return nil + +} + +func (check *NginxUpstreamCheck) gatherStatusData(url string, accumulator telegraf.Accumulator) error { + checkData := &NginxUpstreamCheckData{} + + err := check.gatherJsonData(url, checkData) + if err != nil { + return err + } + + for _, server := range checkData.Servers.Server { + + tags := map[string]string{ + "upstream": server.Upstream, + "type": server.Type, + "name": server.Name, + "port": strconv.Itoa(int(server.Port)), + "url": url, + } + + fields := map[string]interface{}{ + "status": server.Status, + "status_code": check.getStatusCode(server.Status), + "rise": server.Rise, + "fall": server.Fall, + } + + accumulator.AddFields("nginx_upstream_check", fields, tags) + } + + return nil +} + +func (check *NginxUpstreamCheck) getStatusCode(status string) uint8 { + switch status { + case "up": + return 1 + case "down": + return 2 + default: + return 0 + } +} diff --git a/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go new file mode 100644 index 000000000..1b70770d0 --- /dev/null +++ b/plugins/inputs/nginx_upstream_check/nginx_upstream_check_test.go @@ -0,0 +1,135 @@ +package nginx_upstream_check + 
+import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const sampleStatusResponse = ` +{ + "servers": { + "total": 2, + "generation": 1, + "server": [ + { + "index": 0, + "upstream": "upstream-1", + "name": "127.0.0.1:8081", + "status": "up", + "rise": 1000, + "fall": 0, + "type": "http", + "port": 0 + }, + { + "index": 1, + "upstream": "upstream-2", + "name": "127.0.0.1:8082", + "status": "down", + "rise": 0, + "fall": 2000, + "type": "tcp", + "port": 8080 + } + ] + } +} +` + +func TestNginxUpstreamCheckData(test *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { + var response string + + if request.URL.Path == "/status" { + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(responseWriter, response) + })) + defer testServer.Close() + + check := NewNginxUpstreamCheck() + check.URL = fmt.Sprintf("%s/status", testServer.URL) + + var accumulator testutil.Accumulator + + checkError := check.Gather(&accumulator) + require.NoError(test, checkError) + + accumulator.AssertContainsTaggedFields( + test, + "nginx_upstream_check", + map[string]interface{}{ + "status": string("up"), + "status_code": uint8(1), + "rise": uint64(1000), + "fall": uint64(0), + }, + map[string]string{ + "upstream": string("upstream-1"), + "type": string("http"), + "name": string("127.0.0.1:8081"), + "port": string("0"), + "url": fmt.Sprintf("%s/status", testServer.URL), + }) + + accumulator.AssertContainsTaggedFields( + test, + "nginx_upstream_check", + map[string]interface{}{ + "status": string("down"), + "status_code": uint8(2), + "rise": uint64(0), + "fall": uint64(2000), + }, + map[string]string{ + "upstream": string("upstream-2"), + "type": string("tcp"), + "name": string("127.0.0.1:8082"), + "port": string("8080"), + "url": fmt.Sprintf("%s/status", testServer.URL), + }) +} + +func TestNginxUpstreamCheckRequest(test *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(responseWriter http.ResponseWriter, request *http.Request) { + var response string + + if request.URL.Path == "/status" { + response = sampleStatusResponse + responseWriter.Header()["Content-Type"] = []string{"application/json"} + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(responseWriter, response) + + require.Equal(test, request.Method, "POST") + require.Equal(test, request.Header.Get("X-Test"), "test-value") + require.Equal(test, request.Header.Get("Authorization"), "Basic dXNlcjpwYXNzd29yZA==") + require.Equal(test, request.Host, "status.local") + + })) + defer testServer.Close() + + check := NewNginxUpstreamCheck() + check.URL = fmt.Sprintf("%s/status", testServer.URL) + check.Headers["X-test"] = "test-value" + check.HostHeader = "status.local" + check.Username = "user" + check.Password = "password" + check.Method = "POST" + + var accumulator testutil.Accumulator + + checkError := check.Gather(&accumulator) + require.NoError(test, checkError) +} diff --git a/plugins/inputs/nginx_vts/README.md b/plugins/inputs/nginx_vts/README.md new file mode 100644 index 000000000..ac22b7c2d --- /dev/null +++ b/plugins/inputs/nginx_vts/README.md @@ -0,0 +1,121 @@ +# Telegraf Plugin: nginx_vts + +This plugin gathers Nginx status using external virtual host traffic status module - https://github.com/vozlt/nginx-module-vts. 
This is an Nginx module that provides access to virtual host status information. It contains the current status such as servers, upstreams, caches. This is similar to the live activity monitoring of Nginx plus. +For module configuration details please see its [documentation](https://github.com/vozlt/nginx-module-vts#synopsis). + +### Configuration: + +``` +# Read nginx status information using nginx-module-vts module +[[inputs.nginx_vts]] + ## An array of Nginx status URIs to gather stats. + urls = ["http://localhost/status"] +``` + +### Measurements & Fields: + +- nginx_vts_connections + - active + - reading + - writing + - waiting + - accepted + - handled + - requests +- nginx_vts_server, nginx_vts_filter + - requests + - request_time + - in_bytes + - out_bytes + - response_1xx_count + - response_2xx_count + - response_3xx_count + - response_4xx_count + - response_5xx_count + - cache_miss + - cache_bypass + - cache_expired + - cache_stale + - cache_updating + - cache_revalidated + - cache_hit + - cache_scarce +- nginx_vts_upstream + - requests + - request_time + - response_time + - in_bytes + - out_bytes + - response_1xx_count + - response_2xx_count + - response_3xx_count + - response_4xx_count + - response_5xx_count + - weight + - max_fails + - fail_timeout + - backup + - down +- nginx_vts_cache + - max_bytes + - used_bytes + - in_bytes + - out_bytes + - miss + - bypass + - expired + - stale + - updating + - revalidated + - hit + - scarce + + +### Tags: + +- nginx_vts_connections + - source + - port +- nginx_vts_server + - source + - port + - zone +- nginx_vts_filter + - source + - port + - filter_name + - filter_key +- nginx_vts_upstream + - source + - port + - upstream + - upstream_address +- nginx_vts_cache + - source + - port + - zone + + +### Example Output: + +Using this configuration: +``` +[[inputs.nginx_vts]] + ## An array of Nginx status URIs to gather stats. 
+ urls = ["http://localhost/status"] +``` + +When run with: +``` +./telegraf -config telegraf.conf -input-filter nginx_vts -test +``` + +It produces: +``` +nginx_vts_connections,source=localhost,port=80,host=localhost waiting=30i,accepted=295333i,handled=295333i,requests=6833487i,active=33i,reading=0i,writing=3i 1518341521000000000 +nginx_vts_server,zone=example.com,port=80,host=localhost,source=localhost cache_hit=158915i,in_bytes=1935528964i,out_bytes=6531366419i,response_2xx_count=809994i,response_4xx_count=16664i,cache_bypass=0i,cache_stale=0i,cache_revalidated=0i,requests=2187977i,response_1xx_count=0i,response_3xx_count=1360390i,cache_miss=2249i,cache_updating=0i,cache_scarce=0i,request_time=13i,response_5xx_count=929i,cache_expired=0i 1518341521000000000 +nginx_vts_server,host=localhost,source=localhost,port=80,zone=* requests=6775284i,in_bytes=5003242389i,out_bytes=36858233827i,cache_expired=318881i,cache_updating=0i,request_time=51i,response_1xx_count=0i,response_2xx_count=4385916i,response_4xx_count=83680i,response_5xx_count=1186i,cache_bypass=0i,cache_revalidated=0i,cache_hit=1972222i,cache_scarce=0i,response_3xx_count=2304502i,cache_miss=408251i,cache_stale=0i 1518341521000000000 +nginx_vts_filter,filter_key=FI,filter_name=country,port=80,host=localhost,source=localhost request_time=0i,in_bytes=139701i,response_3xx_count=0i,out_bytes=2644495i,response_1xx_count=0i,cache_expired=0i,cache_scarce=0i,requests=179i,cache_miss=0i,cache_bypass=0i,cache_stale=0i,cache_updating=0i,cache_revalidated=0i,cache_hit=0i,response_2xx_count=177i,response_4xx_count=2i,response_5xx_count=0i 1518341521000000000 +nginx_vts_upstream,port=80,host=localhost,upstream=backend_cluster,upstream_address=127.0.0.1:6000,source=localhost fail_timeout=10i,backup=false,request_time=31i,response_5xx_count=1081i,response_2xx_count=1877498i,max_fails=1i,in_bytes=2763336289i,out_bytes=19470265071i,weight=1i,down=false,response_time=31i,response_1xx_count=0i,response_4xx_count=76125i,requests=3379232i,response_3xx_count=1424528i 1518341521000000000 +nginx_vts_cache,source=localhost,port=80,host=localhost,zone=example stale=0i,used_bytes=64334336i,miss=394573i,bypass=0i,expired=318788i,updating=0i,revalidated=0i,hit=689883i,scarce=0i,max_bytes=9223372036854775296i,in_bytes=1111161581i,out_bytes=19175548290i 1518341521000000000 +``` diff --git a/plugins/inputs/nginx_vts/nginx_vts.go b/plugins/inputs/nginx_vts/nginx_vts.go new file mode 100644 index 000000000..f9372eabd --- /dev/null +++ b/plugins/inputs/nginx_vts/nginx_vts.go @@ -0,0 +1,356 @@ +package nginx_vts + +import ( + "bufio" + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type NginxVTS struct { + Urls []string `toml:"urls"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + tls.ClientConfig + + client *http.Client +} + +var sampleConfig = ` + ## An array of ngx_http_status_module or status URI to gather stats. 
+ urls = ["http://localhost/status"] + + ## HTTP response timeout (default: 5s) + response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +func (n *NginxVTS) SampleConfig() string { + return sampleConfig +} + +func (n *NginxVTS) Description() string { + return "Read Nginx virtual host traffic status module information (nginx-module-vts)" +} + +func (n *NginxVTS) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + + // Create an HTTP client that is re-used for each + // collection interval + + if n.client == nil { + client, err := n.createHTTPClient() + if err != nil { + return err + } + n.client = client + } + + for _, u := range n.Urls { + addr, err := url.Parse(u) + if err != nil { + acc.AddError(fmt.Errorf("Unable to parse address '%s': %s", u, err)) + continue + } + + wg.Add(1) + go func(addr *url.URL) { + defer wg.Done() + acc.AddError(n.gatherURL(addr, acc)) + }(addr) + } + + wg.Wait() + return nil +} + +func (n *NginxVTS) createHTTPClient() (*http.Client, error) { + if n.ResponseTimeout.Duration < time.Second { + n.ResponseTimeout.Duration = time.Second * 5 + } + + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + Timeout: n.ResponseTimeout.Duration, + } + + return client, nil +} + +func (n *NginxVTS) gatherURL(addr *url.URL, acc telegraf.Accumulator) error { + resp, err := n.client.Get(addr.String()) + if err != nil { + return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("%s returned HTTP status %s", addr.String(), resp.Status) + } + contentType := strings.Split(resp.Header.Get("Content-Type"), ";")[0] + switch contentType { + case "application/json": + return gatherStatusURL(bufio.NewReader(resp.Body), getTags(addr), acc) + default: + return fmt.Errorf("%s returned unexpected content type %s", addr.String(), contentType) + } +} + +type NginxVTSResponse struct { + Connections struct { + Active uint64 `json:"active"` + Reading uint64 `json:"reading"` + Writing uint64 `json:"writing"` + Waiting uint64 `json:"waiting"` + Accepted uint64 `json:"accepted"` + Handled uint64 `json:"handled"` + Requests uint64 `json:"requests"` + } `json:"connections"` + ServerZones map[string]Server `json:"serverZones"` + FilterZones map[string]map[string]Server `json:"filterZones"` + UpstreamZones map[string][]Upstream `json:"upstreamZones"` + CacheZones map[string]Cache `json:"cacheZones"` +} + +type Server struct { + RequestCounter uint64 `json:"requestCounter"` + InBytes uint64 `json:"inBytes"` + OutBytes uint64 `json:"outBytes"` + RequestMsec uint64 `json:"requestMsec"` + Responses struct { + OneXx uint64 `json:"1xx"` + TwoXx uint64 `json:"2xx"` + ThreeXx uint64 `json:"3xx"` + FourXx uint64 `json:"4xx"` + FiveXx uint64 `json:"5xx"` + Miss uint64 `json:"miss"` + Bypass uint64 `json:"bypass"` + Expired uint64 `json:"expired"` + Stale uint64 `json:"stale"` + Updating uint64 `json:"updating"` + Revalidated uint64 `json:"revalidated"` + Hit uint64 `json:"hit"` + Scarce uint64 `json:"scarce"` + } `json:"responses"` +} + +type Upstream struct { + Server string `json:"server"` + RequestCounter uint64 `json:"requestCounter"` + InBytes uint64 `json:"inBytes"` + 
OutBytes uint64 `json:"outBytes"` + Responses struct { + OneXx uint64 `json:"1xx"` + TwoXx uint64 `json:"2xx"` + ThreeXx uint64 `json:"3xx"` + FourXx uint64 `json:"4xx"` + FiveXx uint64 `json:"5xx"` + } `json:"responses"` + ResponseMsec uint64 `json:"responseMsec"` + RequestMsec uint64 `json:"requestMsec"` + Weight uint64 `json:"weight"` + MaxFails uint64 `json:"maxFails"` + FailTimeout uint64 `json:"failTimeout"` + Backup bool `json:"backup"` + Down bool `json:"down"` +} + +type Cache struct { + MaxSize uint64 `json:"maxSize"` + UsedSize uint64 `json:"usedSize"` + InBytes uint64 `json:"inBytes"` + OutBytes uint64 `json:"outBytes"` + Responses struct { + Miss uint64 `json:"miss"` + Bypass uint64 `json:"bypass"` + Expired uint64 `json:"expired"` + Stale uint64 `json:"stale"` + Updating uint64 `json:"updating"` + Revalidated uint64 `json:"revalidated"` + Hit uint64 `json:"hit"` + Scarce uint64 `json:"scarce"` + } `json:"responses"` +} + +func gatherStatusURL(r *bufio.Reader, tags map[string]string, acc telegraf.Accumulator) error { + dec := json.NewDecoder(r) + status := &NginxVTSResponse{} + if err := dec.Decode(status); err != nil { + return fmt.Errorf("Error while decoding JSON response") + } + + acc.AddFields("nginx_vts_connections", map[string]interface{}{ + "active": status.Connections.Active, + "reading": status.Connections.Reading, + "writing": status.Connections.Writing, + "waiting": status.Connections.Waiting, + "accepted": status.Connections.Accepted, + "handled": status.Connections.Handled, + "requests": status.Connections.Requests, + }, tags) + + for zoneName, zone := range status.ServerZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + + acc.AddFields("nginx_vts_server", map[string]interface{}{ + "requests": zone.RequestCounter, + "request_time": zone.RequestMsec, + "in_bytes": zone.InBytes, + "out_bytes": zone.OutBytes, + + "response_1xx_count": zone.Responses.OneXx, + "response_2xx_count": zone.Responses.TwoXx, + "response_3xx_count": zone.Responses.ThreeXx, + "response_4xx_count": zone.Responses.FourXx, + "response_5xx_count": zone.Responses.FiveXx, + + "cache_miss": zone.Responses.Miss, + "cache_bypass": zone.Responses.Bypass, + "cache_expired": zone.Responses.Expired, + "cache_stale": zone.Responses.Stale, + "cache_updating": zone.Responses.Updating, + "cache_revalidated": zone.Responses.Revalidated, + "cache_hit": zone.Responses.Hit, + "cache_scarce": zone.Responses.Scarce, + }, zoneTags) + } + + for filterName, filters := range status.FilterZones { + for filterKey, upstream := range filters { + filterTags := map[string]string{} + for k, v := range tags { + filterTags[k] = v + } + filterTags["filter_key"] = filterKey + filterTags["filter_name"] = filterName + + acc.AddFields("nginx_vts_filter", map[string]interface{}{ + "requests": upstream.RequestCounter, + "request_time": upstream.RequestMsec, + "in_bytes": upstream.InBytes, + "out_bytes": upstream.OutBytes, + + "response_1xx_count": upstream.Responses.OneXx, + "response_2xx_count": upstream.Responses.TwoXx, + "response_3xx_count": upstream.Responses.ThreeXx, + "response_4xx_count": upstream.Responses.FourXx, + "response_5xx_count": upstream.Responses.FiveXx, + + "cache_miss": upstream.Responses.Miss, + "cache_bypass": upstream.Responses.Bypass, + "cache_expired": upstream.Responses.Expired, + "cache_stale": upstream.Responses.Stale, + "cache_updating": upstream.Responses.Updating, + "cache_revalidated": upstream.Responses.Revalidated, + 
"cache_hit": upstream.Responses.Hit, + "cache_scarce": upstream.Responses.Scarce, + }, filterTags) + } + } + + for upstreamName, upstreams := range status.UpstreamZones { + for _, upstream := range upstreams { + upstreamServerTags := map[string]string{} + for k, v := range tags { + upstreamServerTags[k] = v + } + upstreamServerTags["upstream"] = upstreamName + upstreamServerTags["upstream_address"] = upstream.Server + acc.AddFields("nginx_vts_upstream", map[string]interface{}{ + "requests": upstream.RequestCounter, + "request_time": upstream.RequestMsec, + "response_time": upstream.ResponseMsec, + "in_bytes": upstream.InBytes, + "out_bytes": upstream.OutBytes, + + "response_1xx_count": upstream.Responses.OneXx, + "response_2xx_count": upstream.Responses.TwoXx, + "response_3xx_count": upstream.Responses.ThreeXx, + "response_4xx_count": upstream.Responses.FourXx, + "response_5xx_count": upstream.Responses.FiveXx, + + "weight": upstream.Weight, + "max_fails": upstream.MaxFails, + "fail_timeout": upstream.FailTimeout, + "backup": upstream.Backup, + "down": upstream.Down, + }, upstreamServerTags) + } + } + + for zoneName, zone := range status.CacheZones { + zoneTags := map[string]string{} + for k, v := range tags { + zoneTags[k] = v + } + zoneTags["zone"] = zoneName + + acc.AddFields("nginx_vts_cache", map[string]interface{}{ + "max_bytes": zone.MaxSize, + "used_bytes": zone.UsedSize, + "in_bytes": zone.InBytes, + "out_bytes": zone.OutBytes, + + "miss": zone.Responses.Miss, + "bypass": zone.Responses.Bypass, + "expired": zone.Responses.Expired, + "stale": zone.Responses.Stale, + "updating": zone.Responses.Updating, + "revalidated": zone.Responses.Revalidated, + "hit": zone.Responses.Hit, + "scarce": zone.Responses.Scarce, + }, zoneTags) + } + + return nil +} + +// Get tag(s) for the nginx plugin +func getTags(addr *url.URL) map[string]string { + h := addr.Host + host, port, err := net.SplitHostPort(h) + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + return map[string]string{"source": host, "port": port} +} + +func init() { + inputs.Add("nginx_vts", func() telegraf.Input { + return &NginxVTS{} + }) +} diff --git a/plugins/inputs/nginx_vts/nginx_vts_test.go b/plugins/inputs/nginx_vts/nginx_vts_test.go new file mode 100644 index 000000000..085fc3843 --- /dev/null +++ b/plugins/inputs/nginx_vts/nginx_vts_test.go @@ -0,0 +1,486 @@ +package nginx_vts + +import ( + "fmt" + "net" + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const sampleStatusResponse = ` +{ + "hostName": "test.example.com", + "nginxVersion": "1.12.2", + "loadMsec": 1518180328331, + "nowMsec": 1518256058416, + "connections": { + "active": 111, + "reading": 222, + "writing": 333, + "waiting": 444, + "accepted": 555, + "handled": 666, + "requests": 777 + }, + "serverZones": { + "example.com": { + "requestCounter": 1415887, + "inBytes": 1296356607, + "outBytes": 4404939605, + "responses": { + "1xx": 100, + "2xx": 200, + "3xx": 300, + "4xx": 400, + "5xx": 500, + "miss": 14, + "bypass": 15, + "expired": 16, + "stale": 17, + "updating": 18, + "revalidated": 19, + "hit": 20, + "scarce": 21 + }, + "requestMsec": 13 + }, + "other.example.com": { + "requestCounter": 505, + "inBytes": 171388, + "outBytes": 1273382, + "responses": { + "1xx": 101, + "2xx": 201, + "3xx": 301, + "4xx": 401, + "5xx": 501, + "miss": 22, + "bypass": 23, 
+ "expired": 24, + "stale": 25, + "updating": 26, + "revalidated": 27, + "hit": 28, + "scarce": 29 + }, + "requestMsec": 12 + } + }, + "filterZones": { + "country": { + "FI": { + "requestCounter": 60, + "inBytes": 2570, + "outBytes": 53597, + "responses": { + "1xx": 106, + "2xx": 206, + "3xx": 306, + "4xx": 406, + "5xx": 506, + "miss": 61, + "bypass": 62, + "expired": 63, + "stale": 64, + "updating": 65, + "revalidated": 66, + "hit": 67, + "scarce": 68 + }, + "requestMsec": 69 + } + } + }, + "upstreamZones": { + "backend_cluster": [ + { + "server": "127.0.0.1:6000", + "requestCounter": 2103849, + "inBytes": 1774680141, + "outBytes": 11727669190, + "responses": { + "1xx": 103, + "2xx": 203, + "3xx": 303, + "4xx": 403, + "5xx": 503 + }, + "requestMsec": 30, + "responseMsec": 31, + "weight": 32, + "maxFails": 33, + "failTimeout": 34, + "backup": false, + "down": false + } + ], + "::nogroups": [ + { + "server": "127.0.0.1:4433", + "requestCounter": 8, + "inBytes": 5013, + "outBytes": 487585, + "responses": { + "1xx": 104, + "2xx": 204, + "3xx": 304, + "4xx": 404, + "5xx": 504 + }, + "requestMsec": 34, + "responseMsec": 35, + "weight": 36, + "maxFails": 37, + "failTimeout": 38, + "backup": true, + "down": false + }, + { + "server": "127.0.0.1:8080", + "requestCounter": 7, + "inBytes": 2926, + "outBytes": 3846638, + "responses": { + "1xx": 105, + "2xx": 205, + "3xx": 305, + "4xx": 405, + "5xx": 505 + }, + "requestMsec": 39, + "responseMsec": 40, + "weight": 41, + "maxFails": 42, + "failTimeout": 43, + "backup": true, + "down": true + } + ] + }, + "cacheZones": { + "example": { + "maxSize": 9223372036854776000, + "usedSize": 68639232, + "inBytes": 697138673, + "outBytes": 11305044106, + "responses": { + "miss": 44, + "bypass": 45, + "expired": 46, + "stale": 47, + "updating": 48, + "revalidated": 49, + "hit": 50, + "scarce": 51 + } + }, + "static": { + "maxSize": 9223372036854776000, + "usedSize": 569856, + "inBytes": 551652333, + "outBytes": 1114889271, + "responses": { + "miss": 52, + "bypass": 53, + "expired": 54, + "stale": 55, + "updating": 56, + "revalidated": 57, + "hit": 58, + "scarce": 59 + } + } + } +} +` + +func TestNginxPlusGeneratesMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + + if r.URL.Path == "/status" { + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &NginxVTS{ + Urls: []string{fmt.Sprintf("%s/status", ts.URL)}, + } + + var acc testutil.Accumulator + + err := n.Gather(&acc) + + require.NoError(t, err) + + addr, err := url.Parse(ts.URL) + if err != nil { + panic(err) + } + + host, port, err := net.SplitHostPort(addr.Host) + if err != nil { + host = addr.Host + if addr.Scheme == "http" { + port = "80" + } else if addr.Scheme == "https" { + port = "443" + } else { + port = "" + } + } + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_connections", + map[string]interface{}{ + "accepted": uint64(555), + "active": uint64(111), + "handled": uint64(666), + "reading": uint64(222), + "requests": uint64(777), + "waiting": uint64(444), + "writing": uint64(333), + }, + map[string]string{ + "source": host, + "port": port, + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_server", + map[string]interface{}{ + "requests": uint64(1415887), + "request_time": uint64(13), + "in_bytes": uint64(1296356607), + "out_bytes": uint64(4404939605), + + 
"response_1xx_count": uint64(100), + "response_2xx_count": uint64(200), + "response_3xx_count": uint64(300), + "response_4xx_count": uint64(400), + "response_5xx_count": uint64(500), + + "cache_miss": uint64(14), + "cache_bypass": uint64(15), + "cache_expired": uint64(16), + "cache_stale": uint64(17), + "cache_updating": uint64(18), + "cache_revalidated": uint64(19), + "cache_hit": uint64(20), + "cache_scarce": uint64(21), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "example.com", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_filter", + map[string]interface{}{ + "requests": uint64(60), + "request_time": uint64(69), + "in_bytes": uint64(2570), + "out_bytes": uint64(53597), + + "response_1xx_count": uint64(106), + "response_2xx_count": uint64(206), + "response_3xx_count": uint64(306), + "response_4xx_count": uint64(406), + "response_5xx_count": uint64(506), + + "cache_miss": uint64(61), + "cache_bypass": uint64(62), + "cache_expired": uint64(63), + "cache_stale": uint64(64), + "cache_updating": uint64(65), + "cache_revalidated": uint64(66), + "cache_hit": uint64(67), + "cache_scarce": uint64(68), + }, + map[string]string{ + "source": host, + "port": port, + "filter_key": "FI", + "filter_name": "country", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_server", + map[string]interface{}{ + "requests": uint64(505), + "request_time": uint64(12), + "in_bytes": uint64(171388), + "out_bytes": uint64(1273382), + + "response_1xx_count": uint64(101), + "response_2xx_count": uint64(201), + "response_3xx_count": uint64(301), + "response_4xx_count": uint64(401), + "response_5xx_count": uint64(501), + + "cache_miss": uint64(22), + "cache_bypass": uint64(23), + "cache_expired": uint64(24), + "cache_stale": uint64(25), + "cache_updating": uint64(26), + "cache_revalidated": uint64(27), + "cache_hit": uint64(28), + "cache_scarce": uint64(29), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "other.example.com", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_upstream", + map[string]interface{}{ + "requests": uint64(2103849), + "request_time": uint64(30), + "response_time": uint64(31), + "in_bytes": uint64(1774680141), + "out_bytes": uint64(11727669190), + + "response_1xx_count": uint64(103), + "response_2xx_count": uint64(203), + "response_3xx_count": uint64(303), + "response_4xx_count": uint64(403), + "response_5xx_count": uint64(503), + + "weight": uint64(32), + "max_fails": uint64(33), + "fail_timeout": uint64(34), + "backup": bool(false), + "down": bool(false), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "backend_cluster", + "upstream_address": "127.0.0.1:6000", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_upstream", + map[string]interface{}{ + "requests": uint64(8), + "request_time": uint64(34), + "response_time": uint64(35), + "in_bytes": uint64(5013), + "out_bytes": uint64(487585), + + "response_1xx_count": uint64(104), + "response_2xx_count": uint64(204), + "response_3xx_count": uint64(304), + "response_4xx_count": uint64(404), + "response_5xx_count": uint64(504), + + "weight": uint64(36), + "max_fails": uint64(37), + "fail_timeout": uint64(38), + "backup": bool(true), + "down": bool(false), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "::nogroups", + "upstream_address": "127.0.0.1:4433", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_upstream", + map[string]interface{}{ + "requests": uint64(7), + "request_time": uint64(39), + 
"response_time": uint64(40), + "in_bytes": uint64(2926), + "out_bytes": uint64(3846638), + + "response_1xx_count": uint64(105), + "response_2xx_count": uint64(205), + "response_3xx_count": uint64(305), + "response_4xx_count": uint64(405), + "response_5xx_count": uint64(505), + + "weight": uint64(41), + "max_fails": uint64(42), + "fail_timeout": uint64(43), + "backup": bool(true), + "down": bool(true), + }, + map[string]string{ + "source": host, + "port": port, + "upstream": "::nogroups", + "upstream_address": "127.0.0.1:8080", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_cache", + map[string]interface{}{ + "max_bytes": uint64(9223372036854776000), + "used_bytes": uint64(68639232), + "in_bytes": uint64(697138673), + "out_bytes": uint64(11305044106), + + "miss": uint64(44), + "bypass": uint64(45), + "expired": uint64(46), + "stale": uint64(47), + "updating": uint64(48), + "revalidated": uint64(49), + "hit": uint64(50), + "scarce": uint64(51), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "example", + }) + + acc.AssertContainsTaggedFields( + t, + "nginx_vts_cache", + map[string]interface{}{ + "max_bytes": uint64(9223372036854776000), + "used_bytes": uint64(569856), + "in_bytes": uint64(551652333), + "out_bytes": uint64(1114889271), + + "miss": uint64(52), + "bypass": uint64(53), + "expired": uint64(54), + "stale": uint64(55), + "updating": uint64(56), + "revalidated": uint64(57), + "hit": uint64(58), + "scarce": uint64(59), + }, + map[string]string{ + "source": host, + "port": port, + "zone": "static", + }) +} diff --git a/plugins/inputs/nsq/README.md b/plugins/inputs/nsq/README.md new file mode 100644 index 000000000..00c1089af --- /dev/null +++ b/plugins/inputs/nsq/README.md @@ -0,0 +1,17 @@ +# NSQ Input Plugin + +### Configuration: + +```toml +# Description +[[inputs.nsq]] + ## An array of NSQD HTTP API endpoints + endpoints = ["http://localhost:4151"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` diff --git a/plugins/inputs/nsq/nsq.go b/plugins/inputs/nsq/nsq.go index 1ef47ef05..5eab48ea5 100644 --- a/plugins/inputs/nsq/nsq.go +++ b/plugins/inputs/nsq/nsq.go @@ -33,17 +33,27 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) // Might add Lookupd endpoints for cluster discovery type NSQ struct { Endpoints []string + tls.ClientConfig + httpClient *http.Client } var sampleConfig = ` ## An array of NSQD HTTP API endpoints - endpoints = ["http://localhost:4151"] + endpoints = ["http://localhost:4151"] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` const ( @@ -52,10 +62,14 @@ const ( func init() { inputs.Add("nsq", func() telegraf.Input { - return &NSQ{} + return New() }) } +func New() *NSQ { + return &NSQ{} +} + func (n *NSQ) SampleConfig() string { return sampleConfig } @@ -65,6 +79,15 @@ func (n *NSQ) Description() string { } func (n *NSQ) Gather(acc telegraf.Accumulator) error { + var err error + + if n.httpClient == nil { + n.httpClient, err = n.getHttpClient() + if err != nil { + return err + } + } + var wg sync.WaitGroup for _, e := range n.Endpoints { wg.Add(1) @@ -78,13 +101,19 @@ func (n *NSQ) 
Gather(acc telegraf.Accumulator) error { return nil } -var tr = &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), -} - -var client = &http.Client{ - Transport: tr, - Timeout: time.Duration(4 * time.Second), +func (n *NSQ) getHttpClient() (*http.Client, error) { + tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + tr := &http.Transport{ + TLSClientConfig: tlsConfig, + } + httpClient := &http.Client{ + Transport: tr, + Timeout: time.Duration(4 * time.Second), + } + return httpClient, nil } func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { @@ -92,7 +121,7 @@ func (n *NSQ) gatherEndpoint(e string, acc telegraf.Accumulator) error { if err != nil { return err } - r, err := client.Get(u.String()) + r, err := n.httpClient.Get(u.String()) if err != nil { return fmt.Errorf("Error while polling %s: %s", u.String(), err) } diff --git a/plugins/inputs/nsq/nsq_test.go b/plugins/inputs/nsq/nsq_test.go index f3e9ce868..23af13a4c 100644 --- a/plugins/inputs/nsq/nsq_test.go +++ b/plugins/inputs/nsq/nsq_test.go @@ -19,9 +19,8 @@ func TestNSQStatsV1(t *testing.T) { })) defer ts.Close() - n := &NSQ{ - Endpoints: []string{ts.URL}, - } + n := New() + n.Endpoints = []string{ts.URL} var acc testutil.Accumulator err := acc.GatherError(n.Gather) @@ -152,7 +151,7 @@ func TestNSQStatsV1(t *testing.T) { } } -// v1 version of localhost/stats?format=json reesponse body +// v1 version of localhost/stats?format=json response body var responseV1 = ` { "version": "1.0.0-compat", @@ -276,9 +275,8 @@ func TestNSQStatsPreV1(t *testing.T) { })) defer ts.Close() - n := &NSQ{ - Endpoints: []string{ts.URL}, - } + n := New() + n.Endpoints = []string{ts.URL} var acc testutil.Accumulator err := acc.GatherError(n.Gather) diff --git a/plugins/inputs/nsq_consumer/README.md b/plugins/inputs/nsq_consumer/README.md index 5ac156eec..d1e7194bb 100644 --- a/plugins/inputs/nsq_consumer/README.md +++ b/plugins/inputs/nsq_consumer/README.md @@ -1,23 +1,35 @@ # NSQ Consumer Input Plugin -The [NSQ](http://nsq.io/) consumer plugin polls a specified NSQD -topic and adds messages to InfluxDB. This plugin allows a message to be in any of the supported `data_format` types. +The [NSQ][nsq] consumer plugin reads from NSQD and creates metrics using one +of the supported [input data formats][]. -## Configuration +### Configuration: ```toml # Read metrics from NSQD topic(s) [[inputs.nsq_consumer]] ## Server option still works but is deprecated, we just prepend it to the nsqd array. # server = "localhost:4150" + ## An array representing the NSQD TCP HTTP Endpoints nsqd = ["localhost:4150"] + ## An array representing the NSQLookupd HTTP Endpoints nsqlookupd = ["localhost:4161"] topic = "telegraf" channel = "consumer" max_in_flight = 100 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -25,5 +37,5 @@ topic and adds messages to InfluxDB. 
This plugin allows a message to be in any o data_format = "influx" ``` -## Testing -The `nsq_consumer_test` mocks out the interaction with `NSQD`. It requires no outside dependencies. +[nsq]: https://nsq.io +[input data formats]: /docs/DATA_FORMATS_INPUT.md diff --git a/plugins/inputs/nsq_consumer/nsq_consumer.go b/plugins/inputs/nsq_consumer/nsq_consumer.go index 0823b3ac9..2c25cce7d 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer.go @@ -1,7 +1,8 @@ package nsq_consumer import ( - "fmt" + "context" + "sync" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" @@ -9,30 +10,67 @@ import ( nsq "github.com/nsqio/go-nsq" ) +const ( + defaultMaxUndeliveredMessages = 1000 +) + +type empty struct{} +type semaphore chan empty + +type logger struct { + log telegraf.Logger +} + +func (l *logger) Output(calldepth int, s string) error { + l.log.Debug(s) + return nil +} + //NSQConsumer represents the configuration of the plugin type NSQConsumer struct { - Server string - Nsqd []string - Nsqlookupd []string - Topic string - Channel string - MaxInFlight int - parser parsers.Parser - consumer *nsq.Consumer - acc telegraf.Accumulator + Server string `toml:"server"` + Nsqd []string `toml:"nsqd"` + Nsqlookupd []string `toml:"nsqlookupd"` + Topic string `toml:"topic"` + Channel string `toml:"channel"` + MaxInFlight int `toml:"max_in_flight"` + + MaxUndeliveredMessages int `toml:"max_undelivered_messages"` + + parser parsers.Parser + consumer *nsq.Consumer + + Log telegraf.Logger + + mu sync.Mutex + messages map[telegraf.TrackingID]*nsq.Message + wg sync.WaitGroup + cancel context.CancelFunc } var sampleConfig = ` ## Server option still works but is deprecated, we just prepend it to the nsqd array. # server = "localhost:4150" + ## An array representing the NSQD TCP HTTP Endpoints nsqd = ["localhost:4150"] + ## An array representing the NSQLookupd HTTP Endpoints nsqlookupd = ["localhost:4161"] topic = "telegraf" channel = "consumer" max_in_flight = 100 + ## Maximum messages to read from the broker that have not been written by an + ## output. For best throughput set based on the number of metrics within + ## each message and the size of the output's metric_batch_size. + ## + ## For example, if each message from the queue contains 10 metrics and the + ## output metric_batch_size is 1000, setting this to 100 will ensure that a + ## full batch is collected and the write is triggered immediately without + ## waiting until the next flush_interval. + # max_undelivered_messages = 1000 + ## Data format to consume. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -40,12 +78,6 @@ var sampleConfig = ` data_format = "influx" ` -func init() { - inputs.Add("nsq_consumer", func() telegraf.Input { - return &NSQConsumer{} - }) -} - // SetParser takes the data_format from the config and finds the right parser for that format func (n *NSQConsumer) SetParser(parser parsers.Parser) { n.parser = parser @@ -62,32 +94,88 @@ func (n *NSQConsumer) Description() string { } // Start pulls data from nsq -func (n *NSQConsumer) Start(acc telegraf.Accumulator) error { - n.acc = acc +func (n *NSQConsumer) Start(ac telegraf.Accumulator) error { + acc := ac.WithTracking(n.MaxUndeliveredMessages) + sem := make(semaphore, n.MaxUndeliveredMessages) + n.messages = make(map[telegraf.TrackingID]*nsq.Message, n.MaxUndeliveredMessages) + + ctx, cancel := context.WithCancel(context.Background()) + n.cancel = cancel + n.connect() - n.consumer.AddConcurrentHandlers(nsq.HandlerFunc(func(message *nsq.Message) error { + n.consumer.SetLogger(&logger{log: n.Log}, nsq.LogLevelInfo) + n.consumer.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error { metrics, err := n.parser.Parse(message.Body) if err != nil { - acc.AddError(fmt.Errorf("E! NSQConsumer Parse Error\nmessage:%s\nerror:%s", string(message.Body), err.Error())) + acc.AddError(err) + // Remove the message from the queue + message.Finish() return nil } - for _, metric := range metrics { - n.acc.AddFields(metric.Name(), metric.Fields(), metric.Tags(), metric.Time()) + if len(metrics) == 0 { + message.Finish() + return nil } - message.Finish() + + select { + case <-ctx.Done(): + return ctx.Err() + case sem <- empty{}: + break + } + + n.mu.Lock() + id := acc.AddTrackingMetricGroup(metrics) + n.messages[id] = message + n.mu.Unlock() + message.DisableAutoResponse() return nil - }), n.MaxInFlight) + })) if len(n.Nsqlookupd) > 0 { n.consumer.ConnectToNSQLookupds(n.Nsqlookupd) } n.consumer.ConnectToNSQDs(append(n.Nsqd, n.Server)) + + n.wg.Add(1) + go func() { + defer n.wg.Done() + n.onDelivery(ctx, acc, sem) + }() return nil } +func (n *NSQConsumer) onDelivery(ctx context.Context, acc telegraf.TrackingAccumulator, sem semaphore) { + for { + select { + case <-ctx.Done(): + return + case info := <-acc.Delivered(): + n.mu.Lock() + msg, ok := n.messages[info.ID()] + if !ok { + n.mu.Unlock() + continue + } + <-sem + delete(n.messages, info.ID()) + n.mu.Unlock() + + if info.Delivered() { + msg.Finish() + } else { + msg.Requeue(-1) + } + } + } +} + // Stop processing messages func (n *NSQConsumer) Stop() { + n.cancel() + n.wg.Wait() n.consumer.Stop() + <-n.consumer.StopChan } // Gather is a noop @@ -107,3 +195,11 @@ func (n *NSQConsumer) connect() error { } return nil } + +func init() { + inputs.Add("nsq_consumer", func() telegraf.Input { + return &NSQConsumer{ + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + } + }) +} diff --git a/plugins/inputs/nsq_consumer/nsq_consumer_test.go b/plugins/inputs/nsq_consumer/nsq_consumer_test.go index a6d8c27e5..e07b125cc 100644 --- a/plugins/inputs/nsq_consumer/nsq_consumer_test.go +++ b/plugins/inputs/nsq_consumer/nsq_consumer_test.go @@ -24,23 +24,25 @@ func TestReadsMetricsFromNSQ(t *testing.T) { script := []instruction{ // SUB - instruction{0, nsq.FrameTypeResponse, []byte("OK")}, + {0, nsq.FrameTypeResponse, []byte("OK")}, // IDENTIFY - instruction{0, nsq.FrameTypeResponse, []byte("OK")}, - instruction{20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)}, + {0, 
nsq.FrameTypeResponse, []byte("OK")}, + {20 * time.Millisecond, nsq.FrameTypeMessage, frameMessage(msg)}, // needed to exit test - instruction{100 * time.Millisecond, -1, []byte("exit")}, + {100 * time.Millisecond, -1, []byte("exit")}, } addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:4155") newMockNSQD(script, addr.String()) consumer := &NSQConsumer{ - Server: "127.0.0.1:4155", - Topic: "telegraf", - Channel: "consume", - MaxInFlight: 1, - Nsqd: []string{"127.0.0.1:4155"}, + Log: testutil.Logger{}, + Server: "127.0.0.1:4155", + Topic: "telegraf", + Channel: "consume", + MaxInFlight: 1, + MaxUndeliveredMessages: defaultMaxUndeliveredMessages, + Nsqd: []string{"127.0.0.1:4155"}, } p, _ := parsers.NewInfluxParser() @@ -49,8 +51,6 @@ func TestReadsMetricsFromNSQ(t *testing.T) { assert.Equal(t, 0, len(acc.Metrics), "There should not be any points") if err := consumer.Start(&acc); err != nil { t.Fatal(err.Error()) - } else { - defer consumer.Stop() } waitForPoint(&acc, t) diff --git a/plugins/inputs/nstat/README.md b/plugins/inputs/nstat/README.md index c80f893b9..5d2ca6c0a 100644 --- a/plugins/inputs/nstat/README.md +++ b/plugins/inputs/nstat/README.md @@ -36,6 +36,8 @@ The sample config file # dump_zeros = true ``` +In case that `proc_net_snmp6` path doesn't exist (e.g. IPv6 is not enabled) no error would be raised. + ### Measurements & Fields - nstat diff --git a/plugins/inputs/nstat/nstat.go b/plugins/inputs/nstat/nstat.go index 5096d7b03..e6dcb420f 100644 --- a/plugins/inputs/nstat/nstat.go +++ b/plugins/inputs/nstat/nstat.go @@ -83,13 +83,14 @@ func (ns *Nstat) Gather(acc telegraf.Accumulator) error { return err } - // collect SNMP6 data + // collect SNMP6 data, if SNMP6 directory exists (IPv6 enabled) snmp6, err := ioutil.ReadFile(ns.ProcNetSNMP6) - if err != nil { - return err - } - err = ns.gatherSNMP6(snmp6, acc) - if err != nil { + if err == nil { + err = ns.gatherSNMP6(snmp6, acc) + if err != nil { + return err + } + } else if !os.IsNotExist(err) { return err } return nil diff --git a/plugins/inputs/ntpq/README.md b/plugins/inputs/ntpq/README.md index f6ee8e2af..e691200dd 100644 --- a/plugins/inputs/ntpq/README.md +++ b/plugins/inputs/ntpq/README.md @@ -29,7 +29,7 @@ server (RMS of difference of multiple time samples, milliseconds); ```toml # Get standard NTP query metrics, requires ntpq executable [[inputs.ntpq]] - ## If false, set the -n ntpq flag. Can reduce metric gather times. + ## If false, add -n for ntpq command. Can reduce metric gather times. 
dns_lookup = true ``` diff --git a/plugins/inputs/ntpq/ntpq.go b/plugins/inputs/ntpq/ntpq.go index ce7bb96d7..80b5dcd0f 100644 --- a/plugins/inputs/ntpq/ntpq.go +++ b/plugins/inputs/ntpq/ntpq.go @@ -21,30 +21,11 @@ var tagHeaders map[string]string = map[string]string{ "t": "type", } -// Mapping of the ntpq tag key to the index in the command output -var tagI map[string]int = map[string]int{ - "remote": -1, - "refid": -1, - "stratum": -1, - "type": -1, -} - -// Mapping of float metrics to their index in the command output -var floatI map[string]int = map[string]int{ - "delay": -1, - "offset": -1, - "jitter": -1, -} - -// Mapping of int metrics to their index in the command output -var intI map[string]int = map[string]int{ - "when": -1, - "poll": -1, - "reach": -1, -} - type NTPQ struct { - runQ func() ([]byte, error) + runQ func() ([]byte, error) + tagI map[string]int + floatI map[string]int + intI map[string]int DNSLookup bool `toml:"dns_lookup"` } @@ -75,6 +56,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { } lineCounter := 0 + numColumns := 0 scanner := bufio.NewScanner(bytes.NewReader(out)) for scanner.Scan() { line := scanner.Text() @@ -96,30 +78,35 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { // If lineCounter == 0, then this is the header line if lineCounter == 0 { + numColumns = len(fields) for i, field := range fields { // Check if field is a tag: if tagKey, ok := tagHeaders[field]; ok { - tagI[tagKey] = i + n.tagI[tagKey] = i continue } // check if field is a float metric: - if _, ok := floatI[field]; ok { - floatI[field] = i + if _, ok := n.floatI[field]; ok { + n.floatI[field] = i continue } // check if field is an int metric: - if _, ok := intI[field]; ok { - intI[field] = i + if _, ok := n.intI[field]; ok { + n.intI[field] = i continue } } } else { + if len(fields) != numColumns { + continue + } + mFields := make(map[string]interface{}) // Get tags from output - for key, index := range tagI { + for key, index := range n.tagI { if index == -1 { continue } @@ -127,7 +114,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { } // Get integer metrics from output - for key, index := range intI { + for key, index := range n.intI { if index == -1 || index >= len(fields) { continue } @@ -177,7 +164,7 @@ func (n *NTPQ) Gather(acc telegraf.Accumulator) error { } // get float metrics from output - for key, index := range floatI { + for key, index := range n.floatI { if index == -1 || index >= len(fields) { continue } @@ -217,10 +204,40 @@ func (n *NTPQ) runq() ([]byte, error) { return cmd.Output() } +func newNTPQ() *NTPQ { + // Mapping of the ntpq tag key to the index in the command output + tagI := map[string]int{ + "remote": -1, + "refid": -1, + "stratum": -1, + "type": -1, + } + + // Mapping of float metrics to their index in the command output + floatI := map[string]int{ + "delay": -1, + "offset": -1, + "jitter": -1, + } + + // Mapping of int metrics to their index in the command output + intI := map[string]int{ + "when": -1, + "poll": -1, + "reach": -1, + } + + n := &NTPQ{ + tagI: tagI, + floatI: floatI, + intI: intI, + } + n.runQ = n.runq + return n +} + func init() { inputs.Add("ntpq", func() telegraf.Input { - n := &NTPQ{} - n.runQ = n.runq - return n + return newNTPQ() }) } diff --git a/plugins/inputs/ntpq/ntpq_test.go b/plugins/inputs/ntpq/ntpq_test.go index 47b8cf8f4..b0db77e45 100644 --- a/plugins/inputs/ntpq/ntpq_test.go +++ b/plugins/inputs/ntpq/ntpq_test.go @@ -3,10 +3,12 @@ package ntpq import ( "fmt" "testing" + "time" + 
"github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSingleNTPQ(t *testing.T) { @@ -14,9 +16,8 @@ func TestSingleNTPQ(t *testing.T) { ret: []byte(singleNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -39,43 +40,13 @@ func TestSingleNTPQ(t *testing.T) { acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) } -func TestMissingJitterField(t *testing.T) { - tt := tester{ - ret: []byte(missingJitterField), - err: nil, - } - n := &NTPQ{ - runQ: tt.runqTest, - } - - acc := testutil.Accumulator{} - assert.NoError(t, acc.GatherError(n.Gather)) - - fields := map[string]interface{}{ - "when": int64(101), - "poll": int64(256), - "reach": int64(37), - "delay": float64(51.016), - "offset": float64(233.010), - } - tags := map[string]string{ - "remote": "uschi5-ntp-002.", - "state_prefix": "*", - "refid": "10.177.80.46", - "stratum": "2", - "type": "u", - } - acc.AssertContainsTaggedFields(t, "ntpq", fields, tags) -} - func TestBadIntNTPQ(t *testing.T) { tt := tester{ ret: []byte(badIntParseNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.Error(t, acc.GatherError(n.Gather)) @@ -102,9 +73,8 @@ func TestBadFloatNTPQ(t *testing.T) { ret: []byte(badFloatParseNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.Error(t, acc.GatherError(n.Gather)) @@ -131,9 +101,8 @@ func TestDaysNTPQ(t *testing.T) { ret: []byte(whenDaysNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -161,9 +130,8 @@ func TestHoursNTPQ(t *testing.T) { ret: []byte(whenHoursNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -191,9 +159,8 @@ func TestMinutesNTPQ(t *testing.T) { ret: []byte(whenMinutesNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -221,9 +188,8 @@ func TestBadWhenNTPQ(t *testing.T) { ret: []byte(whenBadNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.Error(t, acc.GatherError(n.Gather)) @@ -253,9 +219,8 @@ func TestParserNTPQ(t *testing.T) { err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -316,9 +281,8 @@ func TestMultiNTPQ(t *testing.T) { ret: []byte(multiNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -357,14 +321,12 @@ func TestMultiNTPQ(t *testing.T) { } func TestBadHeaderNTPQ(t *testing.T) { - resetVars() tt := tester{ ret: []byte(badHeaderNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -387,14 +349,12 @@ func TestBadHeaderNTPQ(t *testing.T) { } func TestMissingDelayColumnNTPQ(t *testing.T) { - 
resetVars() tt := tester{ ret: []byte(missingDelayNTPQ), err: nil, } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.NoError(t, acc.GatherError(n.Gather)) @@ -420,14 +380,68 @@ func TestFailedNTPQ(t *testing.T) { ret: []byte(singleNTPQ), err: fmt.Errorf("Test failure"), } - n := &NTPQ{ - runQ: tt.runqTest, - } + n := newNTPQ() + n.runQ = tt.runqTest acc := testutil.Accumulator{} assert.Error(t, acc.GatherError(n.Gather)) } +// It is possible for the output of ntqp to be missing the refid column. This +// is believed to be http://bugs.ntp.org/show_bug.cgi?id=3484 which is fixed +// in ntp-4.2.8p12 (included first in Debian Buster). +func TestNoRefID(t *testing.T) { + now := time.Now() + expected := []telegraf.Metric{ + testutil.MustMetric("ntpq", + map[string]string{ + "refid": "10.177.80.37", + "remote": "83.137.98.96", + "stratum": "2", + "type": "u", + }, + map[string]interface{}{ + "delay": float64(54.033), + "jitter": float64(449514), + "offset": float64(243.426), + "poll": int64(1024), + "reach": int64(377), + "when": int64(740), + }, + now), + testutil.MustMetric("ntpq", + map[string]string{ + "refid": "10.177.80.37", + "remote": "131.188.3.221", + "stratum": "2", + "type": "u", + }, + map[string]interface{}{ + "delay": float64(111.820), + "jitter": float64(449528), + "offset": float64(261.921), + "poll": int64(1024), + "reach": int64(377), + "when": int64(783), + }, + now), + } + + tt := tester{ + ret: []byte(noRefID), + err: nil, + } + n := newNTPQ() + n.runQ = tt.runqTest + + acc := testutil.Accumulator{ + TimeFunc: func() time.Time { return now }, + } + + require.NoError(t, acc.GatherError(n.Gather)) + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + type tester struct { ret []byte err error @@ -437,48 +451,11 @@ func (t *tester) runqTest() ([]byte, error) { return t.ret, t.err } -func resetVars() { - // Mapping of ntpq header names to tag keys - tagHeaders = map[string]string{ - "remote": "remote", - "refid": "refid", - "st": "stratum", - "t": "type", - } - - // Mapping of the ntpq tag key to the index in the command output - tagI = map[string]int{ - "remote": -1, - "refid": -1, - "stratum": -1, - "type": -1, - } - - // Mapping of float metrics to their index in the command output - floatI = map[string]int{ - "delay": -1, - "offset": -1, - "jitter": -1, - } - - // Mapping of int metrics to their index in the command output - intI = map[string]int{ - "when": -1, - "poll": -1, - "reach": -1, - } -} - var singleNTPQ = ` remote refid st t when poll reach delay offset jitter ============================================================================== *uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462 ` -var missingJitterField = ` remote refid st t when poll reach delay offset jitter -============================================================================== -*uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 -` - var badHeaderNTPQ = `remote refid foobar t when poll reach delay offset jitter ============================================================================== *uschi5-ntp-002. 10.177.80.46 2 u 101 256 37 51.016 233.010 17.462 @@ -527,6 +504,7 @@ var multiNTPQ = ` remote refid st t when poll reach delay 5.9.29.107 10.177.80.37 2 u 703 1024 377 205.704 160.406 449602. 91.189.94.4 10.177.80.37 2 u 673 1024 377 143.047 274.726 449445. 
` + var multiParserNTPQ = ` remote refid st t when poll reach delay offset jitter ============================================================================== *SHM(0) .PPS. 1 u 60 64 377 0.000 0.045 1.012 @@ -535,3 +513,10 @@ var multiParserNTPQ = ` remote refid st t when poll reach d +37.58.57.238 ( 192.53.103.103 2 u 10 1024 377 1.748 0.373 0.101 -SHM(1) .GPS. 1 u 121 128 377 0.000 10.105 2.012 ` + +var noRefID = ` remote refid st t when poll reach delay offset jitter +============================================================================== + 83.137.98.96 10.177.80.37 2 u 740 1024 377 54.033 243.426 449514. + 91.189.94.4 2 u 673 1024 377 143.047 274.726 449445. + 131.188.3.221 10.177.80.37 2 u 783 1024 377 111.820 261.921 449528. +` diff --git a/plugins/inputs/nvidia_smi/README.md b/plugins/inputs/nvidia_smi/README.md index 5d0cc99c6..892381cd5 100644 --- a/plugins/inputs/nvidia_smi/README.md +++ b/plugins/inputs/nvidia_smi/README.md @@ -7,13 +7,20 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid ```toml # Pulls statistics from nvidia GPUs attached to the host [[inputs.nvidia_smi]] -## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath -# bin_path = /usr/bin/nvidia-smi + ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + # bin_path = "/usr/bin/nvidia-smi" -## Optional: timeout for GPU polling -# timeout = 5s + ## Optional: timeout for GPU polling + # timeout = "5s" ``` +#### Windows + +On Windows, `nvidia-smi` is generally located at `C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe` +On Windows 10, you may also find this located here `C:\Windows\System32\nvidia-smi.exe` + +You'll need to escape the `\` within the `telegraf.conf` like this: `C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe` + ### Metrics - measurement: `nvidia_smi` - tags @@ -31,6 +38,15 @@ This plugin uses a query on the [`nvidia-smi`](https://developer.nvidia.com/nvid - `temperature_gpu` (integer, degrees C) - `utilization_gpu` (integer, percentage) - `utilization_memory` (integer, percentage) + - `pcie_link_gen_current` (integer) + - `pcie_link_width_current` (integer) + - `encoder_stats_session_count` (integer) + - `encoder_stats_average_fps` (integer) + - `encoder_stats_average_latency` (integer) + - `clocks_current_graphics` (integer, MHz) + - `clocks_current_sm` (integer, MHz) + - `clocks_current_memory` (integer, MHz) + - `clocks_current_video` (integer, MHz) ### Sample Query @@ -40,9 +56,29 @@ The below query could be used to alert on the average temperature of the your GP SELECT mean("temperature_gpu") FROM "nvidia_smi" WHERE time > now() - 5m GROUP BY time(1m), "index", "name", "host" ``` +### Troubleshooting + +Check the full output by running `nvidia-smi` binary manually. + +Linux: +``` +sudo -u telegraf -- /usr/bin/nvidia-smi -q -x +``` + +Windows: +``` +"C:\Program Files\NVIDIA Corporation\NVSMI\nvidia-smi.exe" -q -x +``` + +Please include the output of this command if opening an GitHub issue. 
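+
+For reference, a minimal Windows configuration using the escaped path shown above might look like this (the install location is the assumed default; adjust it to wherever `nvidia-smi.exe` lives on your system):
+
+```toml
+[[inputs.nvidia_smi]]
+  ## Assumed default install location on Windows; note the escaped backslashes.
+  bin_path = "C:\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe"
+  timeout = "5s"
+```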
+ ### Example Output ``` nvidia_smi,compute_mode=Default,host=8218cf,index=0,name=GeForce\ GTX\ 1070,pstate=P2,uuid=GPU-823bc202-6279-6f2c-d729-868a30f14d96 fan_speed=100i,memory_free=7563i,memory_total=8112i,memory_used=549i,temperature_gpu=53i,utilization_gpu=100i,utilization_memory=90i 1523991122000000000 nvidia_smi,compute_mode=Default,host=8218cf,index=1,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=50i,utilization_gpu=100i,utilization_memory=85i 1523991122000000000 nvidia_smi,compute_mode=Default,host=8218cf,index=2,name=GeForce\ GTX\ 1080,pstate=P2,uuid=GPU-d4cfc28d-0481-8d07-b81a-ddfc63d74adf fan_speed=100i,memory_free=7557i,memory_total=8114i,memory_used=557i,temperature_gpu=58i,utilization_gpu=100i,utilization_memory=86i 1523991122000000000 ``` + +### Limitations +Note that there seems to be an issue with getting current memory clock values when the memory is overclocked. +This may or may not apply to everyone but it's confirmed to be an issue on an EVGA 2080 Ti. diff --git a/plugins/inputs/nvidia_smi/nvidia_smi.go b/plugins/inputs/nvidia_smi/nvidia_smi.go index 84784b765..b21e390c6 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi.go @@ -1,7 +1,7 @@ package nvidia_smi import ( - "bufio" + "encoding/xml" "fmt" "os" "os/exec" @@ -14,32 +14,12 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -var ( - measurement = "nvidia_smi" - metrics = "fan.speed,memory.total,memory.used,memory.free,pstate,temperature.gpu,name,uuid,compute_mode,utilization.gpu,utilization.memory,index,power.draw" - metricNames = [][]string{ - []string{"fan_speed", "integer"}, - []string{"memory_total", "integer"}, - []string{"memory_used", "integer"}, - []string{"memory_free", "integer"}, - []string{"pstate", "tag"}, - []string{"temperature_gpu", "integer"}, - []string{"name", "tag"}, - []string{"uuid", "tag"}, - []string{"compute_mode", "tag"}, - []string{"utilization_gpu", "integer"}, - []string{"utilization_memory", "integer"}, - []string{"index", "tag"}, - []string{"power_draw", "float"}, - } -) +const measurement = "nvidia_smi" // NvidiaSMI holds the methods for this plugin type NvidiaSMI struct { BinPath string Timeout internal.Duration - - metrics string } // Description returns the description of the NvidiaSMI plugin @@ -50,17 +30,16 @@ func (smi *NvidiaSMI) Description() string { // SampleConfig returns the sample configuration for the NvidiaSMI plugin func (smi *NvidiaSMI) SampleConfig() string { return ` -## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath -# bin_path = /usr/bin/nvidia-smi + ## Optional: path to nvidia-smi binary, defaults to $PATH via exec.LookPath + # bin_path = "/usr/bin/nvidia-smi" -## Optional: timeout for GPU polling -# timeout = 5s + ## Optional: timeout for GPU polling + # timeout = "5s" ` } // Gather implements the telegraf interface func (smi *NvidiaSMI) Gather(acc telegraf.Accumulator) error { - if _, err := os.Stat(smi.BinPath); os.IsNotExist(err) { return fmt.Errorf("nvidia-smi binary not at path %s, cannot gather GPU data", smi.BinPath) } @@ -83,84 +62,178 @@ func init() { return &NvidiaSMI{ BinPath: "/usr/bin/nvidia-smi", Timeout: internal.Duration{Duration: 5 * time.Second}, - metrics: metrics, } }) } -func (smi *NvidiaSMI) pollSMI() (string, error) { +func (smi *NvidiaSMI) pollSMI() ([]byte, error) { // Construct and execute metrics query - opts := 
[]string{"--format=noheader,nounits,csv", fmt.Sprintf("--query-gpu=%s", smi.metrics)} - ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, opts...), smi.Timeout.Duration) + ret, err := internal.CombinedOutputTimeout(exec.Command(smi.BinPath, "-q", "-x"), smi.Timeout.Duration) if err != nil { - return "", err + return nil, err } - return string(ret), nil + return ret, nil } -func gatherNvidiaSMI(ret string, acc telegraf.Accumulator) error { - // First split the lines up and handle each one - scanner := bufio.NewScanner(strings.NewReader(ret)) - for scanner.Scan() { - tags, fields, err := parseLine(scanner.Text()) - if err != nil { - return err - } - acc.AddFields(measurement, fields, tags) +func gatherNvidiaSMI(ret []byte, acc telegraf.Accumulator) error { + smi := &SMI{} + err := xml.Unmarshal(ret, smi) + if err != nil { + return err } - if err := scanner.Err(); err != nil { - return fmt.Errorf("Error scanning text %s", ret) + metrics := smi.genTagsFields() + + for _, metric := range metrics { + acc.AddFields(measurement, metric.fields, metric.tags) } return nil } -func parseLine(line string) (map[string]string, map[string]interface{}, error) { - tags := make(map[string]string, 0) - fields := make(map[string]interface{}, 0) +type metric struct { + tags map[string]string + fields map[string]interface{} +} - // Next split up the comma delimited metrics - met := strings.Split(line, ",") - - // Make sure there are as many metrics in the line as there were queried. - if len(met) == len(metricNames) { - for i, m := range metricNames { - col := strings.TrimSpace(met[i]) - - // Handle the tags - if m[1] == "tag" { - tags[m[0]] = col - continue - } - - if strings.Contains(col, "[Not Supported]") { - continue - } - - // Parse the integers - if m[1] == "integer" { - out, err := strconv.ParseInt(col, 10, 64) - if err != nil { - return tags, fields, err - } - fields[m[0]] = out - } - - // Parse the floats - if m[1] == "float" { - out, err := strconv.ParseFloat(col, 64) - if err != nil { - return tags, fields, err - } - fields[m[0]] = out - } +func (s *SMI) genTagsFields() []metric { + metrics := []metric{} + for i, gpu := range s.GPU { + tags := map[string]string{ + "index": strconv.Itoa(i), } + fields := map[string]interface{}{} - // Return the tags and fields - return tags, fields, nil + setTagIfUsed(tags, "pstate", gpu.PState) + setTagIfUsed(tags, "name", gpu.ProdName) + setTagIfUsed(tags, "uuid", gpu.UUID) + setTagIfUsed(tags, "compute_mode", gpu.ComputeMode) + + setIfUsed("int", fields, "fan_speed", gpu.FanSpeed) + setIfUsed("int", fields, "memory_total", gpu.Memory.Total) + setIfUsed("int", fields, "memory_used", gpu.Memory.Used) + setIfUsed("int", fields, "memory_free", gpu.Memory.Free) + setIfUsed("int", fields, "temperature_gpu", gpu.Temp.GPUTemp) + setIfUsed("int", fields, "utilization_gpu", gpu.Utilization.GPU) + setIfUsed("int", fields, "utilization_memory", gpu.Utilization.Memory) + setIfUsed("int", fields, "pcie_link_gen_current", gpu.PCI.LinkInfo.PCIEGen.CurrentLinkGen) + setIfUsed("int", fields, "pcie_link_width_current", gpu.PCI.LinkInfo.LinkWidth.CurrentLinkWidth) + setIfUsed("int", fields, "encoder_stats_session_count", gpu.Encoder.SessionCount) + setIfUsed("int", fields, "encoder_stats_average_fps", gpu.Encoder.AverageFPS) + setIfUsed("int", fields, "encoder_stats_average_latency", gpu.Encoder.AverageLatency) + setIfUsed("int", fields, "clocks_current_graphics", gpu.Clocks.Graphics) + setIfUsed("int", fields, "clocks_current_sm", gpu.Clocks.SM) + setIfUsed("int", 
fields, "clocks_current_memory", gpu.Clocks.Memory) + setIfUsed("int", fields, "clocks_current_video", gpu.Clocks.Video) + + setIfUsed("float", fields, "power_draw", gpu.Power.PowerDraw) + metrics = append(metrics, metric{tags, fields}) + } + return metrics +} + +func setTagIfUsed(m map[string]string, k, v string) { + if v != "" { + m[k] = v + } +} + +func setIfUsed(t string, m map[string]interface{}, k, v string) { + vals := strings.Fields(v) + if len(vals) < 1 { + return } - // If the line is empty return an emptyline error - return tags, fields, fmt.Errorf("Different number of metrics returned (%d) than expeced (%d)", len(met), len(metricNames)) + val := vals[0] + if k == "pcie_link_width_current" { + val = strings.TrimSuffix(vals[0], "x") + } + + switch t { + case "float": + if val != "" { + f, err := strconv.ParseFloat(val, 64) + if err == nil { + m[k] = f + } + } + case "int": + if val != "" { + i, err := strconv.Atoi(val) + if err == nil { + m[k] = i + } + } + } +} + +// SMI defines the structure for the output of _nvidia-smi -q -x_. +type SMI struct { + GPU GPU `xml:"gpu"` +} + +// GPU defines the structure of the GPU portion of the smi output. +type GPU []struct { + FanSpeed string `xml:"fan_speed"` // int + Memory MemoryStats `xml:"fb_memory_usage"` + PState string `xml:"performance_state"` + Temp TempStats `xml:"temperature"` + ProdName string `xml:"product_name"` + UUID string `xml:"uuid"` + ComputeMode string `xml:"compute_mode"` + Utilization UtilizationStats `xml:"utilization"` + Power PowerReadings `xml:"power_readings"` + PCI PCI `xml:"pci"` + Encoder EncoderStats `xml:"encoder_stats"` + Clocks ClockStats `xml:"clocks"` +} + +// MemoryStats defines the structure of the memory portions in the smi output. +type MemoryStats struct { + Total string `xml:"total"` // int + Used string `xml:"used"` // int + Free string `xml:"free"` // int +} + +// TempStats defines the structure of the temperature portion of the smi output. +type TempStats struct { + GPUTemp string `xml:"gpu_temp"` // int +} + +// UtilizationStats defines the structure of the utilization portion of the smi output. +type UtilizationStats struct { + GPU string `xml:"gpu_util"` // int + Memory string `xml:"memory_util"` // int +} + +// PowerReadings defines the structure of the power_readings portion of the smi output. +type PowerReadings struct { + PowerDraw string `xml:"power_draw"` // float +} + +// PCI defines the structure of the pci portion of the smi output. +type PCI struct { + LinkInfo struct { + PCIEGen struct { + CurrentLinkGen string `xml:"current_link_gen"` // int + } `xml:"pcie_gen"` + LinkWidth struct { + CurrentLinkWidth string `xml:"current_link_width"` // int + } `xml:"link_widths"` + } `xml:"pci_gpu_link_info"` +} + +// EncoderStats defines the structure of the encoder_stats portion of the smi output. +type EncoderStats struct { + SessionCount string `xml:"session_count"` // int + AverageFPS string `xml:"average_fps"` // int + AverageLatency string `xml:"average_latency"` // int +} + +// ClockStats defines the structure of the clocks portion of the smi output. 
+type ClockStats struct { + Graphics string `xml:"graphics_clock"` // int + SM string `xml:"sm_clock"` // int + Memory string `xml:"mem_clock"` // int + Video string `xml:"video_clock"` // int } diff --git a/plugins/inputs/nvidia_smi/nvidia_smi_test.go b/plugins/inputs/nvidia_smi/nvidia_smi_test.go index 87785fe87..6fd37b570 100644 --- a/plugins/inputs/nvidia_smi/nvidia_smi_test.go +++ b/plugins/inputs/nvidia_smi/nvidia_smi_test.go @@ -1,44 +1,137 @@ package nvidia_smi import ( + "io/ioutil" + "path/filepath" "testing" + "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func TestParseLineStandard(t *testing.T) { - line := "85, 8114, 553, 7561, P2, 61, GeForce GTX 1070 Ti, GPU-d1911b8a-f5c8-5e66-057c-486561269de8, Default, 100, 93, 1, 0.0\n" - tags, fields, err := parseLine(line) - if err != nil { - t.Fail() +func TestGatherValidXML(t *testing.T) { + tests := []struct { + name string + filename string + expected []telegraf.Metric + }{ + { + name: "GeForce GTX 1070 Ti", + filename: "gtx-1070-ti.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "name": "GeForce GTX 1070 Ti", + "compute_mode": "Default", + "index": "0", + "pstate": "P8", + "uuid": "GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665", + }, + map[string]interface{}{ + "clocks_current_graphics": 135, + "clocks_current_memory": 405, + "clocks_current_sm": 135, + "clocks_current_video": 405, + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fan_speed": 100, + "memory_free": 4054, + "memory_total": 4096, + "memory_used": 42, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "temperature_gpu": 39, + "utilization_gpu": 0, + "utilization_memory": 0, + }, + time.Unix(0, 0)), + }, + }, + { + name: "GeForce GTX 1660 Ti", + filename: "gtx-1660-ti.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "Graphics Device", + "pstate": "P8", + "uuid": "GPU-304a277d-3545-63b8-3a36-dfde3c992989", + }, + map[string]interface{}{ + "clocks_current_graphics": 300, + "clocks_current_memory": 405, + "clocks_current_sm": 300, + "clocks_current_video": 540, + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fan_speed": 0, + "memory_free": 5912, + "memory_total": 5912, + "memory_used": 0, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "power_draw": 8.93, + "temperature_gpu": 40, + "utilization_gpu": 0, + "utilization_memory": 1, + }, + time.Unix(0, 0)), + }, + }, + { + name: "Quadro P400", + filename: "quadro-p400.xml", + expected: []telegraf.Metric{ + testutil.MustMetric( + "nvidia_smi", + map[string]string{ + "compute_mode": "Default", + "index": "0", + "name": "Quadro P400", + "pstate": "P8", + "uuid": "GPU-8f750be4-dfbc-23b9-b33f-da729a536494", + }, + map[string]interface{}{ + "clocks_current_graphics": 139, + "clocks_current_memory": 405, + "clocks_current_sm": 139, + "clocks_current_video": 544, + "encoder_stats_average_fps": 0, + "encoder_stats_average_latency": 0, + "encoder_stats_session_count": 0, + "fan_speed": 34, + "memory_free": 1998, + "memory_total": 1998, + "memory_used": 0, + "pcie_link_gen_current": 1, + "pcie_link_width_current": 16, + "temperature_gpu": 33, + "utilization_gpu": 0, + "utilization_memory": 3, + }, + time.Unix(0, 0)), + }, + }, } - if tags["name"] != "GeForce 
GTX 1070 Ti" { - t.Fail() - } - if temp, ok := fields["temperature_gpu"].(int); ok && temp == 61 { - t.Fail() - } -} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator -func TestParseLineEmptyLine(t *testing.T) { - line := "\n" - _, _, err := parseLine(line) - if err == nil { - t.Fail() + octets, err := ioutil.ReadFile(filepath.Join("testdata", tt.filename)) + require.NoError(t, err) + + err = gatherNvidiaSMI(octets, &acc) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) + }) } } - -func TestParseLineBad(t *testing.T) { - line := "the quick brown fox jumped over the lazy dog" - _, _, err := parseLine(line) - if err == nil { - t.Fail() - } -} - -func TestParseLineNotSupported(t *testing.T) { - line := "[Not Supported], 7606, 0, 7606, P0, 38, Tesla P4, GPU-xxx, Default, 0, 0, 0, 0.0\n" - _, fields, err := parseLine(line) - require.NoError(t, err) - require.Equal(t, nil, fields["fan_speed"]) -} diff --git a/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml b/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml new file mode 100644 index 000000000..3e3e3ec87 --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/gtx-1070-ti.xml @@ -0,0 +1,47 @@ + + + + + GeForce GTX 1070 Ti + GPU-f9ba66fc-a7f5-94c5-da19-019ef2f9c665 + + + + 1 + + + 16x + + + + 100 % + P8 + + 4096 MiB + 42 MiB + 4054 MiB + + Default + + 0 % + 0 % + + + 0 + 0 + 0 + + + 39 C + + + N/A + + + 135 MHz + 135 MHz + 405 MHz + 405 MHz + + + diff --git a/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml b/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml new file mode 100644 index 000000000..1a6c7d089 --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/gtx-1660-ti.xml @@ -0,0 +1,189 @@ + + + Fri Mar 29 19:19:44 2019 + 418.43 + 10.1 + 1 + + Graphics Device + GeForce + Disabled + Disabled + Disabled + Disabled + 4000 + + N/A + N/A + + N/A + GPU-304a277d-3545-63b8-3a36-dfde3c992989 + 0 + 90.16.25.00.4C + No + 0x4300 + N/A + + G001.0000.02.04 + 1.1 + N/A + N/A + + + N/A + N/A + + + None + + + N/A + + + 43 + 00 + 0000 + 218410DE + 00000000:43:00.0 + 3FC81458 + + + 3 + 1 + + + 16x + 16x + + + + N/A + N/A + + 0 + 0 + 0 KB/s + 0 KB/s + + 0 % + P8 + + Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 5912 MiB + 0 MiB + 5912 MiB + + + 256 MiB + 2 MiB + 254 MiB + + Default + + 0 % + 1 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + N/A + N/A + + + + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + + + + + N/A + N/A + + + N/A + N/A + + N/A + + + 40 C + 96 C + 93 C + 91 C + N/A + N/A + + + P8 + Supported + 8.93 W + 130.00 W + 130.00 W + 130.00 W + 70.00 W + 130.00 W + + + 300 MHz + 300 MHz + 405 MHz + 540 MHz + + + N/A + N/A + + + N/A + N/A + + + 2145 MHz + 2145 MHz + 4001 MHz + 1950 MHz + + + N/A + + + N/A + N/A + + N/A + + + + + + + diff --git a/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml b/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml new file mode 100644 index 000000000..ca9e2191e --- /dev/null +++ b/plugins/inputs/nvidia_smi/testdata/quadro-p400.xml @@ -0,0 +1,447 @@ + + + Mon Mar 11 17:03:27 2019 + 418.43 + 10.1 + 1 + + Quadro P400 + Quadro + Disabled + Disabled + Disabled + Disabled + 4000 + + N/A + N/A + + 0424418054852 + GPU-8f750be4-dfbc-23b9-b33f-da729a536494 + 0 + 86.07.3B.00.4A + No + 0x4300 + 900-5G212-1701-000 + + G212.0500.00.01 + 1.1 + N/A + N/A + + + N/A + N/A + + + None + + + N/A + + + 43 + 00 + 0000 + 1CB310DE + 
00000000:43:00.0 + 11BE10DE + + + 3 + 1 + + + 16x + 16x + + + + N/A + N/A + + 0 + 0 + 0 KB/s + 0 KB/s + + 34 % + P8 + + Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + Not Active + + + 1998 MiB + 0 MiB + 1998 MiB + + + 256 MiB + 2 MiB + 254 MiB + + Default + + 0 % + 3 % + 0 % + 0 % + + + 0 + 0 + 0 + + + 0 + 0 + 0 + + + N/A + N/A + + + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + N/A + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + + + + N/A + N/A + + + N/A + N/A + + N/A + + + 33 C + 103 C + 100 C + N/A + N/A + N/A + + + P8 + N/A + N/A + N/A + N/A + N/A + N/A + N/A + + + 139 MHz + 139 MHz + 405 MHz + 544 MHz + + + 1227 MHz + 2005 MHz + + + 1227 MHz + 2005 MHz + + + 1252 MHz + 1252 MHz + 2005 MHz + 1126 MHz + + + 1252 MHz + + + N/A + N/A + + + + 2005 MHz + 1252 MHz + 1240 MHz + 1227 MHz + 1215 MHz + 1202 MHz + 1189 MHz + 1177 MHz + 1164 MHz + 1151 MHz + 1139 MHz + 1126 MHz + 1113 MHz + 1101 MHz + 1088 MHz + 1075 MHz + 1063 MHz + 1050 MHz + 1037 MHz + 1025 MHz + 1012 MHz + 999 MHz + 987 MHz + 974 MHz + 961 MHz + 949 MHz + 936 MHz + 923 MHz + 911 MHz + 898 MHz + 885 MHz + 873 MHz + 860 MHz + 847 MHz + 835 MHz + 822 MHz + 810 MHz + 797 MHz + 784 MHz + 772 MHz + 759 MHz + 746 MHz + 734 MHz + 721 MHz + 708 MHz + 696 MHz + 683 MHz + 670 MHz + 658 MHz + 645 MHz + 632 MHz + 620 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + 810 MHz + 1252 MHz + 1240 MHz + 1227 MHz + 1215 MHz + 1202 MHz + 1189 MHz + 1177 MHz + 1164 MHz + 1151 MHz + 1139 MHz + 1126 MHz + 1113 MHz + 1101 MHz + 1088 MHz + 1075 MHz + 1063 MHz + 1050 MHz + 1037 MHz + 1025 MHz + 1012 MHz + 999 MHz + 987 MHz + 974 MHz + 961 MHz + 949 MHz + 936 MHz + 923 MHz + 911 MHz + 898 MHz + 885 MHz + 873 MHz + 860 MHz + 847 MHz + 835 MHz + 822 MHz + 810 MHz + 797 MHz + 784 MHz + 772 MHz + 759 MHz + 746 MHz + 734 MHz + 721 MHz + 708 MHz + 696 MHz + 683 MHz + 670 MHz + 658 MHz + 645 MHz + 632 MHz + 620 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + 405 MHz + 607 MHz + 594 MHz + 582 MHz + 569 MHz + 556 MHz + 544 MHz + 531 MHz + 518 MHz + 506 MHz + 493 MHz + 480 MHz + 468 MHz + 455 MHz + 442 MHz + 430 MHz + 417 MHz + 405 MHz + 392 MHz + 379 MHz + 367 MHz + 354 MHz + 341 MHz + 329 MHz + 316 MHz + 303 MHz + 291 MHz + 278 MHz + 265 MHz + 253 MHz + 240 MHz + 227 MHz + 215 MHz + 202 MHz + 189 MHz + 177 MHz + 164 MHz + 151 MHz + 139 MHz + + + + + + + + + diff --git a/plugins/inputs/openldap/README.md b/plugins/inputs/openldap/README.md index 619e845c7..48f29cb60 100644 --- a/plugins/inputs/openldap/README.md +++ b/plugins/inputs/openldap/README.md @@ -4,7 +4,7 @@ This plugin gathers metrics from OpenLDAP's cn=Monitor backend. 
### Configuration: -To use this plugin you must enable the [monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend. +To use this plugin you must enable the [slapd monitoring](https://www.openldap.org/devel/admin/monitoringslapd.html) backend. ```toml [[inputs.openldap]] diff --git a/plugins/inputs/openldap/openldap.go b/plugins/inputs/openldap/openldap.go index 9e69c8a21..2bfbc3fac 100644 --- a/plugins/inputs/openldap/openldap.go +++ b/plugins/inputs/openldap/openldap.go @@ -5,11 +5,10 @@ import ( "strconv" "strings" - "gopkg.in/ldap.v2" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" + "gopkg.in/ldap.v3" ) type Openldap struct { @@ -57,6 +56,12 @@ var attrTranslate = map[string]string{ "monitoredInfo": "", "monitorOpInitiated": "_initiated", "monitorOpCompleted": "_completed", + "olmMDBPagesMax": "_mdb_pages_max", + "olmMDBPagesUsed": "_mdb_pages_used", + "olmMDBPagesFree": "_mdb_pages_free", + "olmMDBReadersMax": "_mdb_readers_max", + "olmMDBReadersUsed": "_mdb_readers_used", + "olmMDBEntries": "_mdb_entries", } func (o *Openldap) SampleConfig() string { diff --git a/plugins/inputs/openldap/openldap_test.go b/plugins/inputs/openldap/openldap_test.go index 10835896f..76d9cc3a9 100644 --- a/plugins/inputs/openldap/openldap_test.go +++ b/plugins/inputs/openldap/openldap_test.go @@ -4,11 +4,10 @@ import ( "strconv" "testing" - "gopkg.in/ldap.v2" - "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "gopkg.in/ldap.v3" ) func TestOpenldapMockResult(t *testing.T) { diff --git a/plugins/inputs/openntpd/README.md b/plugins/inputs/openntpd/README.md new file mode 100644 index 000000000..877c3a460 --- /dev/null +++ b/plugins/inputs/openntpd/README.md @@ -0,0 +1,93 @@ +# OpenNTPD Input Plugin + +Get standard NTP query metrics from [OpenNTPD][] using the ntpctl command. + +[OpenNTPD]: http://www.openntpd.org/ + +Below is the documentation of the various headers returned from the NTP query +command when running `ntpctl -s peers`. + +- remote – The remote peer or server being synced to. +- wt – the peer weight +- tl – the peer trust level +- st (stratum) – The remote peer or server Stratum +- next – number of seconds until the next poll +- poll – polling interval in seconds +- delay – Round trip communication delay to the remote peer +or server (milliseconds); +- offset – Mean offset (phase) in the times reported between this local host and +the remote peer or server (RMS, milliseconds); +- jitter – Mean deviation (jitter) in the time reported for that remote peer or +server (RMS of difference of multiple time samples, milliseconds); + +### Configuration + +```toml +[[inputs.openntpd]] + ## Run ntpctl binary with sudo. + # use_sudo = false + + ## Location of the ntpctl binary. + # binary = "/usr/sbin/ntpctl" + + ## Maximum time the ntpctl binary is allowed to run. + # timeout = "5ms" +``` + +### Metrics + +- ntpctl + - tags: + - remote + - stratum + - fields: + - delay (float, milliseconds) + - jitter (float, milliseconds) + - offset (float, milliseconds) + - poll (int, seconds) + - next (int, seconds) + - wt (int) + - tl (int) + +### Permissions + +It's important to note that this plugin references ntpctl, which may require +additional permissions to execute successfully. +Depending on the user/group permissions of the telegraf user executing this +plugin, you may need to alter the group membership, set facls, or use sudo. 
+ +**Group membership (Recommended)**: +```bash +$ groups telegraf +telegraf : telegraf + +$ usermod -a -G ntpd telegraf + +$ groups telegraf +telegraf : telegraf ntpd +``` + +**Sudo privileges**: +If you use this method, you will need the following in your telegraf config: +```toml +[[inputs.openntpd]] + use_sudo = true +``` + +You will also need to update your sudoers file: +```bash +$ visudo +# Add the following lines: +Cmnd_Alias NTPCTL = /usr/sbin/ntpctl +telegraf ALL=(ALL) NOPASSWD: NTPCTL +Defaults!NTPCTL !logfile, !syslog, !pam_session +``` + +Please use the solution you see as most appropriate. + +### Example Output + +``` +openntpd,remote=194.57.169.1,stratum=2,host=localhost tl=10i,poll=1007i, +offset=2.295,jitter=3.896,delay=53.766,next=266i,wt=1i 1514454299000000000 +``` diff --git a/plugins/inputs/openntpd/openntpd.go b/plugins/inputs/openntpd/openntpd.go new file mode 100644 index 000000000..e7723b480 --- /dev/null +++ b/plugins/inputs/openntpd/openntpd.go @@ -0,0 +1,223 @@ +package openntpd + +import ( + "bufio" + "bytes" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Mapping of ntpctl header names to tag keys +var tagHeaders = map[string]string{ + "st": "stratum", +} + +// Mapping of the ntpctl tag key to the index in the command output +var tagI = map[string]int{ + "stratum": 2, +} + +// Mapping of float metrics to their index in the command output +var floatI = map[string]int{ + "offset": 5, + "delay": 6, + "jitter": 7, +} + +// Mapping of int metrics to their index in the command output +var intI = map[string]int{ + "wt": 0, + "tl": 1, + "next": 3, + "poll": 4, +} + +type runner func(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) + +// Openntpd is used to store configuration values +type Openntpd struct { + Binary string + Timeout internal.Duration + UseSudo bool + + filter filter.Filter + run runner +} + +var defaultBinary = "/usr/sbin/ntpctl" +var defaultTimeout = internal.Duration{Duration: 5 * time.Second} + +func (n *Openntpd) Description() string { + return "Get standard NTP query metrics from OpenNTPD." +} + +func (n *Openntpd) SampleConfig() string { + return ` + ## Run ntpctl binary with sudo. + # use_sudo = false + + ## Location of the ntpctl binary. + # binary = "/usr/sbin/ntpctl" + + ## Maximum time the ntpctl binary is allowed to run. + # timeout = "5ms" + ` +} + +// Shell out to ntpctl and return the output +func openntpdRunner(cmdName string, Timeout internal.Duration, UseSudo bool) (*bytes.Buffer, error) { + cmdArgs := []string{"-s", "peers"} + + cmd := exec.Command(cmdName, cmdArgs...) + + if UseSudo { + cmdArgs = append([]string{cmdName}, cmdArgs...) + cmd = exec.Command("sudo", cmdArgs...) 
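+		// With use_sudo enabled, the binary name is prepended back onto the argument
+		// list and the whole invocation becomes `sudo <binary> -s peers`.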
+ } + + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, Timeout.Duration) + if err != nil { + return &out, fmt.Errorf("error running ntpctl: %s", err) + } + + return &out, nil +} + +func (n *Openntpd) Gather(acc telegraf.Accumulator) error { + out, err := n.run(n.Binary, n.Timeout, n.UseSudo) + if err != nil { + return fmt.Errorf("error gathering metrics: %s", err) + } + + lineCounter := 0 + scanner := bufio.NewScanner(out) + for scanner.Scan() { + // skip first (peer) and second (field list) line + if lineCounter < 2 { + lineCounter++ + continue + } + + line := scanner.Text() + + fields := strings.Fields(line) + + mFields := make(map[string]interface{}) + tags := make(map[string]string) + + // Even line ---> ntp server info + if lineCounter%2 == 0 { + // DNS resolution error ---> keep DNS name as remote name + if fields[0] != "not" { + tags["remote"] = fields[0] + } else { + tags["remote"] = fields[len(fields)-1] + } + } + + // Read next line - Odd line ---> ntp server stats + scanner.Scan() + line = scanner.Text() + lineCounter++ + + fields = strings.Fields(line) + + // if there is an ntpctl state prefix, remove it and make it it's own tag + if strings.ContainsAny(string(fields[0]), "*") { + tags["state_prefix"] = string(fields[0]) + fields = fields[1:] + } + + // Get tags from output + for key, index := range tagI { + if index >= len(fields) { + continue + } + tags[key] = fields[index] + } + + // Get integer metrics from output + for key, index := range intI { + if index >= len(fields) { + continue + } + if fields[index] == "-" { + continue + } + + if key == "next" || key == "poll" { + + m, err := strconv.ParseInt(strings.TrimSuffix(fields[index], "s"), 10, 64) + if err != nil { + acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) + continue + } + mFields[key] = m + + } else { + + m, err := strconv.ParseInt(fields[index], 10, 64) + if err != nil { + acc.AddError(fmt.Errorf("integer value expected, got: %s", fields[index])) + continue + } + mFields[key] = m + } + } + + // get float metrics from output + for key, index := range floatI { + if len(fields) <= index { + continue + } + if fields[index] == "-" || fields[index] == "----" || fields[index] == "peer" || fields[index] == "not" || fields[index] == "valid" { + continue + } + + if key == "offset" || key == "delay" || key == "jitter" { + + m, err := strconv.ParseFloat(strings.TrimSuffix(fields[index], "ms"), 64) + if err != nil { + acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) + continue + } + mFields[key] = m + + } else { + + m, err := strconv.ParseFloat(fields[index], 64) + if err != nil { + acc.AddError(fmt.Errorf("float value expected, got: %s", fields[index])) + continue + } + mFields[key] = m + + } + } + acc.AddFields("openntpd", mFields, tags) + + lineCounter++ + } + return nil +} + +func init() { + inputs.Add("openntpd", func() telegraf.Input { + return &Openntpd{ + run: openntpdRunner, + Binary: defaultBinary, + Timeout: defaultTimeout, + UseSudo: false, + } + }) +} diff --git a/plugins/inputs/openntpd/openntpd_test.go b/plugins/inputs/openntpd/openntpd_test.go new file mode 100644 index 000000000..d629949a5 --- /dev/null +++ b/plugins/inputs/openntpd/openntpd_test.go @@ -0,0 +1,329 @@ +package openntpd + +import ( + "bytes" + "testing" + "time" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +var TestTimeout = internal.Duration{Duration: time.Second} + +func 
OpenntpdCTL(output string, Timeout internal.Duration, useSudo bool) func(string, internal.Duration, bool) (*bytes.Buffer, error) { + return func(string, internal.Duration, bool) (*bytes.Buffer, error) { + return bytes.NewBuffer([]byte(output)), nil + } +} + +func TestParseSimpleOutput(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(simpleOutput, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 7) + + firstpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(56), + "poll": int64(63), + "offset": float64(9.271), + "delay": float64(44.662), + "jitter": float64(2.678), + } + + firstpeertags := map[string]string{ + "remote": "212.129.9.36", + "stratum": "3", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) +} + +func TestParseSimpleOutputwithStatePrefix(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(simpleOutputwithStatePrefix, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 7) + + firstpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(45), + "poll": int64(980), + "offset": float64(-9.901), + "delay": float64(67.573), + "jitter": float64(29.350), + } + + firstpeertags := map[string]string{ + "remote": "92.243.6.5", + "stratum": "2", + "state_prefix": "*", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) +} + +func TestParseSimpleOutputInvalidPeer(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(simpleOutputInvalidPeer, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 4) + + firstpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(2), + "next": int64(203), + "poll": int64(300), + } + + firstpeertags := map[string]string{ + "remote": "178.33.111.49", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) +} + +func TestParseSimpleOutputServersDNSError(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(simpleOutputServersDNSError, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 4) + + firstpeerfields := map[string]interface{}{ + "next": int64(2), + "poll": int64(15), + "wt": int64(1), + "tl": int64(2), + } + + firstpeertags := map[string]string{ + "remote": "pool.nl.ntp.org", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) + + secondpeerfields := map[string]interface{}{ + "next": int64(2), + "poll": int64(15), + "wt": int64(1), + "tl": int64(2), + } + + secondpeertags := map[string]string{ + "remote": "pool.nl.ntp.org", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", secondpeerfields, secondpeertags) +} + +func TestParseSimpleOutputServerDNSError(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: 
OpenntpdCTL(simpleOutputServerDNSError, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(1)) + + assert.Equal(t, acc.NFields(), 4) + + firstpeerfields := map[string]interface{}{ + "next": int64(12), + "poll": int64(15), + "wt": int64(1), + "tl": int64(2), + } + + firstpeertags := map[string]string{ + "remote": "pool.fr.ntp.org", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) +} + +func TestParseFullOutput(t *testing.T) { + acc := &testutil.Accumulator{} + v := &Openntpd{ + run: OpenntpdCTL(fullOutput, TestTimeout, false), + } + err := v.Gather(acc) + + assert.NoError(t, err) + assert.True(t, acc.HasMeasurement("openntpd")) + assert.Equal(t, acc.NMetrics(), uint64(20)) + + assert.Equal(t, acc.NFields(), 113) + + firstpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(56), + "poll": int64(63), + "offset": float64(9.271), + "delay": float64(44.662), + "jitter": float64(2.678), + } + + firstpeertags := map[string]string{ + "remote": "212.129.9.36", + "stratum": "3", + } + + acc.AssertContainsTaggedFields(t, "openntpd", firstpeerfields, firstpeertags) + + secondpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(21), + "poll": int64(64), + "offset": float64(-0.103), + "delay": float64(53.199), + "jitter": float64(9.046), + } + + secondpeertags := map[string]string{ + "remote": "163.172.25.19", + "stratum": "2", + } + + acc.AssertContainsTaggedFields(t, "openntpd", secondpeerfields, secondpeertags) + + thirdpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(10), + "next": int64(45), + "poll": int64(980), + "offset": float64(-9.901), + "delay": float64(67.573), + "jitter": float64(29.350), + } + + thirdpeertags := map[string]string{ + "remote": "92.243.6.5", + "stratum": "2", + "state_prefix": "*", + } + + acc.AssertContainsTaggedFields(t, "openntpd", thirdpeerfields, thirdpeertags) + + fourthpeerfields := map[string]interface{}{ + "wt": int64(1), + "tl": int64(2), + "next": int64(203), + "poll": int64(300), + } + + fourthpeertags := map[string]string{ + "remote": "178.33.111.49", + "stratum": "-", + } + + acc.AssertContainsTaggedFields(t, "openntpd", fourthpeerfields, fourthpeertags) +} + +var simpleOutput = `peer +wt tl st next poll offset delay jitter +212.129.9.36 from pool 0.debian.pool.ntp.org +1 10 3 56s 63s 9.271ms 44.662ms 2.678ms` + +var simpleOutputwithStatePrefix = `peer +wt tl st next poll offset delay jitter +92.243.6.5 from pool 0.debian.pool.ntp.org +* 1 10 2 45s 980s -9.901ms 67.573ms 29.350ms` + +var simpleOutputInvalidPeer = `peer +wt tl st next poll offset delay jitter +178.33.111.49 from pool 0.debian.pool.ntp.org +1 2 - 203s 300s ---- peer not valid ----` + +var simpleOutputServersDNSError = `peer +wt tl st next poll offset delay jitter +not resolved from pool pool.nl.ntp.org +1 2 - 2s 15s ---- peer not valid ---- +` +var simpleOutputServerDNSError = `peer +wt tl st next poll offset delay jitter +not resolved pool.fr.ntp.org +1 2 - 12s 15s ---- peer not valid ---- +` + +var fullOutput = `peer +wt tl st next poll offset delay jitter +212.129.9.36 from pool 0.debian.pool.ntp.org +1 10 3 56s 63s 9.271ms 44.662ms 2.678ms +163.172.25.19 from pool 0.debian.pool.ntp.org +1 10 2 21s 64s -0.103ms 53.199ms 9.046ms +92.243.6.5 from pool 0.debian.pool.ntp.org +* 1 10 2 45s 980s -9.901ms 67.573ms 29.350ms +178.33.111.49 
from pool 0.debian.pool.ntp.org +1 2 - 203s 300s ---- peer not valid ---- +62.210.122.129 from pool 1.debian.pool.ntp.org +1 10 3 4s 60s 5.372ms 53.690ms 14.700ms +163.172.225.159 from pool 1.debian.pool.ntp.org +1 10 3 38s 61s 12.276ms 40.631ms 1.282ms +5.196.192.58 from pool 1.debian.pool.ntp.org +1 2 - 0s 300s ---- peer not valid ---- +129.250.35.250 from pool 1.debian.pool.ntp.org +1 10 2 28s 63s 11.236ms 43.874ms 1.381ms +2001:41d0:a:5a7::1 from pool 2.debian.pool.ntp.org +1 2 - 5s 15s ---- peer not valid ---- +2001:41d0:8:188d::16 from pool 2.debian.pool.ntp.org +1 2 - 3s 15s ---- peer not valid ---- +2001:4b98:dc0:41:216:3eff:fe69:46e3 from pool 2.debian.pool.ntp.org +1 2 - 14s 15s ---- peer not valid ---- +2a01:e0d:1:3:58bf:fa61:0:1 from pool 2.debian.pool.ntp.org +1 2 - 9s 15s ---- peer not valid ---- +163.172.179.38 from pool 2.debian.pool.ntp.org +1 10 2 51s 65s -19.229ms 85.404ms 48.734ms +5.135.3.88 from pool 2.debian.pool.ntp.org +1 2 - 173s 300s ---- peer not valid ---- +195.154.41.195 from pool 2.debian.pool.ntp.org +1 10 2 84s 1004s -3.956ms 54.549ms 13.658ms +62.210.81.130 from pool 2.debian.pool.ntp.org +1 10 2 158s 1043s -42.593ms 124.353ms 94.230ms +149.202.97.123 from pool 3.debian.pool.ntp.org +1 2 - 205s 300s ---- peer not valid ---- +51.15.175.224 from pool 3.debian.pool.ntp.org +1 10 2 9s 64s 8.861ms 46.640ms 0.668ms +37.187.5.167 from pool 3.debian.pool.ntp.org +1 2 - 105s 300s ---- peer not valid ---- +194.57.169.1 from pool 3.debian.pool.ntp.org +1 10 2 32s 63s 6.589ms 52.051ms 2.057ms` diff --git a/plugins/inputs/opensmtpd/README.md b/plugins/inputs/opensmtpd/README.md index c1166d9e5..5bbd4be89 100644 --- a/plugins/inputs/opensmtpd/README.md +++ b/plugins/inputs/opensmtpd/README.md @@ -5,15 +5,14 @@ This plugin gathers stats from [OpenSMTPD - a FREE implementation of the server- ### Configuration: ```toml - # A plugin to collect stats from OpenSMTPD - a FREE implementation of the server-side SMTP protocol - [[inputs.smtpctl]] + [[inputs.opensmtpd]] ## If running as a restricted user you can prepend sudo for additional access: #use_sudo = false ## The default location of the smtpctl binary can be overridden with: binary = "/usr/sbin/smtpctl" - # The default timeout of 1s can be overriden with: + # The default timeout of 1s can be overridden with: #timeout = "1s" ``` @@ -87,7 +86,9 @@ You will also need to update your sudoers file: ```bash $ visudo # Add the following line: -telegraf ALL=(ALL) NOPASSWD: /usr/sbin/smtpctl +Cmnd_Alias SMTPCTL = /usr/sbin/smtpctl +telegraf ALL=(ALL) NOPASSWD: SMTPCTL +Defaults!SMTPCTL !logfile, !syslog, !pam_session ``` Please use the solution you see as most appropriate. diff --git a/plugins/inputs/opensmtpd/opensmtpd.go b/plugins/inputs/opensmtpd/opensmtpd.go index 1c0e5690d..c3f76f2ef 100644 --- a/plugins/inputs/opensmtpd/opensmtpd.go +++ b/plugins/inputs/opensmtpd/opensmtpd.go @@ -37,7 +37,7 @@ var sampleConfig = ` ## The default location of the smtpctl binary can be overridden with: binary = "/usr/sbin/smtpctl" - ## The default timeout of 1000ms can be overriden with (in milliseconds): + ## The default timeout of 1000ms can be overridden with (in milliseconds): timeout = 1000 ` diff --git a/plugins/inputs/openweathermap/README.md b/plugins/inputs/openweathermap/README.md new file mode 100644 index 000000000..85803f76a --- /dev/null +++ b/plugins/inputs/openweathermap/README.md @@ -0,0 +1,82 @@ +# OpenWeatherMap Input Plugin + +Collect current weather and forecast data from OpenWeatherMap. 
+ +To use this plugin you will need an [api key][] (app_id). + +City identifiers can be found in the [city list][]. Alternately you +can [search][] by name; the `city_id` can be found as the last digits +of the URL: https://openweathermap.org/city/2643743. Language +identifiers can be found in the [lang list][]. Documentation for +condition ID, icon, and main is at [weather conditions][]. + +### Configuration + +```toml +[[inputs.openweathermap]] + ## OpenWeatherMap API key. + app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + + ## City ID's to collect weather data from. + city_id = ["5391959"] + + ## Language of the description field. Can be one of "ar", "bg", + ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", + ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", + ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" + # lang = "en" + + ## APIs to fetch; can contain "weather" or "forecast". + fetch = ["weather", "forecast"] + + ## OpenWeatherMap base URL + # base_url = "https://api.openweathermap.org/" + + ## Timeout for HTTP response. + # response_timeout = "5s" + + ## Preferred unit system for temperature and wind speed. Can be one of + ## "metric", "imperial", or "standard". + # units = "metric" + + ## Query interval; OpenWeatherMap weather data is updated every 10 + ## minutes. + interval = "10m" +``` + +### Metrics + +- weather + - tags: + - city_id + - forecast + - condition_id + - condition_main + - fields: + - cloudiness (int, percent) + - humidity (int, percent) + - pressure (float, atmospheric pressure hPa) + - rain (float, rain volume for the last 1-3 hours (depending on API response) in mm) + - sunrise (int, nanoseconds since unix epoch) + - sunset (int, nanoseconds since unix epoch) + - temperature (float, degrees) + - visibility (int, meters, not available on forecast data) + - wind_degrees (float, wind direction in degrees) + - wind_speed (float, wind speed in meters/sec or miles/sec) + - condition_description (string, localized long description) + - condition_icon + + +### Example Output + +``` +> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=* cloudiness=1i,condition_description="clear sky",condition_icon="01d",humidity=35i,pressure=1012,rain=0,sunrise=1570630329000000000i,sunset=1570671689000000000i,temperature=21.52,visibility=16093i,wind_degrees=280,wind_speed=5.7 1570659256000000000 +> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=3h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=41i,pressure=1010,rain=0,temperature=22.34,wind_degrees=249.393,wind_speed=2.085 1570665600000000000 +> weather,city=San\ Francisco,city_id=5391959,condition_id=800,condition_main=Clear,country=US,forecast=6h cloudiness=0i,condition_description="clear sky",condition_icon="01n",humidity=50i,pressure=1012,rain=0,temperature=17.09,wind_degrees=310.754,wind_speed=3.009 1570676400000000000 +``` + +[api key]: https://openweathermap.org/appid +[city list]: http://bulk.openweathermap.org/sample/city.list.json.gz +[search]: https://openweathermap.org/find +[lang list]: https://openweathermap.org/current#multi +[weather conditions]: https://openweathermap.org/weather-conditions diff --git a/plugins/inputs/openweathermap/openweathermap.go b/plugins/inputs/openweathermap/openweathermap.go new file mode 100644 index 000000000..94055a6f8 --- /dev/null +++ b/plugins/inputs/openweathermap/openweathermap.go @@ -0,0 +1,363 @@ 
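+// Package openweathermap collects current weather conditions and forecast data
+// from the OpenWeatherMap HTTP API. See README.md for configuration details.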
+package openweathermap + +import ( + "encoding/json" + "fmt" + "io" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const ( + // https://openweathermap.org/current#severalid + // Call for several city IDs + // The limit of locations is 20. + owmRequestSeveralCityId int = 20 + + defaultBaseUrl = "https://api.openweathermap.org/" + defaultResponseTimeout time.Duration = time.Second * 5 + defaultUnits string = "metric" + defaultLang string = "en" +) + +type OpenWeatherMap struct { + AppId string `toml:"app_id"` + CityId []string `toml:"city_id"` + Lang string `toml:"lang"` + Fetch []string `toml:"fetch"` + BaseUrl string `toml:"base_url"` + ResponseTimeout internal.Duration `toml:"response_timeout"` + Units string `toml:"units"` + + client *http.Client + baseUrl *url.URL +} + +var sampleConfig = ` + ## OpenWeatherMap API key. + app_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + + ## City ID's to collect weather data from. + city_id = ["5391959"] + + ## Language of the description field. Can be one of "ar", "bg", + ## "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", "hr", "hu", + ## "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", "pt", "ro", "ru", + ## "se", "sk", "sl", "es", "tr", "ua", "vi", "zh_cn", "zh_tw" + # lang = "en" + + ## APIs to fetch; can contain "weather" or "forecast". + fetch = ["weather", "forecast"] + + ## OpenWeatherMap base URL + # base_url = "https://api.openweathermap.org/" + + ## Timeout for HTTP response. + # response_timeout = "5s" + + ## Preferred unit system for temperature and wind speed. Can be one of + ## "metric", "imperial", or "standard". + # units = "metric" + + ## Query interval; OpenWeatherMap updates their weather data every 10 + ## minutes. 
+ interval = "10m" +` + +func (n *OpenWeatherMap) SampleConfig() string { + return sampleConfig +} + +func (n *OpenWeatherMap) Description() string { + return "Read current weather and forecasts data from openweathermap.org" +} + +func (n *OpenWeatherMap) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + var strs []string + + for _, fetch := range n.Fetch { + if fetch == "forecast" { + for _, city := range n.CityId { + addr := n.formatURL("/data/2.5/forecast", city) + wg.Add(1) + go func() { + defer wg.Done() + status, err := n.gatherUrl(addr) + if err != nil { + acc.AddError(err) + return + } + + gatherForecast(acc, status) + }() + } + } else if fetch == "weather" { + j := 0 + for j < len(n.CityId) { + strs = make([]string, 0) + for i := 0; j < len(n.CityId) && i < owmRequestSeveralCityId; i++ { + strs = append(strs, n.CityId[j]) + j++ + } + cities := strings.Join(strs, ",") + + addr := n.formatURL("/data/2.5/group", cities) + wg.Add(1) + go func() { + defer wg.Done() + status, err := n.gatherUrl(addr) + if err != nil { + acc.AddError(err) + return + } + + gatherWeather(acc, status) + }() + } + + } + } + + wg.Wait() + return nil +} + +func (n *OpenWeatherMap) createHttpClient() (*http.Client, error) { + if n.ResponseTimeout.Duration < time.Second { + n.ResponseTimeout.Duration = defaultResponseTimeout + } + + client := &http.Client{ + Transport: &http.Transport{}, + Timeout: n.ResponseTimeout.Duration, + } + + return client, nil +} + +func (n *OpenWeatherMap) gatherUrl(addr string) (*Status, error) { + resp, err := n.client.Get(addr) + if err != nil { + return nil, fmt.Errorf("error making HTTP request to %s: %s", addr, err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s returned HTTP status %s", addr, resp.Status) + } + + mediaType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + + if mediaType != "application/json" { + return nil, fmt.Errorf("%s returned unexpected content type %s", addr, mediaType) + } + + return gatherWeatherUrl(resp.Body) +} + +type WeatherEntry struct { + Dt int64 `json:"dt"` + Clouds struct { + All int64 `json:"all"` + } `json:"clouds"` + Main struct { + Humidity int64 `json:"humidity"` + Pressure float64 `json:"pressure"` + Temp float64 `json:"temp"` + } `json:"main"` + Rain struct { + Rain1 float64 `json:"1h"` + Rain3 float64 `json:"3h"` + } `json:"rain"` + Sys struct { + Country string `json:"country"` + Sunrise int64 `json:"sunrise"` + Sunset int64 `json:"sunset"` + } `json:"sys"` + Wind struct { + Deg float64 `json:"deg"` + Speed float64 `json:"speed"` + } `json:"wind"` + Id int64 `json:"id"` + Name string `json:"name"` + Coord struct { + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` + } `json:"coord"` + Visibility int64 `json:"visibility"` + Weather []struct { + ID int64 `json:"id"` + Main string `json:"main"` + Description string `json:"description"` + Icon string `json:"icon"` + } `json:"weather"` +} + +type Status struct { + City struct { + Coord struct { + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` + } `json:"coord"` + Country string `json:"country"` + Id int64 `json:"id"` + Name string `json:"name"` + } `json:"city"` + List []WeatherEntry `json:"list"` +} + +func gatherWeatherUrl(r io.Reader) (*Status, error) { + dec := json.NewDecoder(r) + status := &Status{} + if err := dec.Decode(status); err != nil { + return nil, fmt.Errorf("error while decoding JSON response: %s", err) + } + return status, nil +} + +func 
gatherRain(e WeatherEntry) float64 { + if e.Rain.Rain1 > 0 { + return e.Rain.Rain1 + } + return e.Rain.Rain3 +} + +func gatherWeather(acc telegraf.Accumulator, status *Status) { + for _, e := range status.List { + tm := time.Unix(e.Dt, 0) + + fields := map[string]interface{}{ + "cloudiness": e.Clouds.All, + "humidity": e.Main.Humidity, + "pressure": e.Main.Pressure, + "rain": gatherRain(e), + "sunrise": time.Unix(e.Sys.Sunrise, 0).UnixNano(), + "sunset": time.Unix(e.Sys.Sunset, 0).UnixNano(), + "temperature": e.Main.Temp, + "visibility": e.Visibility, + "wind_degrees": e.Wind.Deg, + "wind_speed": e.Wind.Speed, + } + tags := map[string]string{ + "city": e.Name, + "city_id": strconv.FormatInt(e.Id, 10), + "country": e.Sys.Country, + "forecast": "*", + } + + if len(e.Weather) > 0 { + fields["condition_description"] = e.Weather[0].Description + fields["condition_icon"] = e.Weather[0].Icon + tags["condition_id"] = strconv.FormatInt(e.Weather[0].ID, 10) + tags["condition_main"] = e.Weather[0].Main + } + + acc.AddFields("weather", fields, tags, tm) + } +} + +func gatherForecast(acc telegraf.Accumulator, status *Status) { + tags := map[string]string{ + "city_id": strconv.FormatInt(status.City.Id, 10), + "forecast": "*", + "city": status.City.Name, + "country": status.City.Country, + } + for i, e := range status.List { + tm := time.Unix(e.Dt, 0) + fields := map[string]interface{}{ + "cloudiness": e.Clouds.All, + "humidity": e.Main.Humidity, + "pressure": e.Main.Pressure, + "rain": gatherRain(e), + "temperature": e.Main.Temp, + "wind_degrees": e.Wind.Deg, + "wind_speed": e.Wind.Speed, + } + if len(e.Weather) > 0 { + fields["condition_description"] = e.Weather[0].Description + fields["condition_icon"] = e.Weather[0].Icon + tags["condition_id"] = strconv.FormatInt(e.Weather[0].ID, 10) + tags["condition_main"] = e.Weather[0].Main + } + tags["forecast"] = fmt.Sprintf("%dh", (i+1)*3) + acc.AddFields("weather", fields, tags, tm) + } +} + +func init() { + inputs.Add("openweathermap", func() telegraf.Input { + tmout := internal.Duration{ + Duration: defaultResponseTimeout, + } + return &OpenWeatherMap{ + ResponseTimeout: tmout, + BaseUrl: defaultBaseUrl, + } + }) +} + +func (n *OpenWeatherMap) Init() error { + var err error + n.baseUrl, err = url.Parse(n.BaseUrl) + if err != nil { + return err + } + + // Create an HTTP client that is re-used for each + // collection interval + n.client, err = n.createHttpClient() + if err != nil { + return err + } + + switch n.Units { + case "imperial", "standard", "metric": + case "": + n.Units = defaultUnits + default: + return fmt.Errorf("unknown units: %s", n.Units) + } + + switch n.Lang { + case "ar", "bg", "ca", "cz", "de", "el", "en", "fa", "fi", "fr", "gl", + "hr", "hu", "it", "ja", "kr", "la", "lt", "mk", "nl", "pl", + "pt", "ro", "ru", "se", "sk", "sl", "es", "tr", "ua", "vi", + "zh_cn", "zh_tw": + case "": + n.Lang = defaultLang + default: + return fmt.Errorf("unknown language: %s", n.Lang) + } + + return nil +} + +func (n *OpenWeatherMap) formatURL(path string, city string) string { + v := url.Values{ + "id": []string{city}, + "APPID": []string{n.AppId}, + "lang": []string{n.Lang}, + "units": []string{n.Units}, + } + + relative := &url.URL{ + Path: path, + RawQuery: v.Encode(), + } + + return n.baseUrl.ResolveReference(relative).String() +} diff --git a/plugins/inputs/openweathermap/openweathermap_test.go b/plugins/inputs/openweathermap/openweathermap_test.go new file mode 100644 index 000000000..9bee1d2e9 --- /dev/null +++ 
b/plugins/inputs/openweathermap/openweathermap_test.go @@ -0,0 +1,830 @@ +package openweathermap + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +const sampleNoContent = ` +{ +} +` + +const sampleStatusResponse = ` +{ + "city": { + "coord": { + "lat": 48.8534, + "lon": 2.3488 + }, + "country": "FR", + "id": 2988507, + "name": "Paris" + }, + "cnt": 40, + "cod": "200", + "list": [ + { + "clouds": { + "all": 88 + }, + "dt": 1543622400, + "dt_txt": "2018-12-01 00:00:00", + "main": { + "grnd_level": 1018.65, + "humidity": 91, + "pressure": 1018.65, + "sea_level": 1030.99, + "temp": 6.71, + "temp_kf": -2.14 + }, + "rain": { + "3h": 0.035 + }, + "sys": { + "pod": "n" + }, + "weather": [ + { + "description": "light rain", + "icon": "10n", + "id": 500, + "main": "Rain" + } + ], + "wind": { + "deg": 228.501, + "speed": 3.76 + } + }, + { + "clouds": { + "all": 92 + }, + "dt": 1544043600, + "dt_txt": "2018-12-05 21:00:00", + "main": { + "grnd_level": 1032.18, + "humidity": 98, + "pressure": 1032.18, + "sea_level": 1044.78, + "temp": 6.38, + "temp_kf": 0 + }, + "rain": { + "3h": 0.049999999999997 + }, + "sys": { + "pod": "n" + }, + "weather": [ + { + "description": "light rain", + "icon": "10n", + "id": 500, + "main": "Rain" + } + ], + "wind": { + "deg": 335.005, + "speed": 2.66 + } + } + ], + "message": 0.0025 +} +` + +const groupWeatherResponse = ` +{ + "cnt": 1, + "list": [{ + "clouds": { + "all": 0 + }, + "coord": { + "lat": 48.85, + "lon": 2.35 + }, + "dt": 1544194800, + "id": 2988507, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + "type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "wind": { + "deg": 290, + "speed": 8.7 + } + }] +} +` + +const rainWeatherResponse = ` +{ + "cnt": 2, + "list": [{ + "dt": 1544194800, + "id": 111, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + "type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "rain": { + "1h": 1.000 + }, + "wind": { + "deg": 290, + "speed": 8.7 + } + }, + { + "dt": 1544194800, + "id": 222, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + "type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "rain": { + "3h": 3.000 + }, + "wind": { + "deg": 290, + "speed": 8.7 + } + }, + { + "dt": 1544194800, + "id": 333, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + "type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "rain": { + "1h": 1.300, + "3h": 999 + }, + "wind": { 
+ "deg": 290, + "speed": 8.7 + } + }, + { + "dt": 1544194800, + "id": 444, + "main": { + "humidity": 87, + "pressure": 1007, + "temp": 9.25 + }, + "name": "Paris", + "sys": { + "country": "FR", + "id": 6550, + "message": 0.002, + "sunrise": 1544167818, + "sunset": 1544198047, + "type": 1 + }, + "visibility": 10000, + "weather": [ + { + "description": "light intensity drizzle", + "icon": "09d", + "id": 300, + "main": "Drizzle" + } + ], + "wind": { + "deg": 290, + "speed": 8.7 + } + }] +} +` +const batchWeatherResponse = ` +{ + "cnt": 3, + "list": [{ + "coord": { + "lon": 37.62, + "lat": 55.75 + }, + "sys": { + "type": 1, + "id": 9029, + "message": 0.0061, + "country": "RU", + "sunrise": 1556416455, + "sunset": 1556470779 + }, + "weather": [{ + "id": 802, + "main": "Clouds", + "description": "scattered clouds", + "icon": "03d" + }], + "main": { + "temp": 9.57, + "pressure": 1014, + "humidity": 46 + }, + "visibility": 10000, + "wind": { + "speed": 5, + "deg": 60 + }, + "clouds": { + "all": 40 + }, + "dt": 1556444155, + "id": 524901, + "name": "Moscow" + }, { + "coord": { + "lon": 30.52, + "lat": 50.43 + }, + "sys": { + "type": 1, + "id": 8903, + "message": 0.0076, + "country": "UA", + "sunrise": 1556419155, + "sunset": 1556471486 + }, + "weather": [{ + "id": 520, + "main": "Rain", + "description": "light intensity shower rain", + "icon": "09d" + }], + "main": { + "temp": 19.29, + "pressure": 1009, + "humidity": 63 + }, + "visibility": 10000, + "wind": { + "speed": 1 + }, + "clouds": { + "all": 0 + }, + "dt": 1556444155, + "id": 703448, + "name": "Kiev" + }, { + "coord": { + "lon": -0.13, + "lat": 51.51 + }, + "sys": { + "type": 1, + "id": 1414, + "message": 0.0088, + "country": "GB", + "sunrise": 1556426319, + "sunset": 1556479032 + }, + "weather": [{ + "id": 803, + "main": "Clouds", + "description": "broken clouds", + "icon": "04d" + }], + "main": { + "temp": 10.62, + "pressure": 1019, + "humidity": 66 + }, + "visibility": 10000, + "wind": { + "speed": 6.2, + "deg": 290 + }, + "rain": { + "3h": 0.072 + }, + "clouds": { + "all": 75 + }, + "dt": 1556444155, + "id": 2643743, + "name": "London" + }] +} +` + +func TestForecastGeneratesMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + if r.URL.Path == "/data/2.5/forecast" { + rsp = sampleStatusResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else if r.URL.Path == "/data/2.5/group" { + rsp = sampleNoContent + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &OpenWeatherMap{ + BaseUrl: ts.URL, + AppId: "noappid", + CityId: []string{"2988507"}, + Fetch: []string{"weather", "forecast"}, + Units: "metric", + } + n.Init() + + var acc testutil.Accumulator + + err := n.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "2988507", + "forecast": "3h", + "city": "Paris", + "country": "FR", + "condition_id": "500", + "condition_main": "Rain", + }, + map[string]interface{}{ + "cloudiness": int64(88), + "humidity": int64(91), + "pressure": 1018.65, + "temperature": 6.71, + "rain": 0.035, + "wind_degrees": 228.501, + "wind_speed": 3.76, + "condition_description": "light rain", + "condition_icon": "10n", + }, + time.Unix(1543622400, 0), + ), + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "2988507", + "forecast": "6h", + "city": "Paris", + "country": "FR", + "condition_id": "500", + 
"condition_main": "Rain", + }, + map[string]interface{}{ + "cloudiness": int64(92), + "humidity": int64(98), + "pressure": 1032.18, + "temperature": 6.38, + "rain": 0.049999999999997, + "wind_degrees": 335.005, + "wind_speed": 2.66, + "condition_description": "light rain", + "condition_icon": "10n", + }, + time.Unix(1544043600, 0), + ), + } + + testutil.RequireMetricsEqual(t, + expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) +} + +func TestWeatherGeneratesMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + if r.URL.Path == "/data/2.5/group" { + rsp = groupWeatherResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else if r.URL.Path == "/data/2.5/forecast" { + rsp = sampleNoContent + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &OpenWeatherMap{ + BaseUrl: ts.URL, + AppId: "noappid", + CityId: []string{"2988507"}, + Fetch: []string{"weather"}, + Units: "metric", + } + n.Init() + + var acc testutil.Accumulator + + err := n.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "2988507", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 0.0, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", + }, + time.Unix(1544194800, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +// Ensure that results containing "1h", "3h", both, or no rain values are parsed correctly +func TestRainMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + if r.URL.Path == "/data/2.5/group" { + rsp = rainWeatherResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &OpenWeatherMap{ + BaseUrl: ts.URL, + AppId: "noappid", + CityId: []string{"111", "222", "333", "444"}, + Fetch: []string{"weather"}, + Units: "metric", + } + n.Init() + + var acc testutil.Accumulator + + err := n.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + // City with 1h rain value + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "111", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 1.0, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", + }, + time.Unix(1544194800, 0), + ), + // City with 3h rain value + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "222", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": 
int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 3.0, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", + }, + time.Unix(1544194800, 0), + ), + // City with both 1h and 3h rain values, prefer the 1h value + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "333", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 1.3, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", + }, + time.Unix(1544194800, 0), + ), + // City with no rain values + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "444", + "forecast": "*", + "city": "Paris", + "country": "FR", + "condition_id": "300", + "condition_main": "Drizzle", + }, + map[string]interface{}{ + "cloudiness": int64(0), + "humidity": int64(87), + "pressure": 1007.0, + "temperature": 9.25, + "rain": 0.0, + "sunrise": int64(1544167818000000000), + "sunset": int64(1544198047000000000), + "wind_degrees": 290.0, + "wind_speed": 8.7, + "visibility": 10000, + "condition_description": "light intensity drizzle", + "condition_icon": "09d", + }, + time.Unix(1544194800, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +func TestBatchWeatherGeneratesMetrics(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + var rsp string + if r.URL.Path == "/data/2.5/group" { + rsp = batchWeatherResponse + w.Header()["Content-Type"] = []string{"application/json"} + } else if r.URL.Path == "/data/2.5/forecast" { + rsp = sampleNoContent + } else { + panic("Cannot handle request") + } + + fmt.Fprintln(w, rsp) + })) + defer ts.Close() + + n := &OpenWeatherMap{ + BaseUrl: ts.URL, + AppId: "noappid", + CityId: []string{"524901", "703448", "2643743"}, + Fetch: []string{"weather"}, + Units: "metric", + } + n.Init() + + var acc testutil.Accumulator + + err := n.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "524901", + "forecast": "*", + "city": "Moscow", + "country": "RU", + "condition_id": "802", + "condition_main": "Clouds", + }, + map[string]interface{}{ + "cloudiness": 40, + "humidity": int64(46), + "pressure": 1014.0, + "temperature": 9.57, + "wind_degrees": 60.0, + "wind_speed": 5.0, + "rain": 0.0, + "sunrise": int64(1556416455000000000), + "sunset": int64(1556470779000000000), + "visibility": 10000, + "condition_description": "scattered clouds", + "condition_icon": "03d", + }, + time.Unix(1556444155, 0), + ), + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "703448", + "forecast": "*", + "city": "Kiev", + "country": "UA", + "condition_id": "520", + "condition_main": "Rain", + }, + map[string]interface{}{ + "cloudiness": 0, + "humidity": int64(63), + "pressure": 1009.0, + "temperature": 19.29, + "wind_degrees": 0.0, + "wind_speed": 1.0, + "rain": 0.0, + "sunrise": int64(1556419155000000000), + "sunset": int64(1556471486000000000), + "visibility": 10000, + 
"condition_description": "light intensity shower rain", + "condition_icon": "09d", + }, + time.Unix(1556444155, 0), + ), + testutil.MustMetric( + "weather", + map[string]string{ + "city_id": "2643743", + "forecast": "*", + "city": "London", + "country": "GB", + "condition_id": "803", + "condition_main": "Clouds", + }, + map[string]interface{}{ + "cloudiness": 75, + "humidity": int64(66), + "pressure": 1019.0, + "temperature": 10.62, + "wind_degrees": 290.0, + "wind_speed": 6.2, + "rain": 0.072, + "sunrise": int64(1556426319000000000), + "sunset": int64(1556479032000000000), + "visibility": 10000, + "condition_description": "broken clouds", + "condition_icon": "04d", + }, + time.Unix(1556444155, 0), + ), + } + testutil.RequireMetricsEqual(t, + expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) +} + +func TestFormatURL(t *testing.T) { + n := &OpenWeatherMap{ + AppId: "appid", + Units: "units", + Lang: "lang", + BaseUrl: "http://foo.com", + } + n.Init() + + require.Equal(t, + "http://foo.com/data/2.5/forecast?APPID=appid&id=12345&lang=lang&units=units", + n.formatURL("/data/2.5/forecast", "12345")) +} + +func TestDefaultUnits(t *testing.T) { + n := &OpenWeatherMap{} + n.Init() + + require.Equal(t, "metric", n.Units) +} + +func TestDefaultLang(t *testing.T) { + n := &OpenWeatherMap{} + n.Init() + + require.Equal(t, "en", n.Lang) +} diff --git a/plugins/inputs/pf/README.md b/plugins/inputs/pf/README.md index 2e70de5b7..96a5ed488 100644 --- a/plugins/inputs/pf/README.md +++ b/plugins/inputs/pf/README.md @@ -1,8 +1,8 @@ # PF Plugin -The pf plugin gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrive information about the state table: the number of current entries in the table, and counters for the number of searches, inserts, and removals to the table. +The pf plugin gathers information from the FreeBSD/OpenBSD pf firewall. Currently it can retrieve information about the state table: the number of current entries in the table, and counters for the number of searches, inserts, and removals to the table. -The pf plugin retrives this information by invoking the `pfstat` command. The `pfstat` command requires read access to the device file `/dev/pf`. You have several options to permit telegraf to run `pfctl`: +The pf plugin retrieves this information by invoking the `pfstat` command. The `pfstat` command requires read access to the device file `/dev/pf`. You have several options to permit telegraf to run `pfctl`: * Run telegraf as root. This is strongly discouraged. * Change the ownership and permissions for /dev/pf such that the user telegraf runs at can read the /dev/pf device file. This is probably not that good of an idea either. 
diff --git a/plugins/inputs/pf/pf.go b/plugins/inputs/pf/pf.go index 04b5f9d21..035c44fbe 100644 --- a/plugins/inputs/pf/pf.go +++ b/plugins/inputs/pf/pf.go @@ -72,11 +72,11 @@ type pfctlOutputStanza struct { } var pfctlOutputStanzas = []*pfctlOutputStanza{ - &pfctlOutputStanza{ + { HeaderRE: regexp.MustCompile("^State Table"), ParseFunc: parseStateTable, }, - &pfctlOutputStanza{ + { HeaderRE: regexp.MustCompile("^Counters"), ParseFunc: parseCounterTable, }, @@ -127,10 +127,10 @@ type Entry struct { } var StateTable = []*Entry{ - &Entry{"entries", "current entries", -1}, - &Entry{"searches", "searches", -1}, - &Entry{"inserts", "inserts", -1}, - &Entry{"removals", "removals", -1}, + {"entries", "current entries", -1}, + {"searches", "searches", -1}, + {"inserts", "inserts", -1}, + {"removals", "removals", -1}, } var stateTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) @@ -140,21 +140,21 @@ func parseStateTable(lines []string, fields map[string]interface{}) error { } var CounterTable = []*Entry{ - &Entry{"match", "match", -1}, - &Entry{"bad-offset", "bad-offset", -1}, - &Entry{"fragment", "fragment", -1}, - &Entry{"short", "short", -1}, - &Entry{"normalize", "normalize", -1}, - &Entry{"memory", "memory", -1}, - &Entry{"bad-timestamp", "bad-timestamp", -1}, - &Entry{"congestion", "congestion", -1}, - &Entry{"ip-option", "ip-option", -1}, - &Entry{"proto-cksum", "proto-cksum", -1}, - &Entry{"state-mismatch", "state-mismatch", -1}, - &Entry{"state-insert", "state-insert", -1}, - &Entry{"state-limit", "state-limit", -1}, - &Entry{"src-limit", "src-limit", -1}, - &Entry{"synproxy", "synproxy", -1}, + {"match", "match", -1}, + {"bad-offset", "bad-offset", -1}, + {"fragment", "fragment", -1}, + {"short", "short", -1}, + {"normalize", "normalize", -1}, + {"memory", "memory", -1}, + {"bad-timestamp", "bad-timestamp", -1}, + {"congestion", "congestion", -1}, + {"ip-option", "ip-option", -1}, + {"proto-cksum", "proto-cksum", -1}, + {"state-mismatch", "state-mismatch", -1}, + {"state-insert", "state-insert", -1}, + {"state-limit", "state-limit", -1}, + {"src-limit", "src-limit", -1}, + {"synproxy", "synproxy", -1}, } var counterTableRE = regexp.MustCompile(`^ (.*?)\s+(\d+)`) diff --git a/plugins/inputs/pf/pf_test.go b/plugins/inputs/pf/pf_test.go index 0b90d949a..af73d66ad 100644 --- a/plugins/inputs/pf/pf_test.go +++ b/plugins/inputs/pf/pf_test.go @@ -23,13 +23,13 @@ func TestPfctlInvocation(t *testing.T) { var testCases = []pfctlInvocationTestCase{ // 0: no sudo - pfctlInvocationTestCase{ + { config: PF{UseSudo: false}, cmd: "fakepfctl", args: []string{"-s", "info"}, }, // 1: with sudo - pfctlInvocationTestCase{ + { config: PF{UseSudo: true}, cmd: "fakesudo", args: []string{"fakepfctl", "-s", "info"}, @@ -60,9 +60,9 @@ func TestPfMeasurements(t *testing.T) { testCases := []pfTestCase{ // 0: nil input should raise an error - pfTestCase{TestInput: "", err: errParseHeader}, + {TestInput: "", err: errParseHeader}, // 1: changes to pfctl output should raise an error - pfTestCase{TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent + {TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent Interface Stats for re1 IPv4 IPv6 Bytes In 2585823744614 1059233657221 @@ -99,7 +99,7 @@ Counters err: errMissingData("current entries"), }, // 2: bad numbers should raise an error - pfTestCase{TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent + {TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent State Table Total Rate current entries -23 @@ -125,7 +125,7 @@ Counters `, err: 
errMissingData("current entries"), }, - pfTestCase{TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent + {TestInput: `Status: Enabled for 0 days 00:26:05 Debug: Urgent State Table Total Rate current entries 2 @@ -150,7 +150,7 @@ Counters synproxy 0 0.0/s `, measurements: []measurementResult{ - measurementResult{ + { fields: map[string]interface{}{ "entries": int64(2), "searches": int64(11325), @@ -175,7 +175,7 @@ Counters }, }, }, - pfTestCase{TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent + {TestInput: `Status: Enabled for 161 days 21:24:45 Debug: Urgent Interface Stats for re1 IPv4 IPv6 Bytes In 2585823744614 1059233657221 @@ -210,7 +210,7 @@ Counters synproxy 0 0.0/s `, measurements: []measurementResult{ - measurementResult{ + { fields: map[string]interface{}{ "entries": int64(649), "searches": int64(18421725761), diff --git a/plugins/inputs/pgbouncer/README.md b/plugins/inputs/pgbouncer/README.md new file mode 100644 index 000000000..53737a81a --- /dev/null +++ b/plugins/inputs/pgbouncer/README.md @@ -0,0 +1,82 @@ +# PgBouncer Input Plugin + +The `pgbouncer` plugin provides metrics for your PgBouncer load balancer. + +More information about the meaning of these metrics can be found in the +[PgBouncer Documentation](https://pgbouncer.github.io/usage.html). + +- PgBouncer minimum tested version: 1.5 + +### Configuration example + +```toml +[[inputs.pgbouncer]] + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@host:port[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost port=5432 user=pqgotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. + ## + address = "host=localhost user=pgbouncer sslmode=disable" +``` + +#### `address` + +Specify address via a postgresql connection string: + + `host=/run/postgresql port=6432 user=telegraf database=pgbouncer` + +Or via an url matching: + + `postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full]` + +All connection parameters are optional. + +Without the dbname parameter, the driver will default to a database with the same name as the user. +This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. 
+ +### Metrics + +- pgbouncer + - tags: + - db + - server + - fields: + - avg_query_count + - avg_query_time + - avg_wait_time + - avg_xact_count + - avg_xact_time + - total_query_count + - total_query_time + - total_received + - total_sent + - total_wait_time + - total_xact_count + - total_xact_time + ++ pgbouncer_pools + - tags: + - db + - pool_mode + - server + - user + - fields: + - cl_active + - cl_waiting + - maxwait + - maxwait_us + - sv_active + - sv_idle + - sv_login + - sv_tested + - sv_used + +### Example Output + +``` +pgbouncer,db=pgbouncer,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ avg_query_count=0i,avg_query_time=0i,avg_wait_time=0i,avg_xact_count=0i,avg_xact_time=0i,total_query_count=26i,total_query_time=0i,total_received=0i,total_sent=0i,total_wait_time=0i,total_xact_count=26i,total_xact_time=0i 1581569936000000000 +pgbouncer_pools,db=pgbouncer,pool_mode=statement,server=host\=debian-buster-postgres\ user\=dbn\ port\=6432\ dbname\=pgbouncer\ ,user=pgbouncer cl_active=1i,cl_waiting=0i,maxwait=0i,maxwait_us=0i,sv_active=0i,sv_idle=0i,sv_login=0i,sv_tested=0i,sv_used=0i 1581569936000000000 +``` diff --git a/plugins/inputs/pgbouncer/pgbouncer.go b/plugins/inputs/pgbouncer/pgbouncer.go new file mode 100644 index 000000000..0b8c8c16a --- /dev/null +++ b/plugins/inputs/pgbouncer/pgbouncer.go @@ -0,0 +1,201 @@ +package pgbouncer + +import ( + "bytes" + "strconv" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" + _ "github.com/jackc/pgx/stdlib" // register driver +) + +type PgBouncer struct { + postgresql.Service +} + +var ignoredColumns = map[string]bool{"user": true, "database": true, "pool_mode": true, + "avg_req": true, "avg_recv": true, "avg_sent": true, "avg_query": true, +} + +var sampleConfig = ` + ## specify address via a url matching: + ## postgres://[pqgotest[:password]]@localhost[/dbname]\ + ## ?sslmode=[disable|verify-ca|verify-full] + ## or a simple string: + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production + ## + ## All connection parameters are optional. 
+ ## + address = "host=localhost user=pgbouncer sslmode=disable" +` + +func (p *PgBouncer) SampleConfig() string { + return sampleConfig +} + +func (p *PgBouncer) Description() string { + return "Read metrics from one or many pgbouncer servers" +} + +func (p *PgBouncer) Gather(acc telegraf.Accumulator) error { + var ( + err error + query string + columns []string + ) + + query = `SHOW STATS` + + rows, err := p.DB.Query(query) + if err != nil { + return err + } + + defer rows.Close() + + // grab the column information from the result + if columns, err = rows.Columns(); err != nil { + return err + } + + for rows.Next() { + tags, columnMap, err := p.accRow(rows, acc, columns) + + if err != nil { + return err + } + + fields := make(map[string]interface{}) + for col, val := range columnMap { + _, ignore := ignoredColumns[col] + if ignore { + continue + } + + switch v := (*val).(type) { + case int64: + // Integer fields are returned in pgbouncer 1.5 through 1.9 + fields[col] = v + case string: + // Integer fields are returned in pgbouncer 1.12 + integer, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return err + } + + fields[col] = integer + } + } + acc.AddFields("pgbouncer", fields, tags) + } + + err = rows.Err() + if err != nil { + return err + } + + query = `SHOW POOLS` + + poolRows, err := p.DB.Query(query) + if err != nil { + return err + } + + defer poolRows.Close() + + // grab the column information from the result + if columns, err = poolRows.Columns(); err != nil { + return err + } + + for poolRows.Next() { + tags, columnMap, err := p.accRow(poolRows, acc, columns) + if err != nil { + return err + } + + if user, ok := columnMap["user"]; ok { + if s, ok := (*user).(string); ok && s != "" { + tags["user"] = s + } + } + + if poolMode, ok := columnMap["pool_mode"]; ok { + if s, ok := (*poolMode).(string); ok && s != "" { + tags["pool_mode"] = s + } + } + + fields := make(map[string]interface{}) + for col, val := range columnMap { + _, ignore := ignoredColumns[col] + if !ignore { + fields[col] = *val + } + } + acc.AddFields("pgbouncer_pools", fields, tags) + } + + return poolRows.Err() +} + +type scanner interface { + Scan(dest ...interface{}) error +} + +func (p *PgBouncer) accRow(row scanner, acc telegraf.Accumulator, columns []string) (map[string]string, + map[string]*interface{}, error) { + var columnVars []interface{} + var dbname bytes.Buffer + + // this is where we'll store the column name with its *interface{} + columnMap := make(map[string]*interface{}) + + for _, column := range columns { + columnMap[column] = new(interface{}) + } + + // populate the array of interface{} with the pointers in the right order + for i := 0; i < len(columnMap); i++ { + columnVars = append(columnVars, columnMap[columns[i]]) + } + + // deconstruct array of variables and send to Scan + err := row.Scan(columnVars...) 
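+	// If Scan succeeded, every column's value now sits behind its *interface{}
+	// in columnMap, so the caller can look fields up by column name without
+	// knowing PgBouncer's column set ahead of time; a failed scan is simply
+	// returned below.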
+ + if err != nil { + return nil, nil, err + } + if columnMap["database"] != nil { + // extract the database name from the column map + dbname.WriteString((*columnMap["database"]).(string)) + } else { + dbname.WriteString("postgres") + } + + var tagAddress string + tagAddress, err = p.SanitizedAddress() + if err != nil { + return nil, nil, err + } + + // Return basic tags and the mapped columns + return map[string]string{"server": tagAddress, "db": dbname.String()}, columnMap, nil +} + +func init() { + inputs.Add("pgbouncer", func() telegraf.Input { + return &PgBouncer{ + Service: postgresql.Service{ + MaxIdle: 1, + MaxOpen: 1, + MaxLifetime: internal.Duration{ + Duration: 0, + }, + IsPgBouncer: true, + }, + } + }) +} diff --git a/plugins/inputs/pgbouncer/pgbouncer_test.go b/plugins/inputs/pgbouncer/pgbouncer_test.go new file mode 100644 index 000000000..44e28c7f3 --- /dev/null +++ b/plugins/inputs/pgbouncer/pgbouncer_test.go @@ -0,0 +1,66 @@ +package pgbouncer + +import ( + "fmt" + "github.com/influxdata/telegraf/plugins/inputs/postgresql" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" +) + +func TestPgBouncerGeneratesMetrics(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + p := &PgBouncer{ + Service: postgresql.Service{ + Address: fmt.Sprintf( + "host=%s user=pgbouncer password=pgbouncer dbname=pgbouncer port=6432 sslmode=disable", + testutil.GetLocalHost(), + ), + IsPgBouncer: true, + }, + } + + var acc testutil.Accumulator + require.NoError(t, p.Start(&acc)) + require.NoError(t, p.Gather(&acc)) + + intMetrics := []string{ + "total_requests", + "total_received", + "total_sent", + "total_query_time", + "avg_req", + "avg_recv", + "avg_sent", + "avg_query", + "cl_active", + "cl_waiting", + "sv_active", + "sv_idle", + "sv_used", + "sv_tested", + "sv_login", + "maxwait", + } + + int32Metrics := []string{} + + metricsCounted := 0 + + for _, metric := range intMetrics { + assert.True(t, acc.HasInt64Field("pgbouncer", metric)) + metricsCounted++ + } + + for _, metric := range int32Metrics { + assert.True(t, acc.HasInt32Field("pgbouncer", metric)) + metricsCounted++ + } + + assert.True(t, metricsCounted > 0) + assert.Equal(t, len(intMetrics)+len(int32Metrics), metricsCounted) +} diff --git a/plugins/inputs/phpfpm/README.md b/plugins/inputs/phpfpm/README.md index 531edae24..b31f4b7e4 100644 --- a/plugins/inputs/phpfpm/README.md +++ b/plugins/inputs/phpfpm/README.md @@ -19,6 +19,8 @@ Get phpfpm stats using either HTTP status page or fpm socket. ## "/var/run/php5-fpm.sock" ## or using a custom fpm status path: ## "/var/run/php5-fpm.sock:fpm-custom-status-path" + ## glob patterns are also supported: + ## "/var/run/php*.sock" ## ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: ## "fcgi://10.0.0.12:9000/status" @@ -27,6 +29,16 @@ Get phpfpm stats using either HTTP status page or fpm socket. ## Example of multiple gathering from local socket and remote host ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] urls = ["http://localhost/status"] + + ## Duration allowed to complete HTTP requests. 
+ # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ``` When using `unixsocket`, you have to ensure that telegraf runs on same diff --git a/plugins/inputs/phpfpm/fcgi_client.go b/plugins/inputs/phpfpm/fcgi_client.go index 5a4d20019..9b42d91bd 100644 --- a/plugins/inputs/phpfpm/fcgi_client.go +++ b/plugins/inputs/phpfpm/fcgi_client.go @@ -59,7 +59,7 @@ func (client *conn) Request( rec := &record{} var err1 error - // recive until EOF or FCGI_END_REQUEST + // receive until EOF or FCGI_END_REQUEST READ_LOOP: for { err1 = rec.read(client.rwc) diff --git a/plugins/inputs/phpfpm/phpfpm.go b/plugins/inputs/phpfpm/phpfpm.go index e40dae174..4bb6443ab 100644 --- a/plugins/inputs/phpfpm/phpfpm.go +++ b/plugins/inputs/phpfpm/phpfpm.go @@ -13,12 +13,16 @@ import ( "sync" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/globpath" + "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) const ( PF_POOL = "pool" PF_PROCESS_MANAGER = "process manager" + PF_START_SINCE = "start since" PF_ACCEPTED_CONN = "accepted conn" PF_LISTEN_QUEUE = "listen queue" PF_MAX_LISTEN_QUEUE = "max listen queue" @@ -35,7 +39,9 @@ type metric map[string]int64 type poolStat map[string]metric type phpfpm struct { - Urls []string + Urls []string + Timeout internal.Duration + tls.ClientConfig client *http.Client } @@ -58,9 +64,19 @@ var sampleConfig = ` ## "fcgi://10.0.0.12:9000/status" ## "cgi://10.0.10.12:9001/status" ## - ## Example of multiple gathering from local socket and remove host + ## Example of multiple gathering from local socket and remote host ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] urls = ["http://localhost/status"] + + ## Duration allowed to complete HTTP requests. 
+ # timeout = "5s" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false ` func (r *phpfpm) SampleConfig() string { @@ -80,7 +96,12 @@ func (g *phpfpm) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup - for _, serv := range g.Urls { + urls, err := expandUrls(g.Urls) + if err != nil { + return err + } + + for _, serv := range urls { wg.Add(1) go func(serv string) { defer wg.Done() @@ -96,8 +117,17 @@ func (g *phpfpm) Gather(acc telegraf.Accumulator) error { // Request status page to get stat raw data and import it func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { if g.client == nil { - client := &http.Client{} - g.client = client + tlsCfg, err := g.ClientConfig.TLSConfig() + if err != nil { + return err + } + tr := &http.Transport{ + TLSClientConfig: tlsCfg, + } + g.client = &http.Client{ + Transport: tr, + Timeout: g.Timeout.Duration, + } } if strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") { @@ -129,18 +159,10 @@ func (g *phpfpm) gatherServer(addr string, acc telegraf.Accumulator) error { statusPath = "status" } } else { - socketAddr := strings.Split(addr, ":") - if len(socketAddr) >= 2 { - socketPath = socketAddr[0] - statusPath = socketAddr[1] - } else { - socketPath = socketAddr[0] + socketPath, statusPath = unixSocketPaths(addr) + if statusPath == "" { statusPath = "status" } - - if _, err := os.Stat(socketPath); os.IsNotExist(err) { - return fmt.Errorf("Socket doesn't exist '%s': %s", socketPath, err) - } fcgi, err = newFcgiClient("unix", socketPath) } @@ -219,7 +241,8 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) (poolStat, // Start to parse metric for current pool switch fieldName { - case PF_ACCEPTED_CONN, + case PF_START_SINCE, + PF_ACCEPTED_CONN, PF_LISTEN_QUEUE, PF_MAX_LISTEN_QUEUE, PF_LISTEN_QUEUE_LEN, @@ -252,6 +275,70 @@ func importMetric(r io.Reader, acc telegraf.Accumulator, addr string) (poolStat, return stats, nil } +func expandUrls(urls []string) ([]string, error) { + addrs := make([]string, 0, len(urls)) + for _, url := range urls { + if isNetworkURL(url) { + addrs = append(addrs, url) + continue + } + paths, err := globUnixSocket(url) + if err != nil { + return nil, err + } + addrs = append(addrs, paths...) 
+ } + return addrs, nil +} + +func globUnixSocket(url string) ([]string, error) { + pattern, status := unixSocketPaths(url) + glob, err := globpath.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("could not compile glob %q: %v", pattern, err) + } + paths := glob.Match() + if len(paths) == 0 { + if _, err := os.Stat(paths[0]); err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Socket doesn't exist '%s': %s", pattern, err) + } + return nil, err + } + return nil, nil + } + + addrs := make([]string, 0, len(paths)) + + for _, path := range paths { + if status != "" { + path = path + ":" + status + } + addrs = append(addrs, path) + } + + return addrs, nil +} + +func unixSocketPaths(addr string) (string, string) { + var socketPath, statusPath string + + socketAddr := strings.Split(addr, ":") + if len(socketAddr) >= 2 { + socketPath = socketAddr[0] + statusPath = socketAddr[1] + } else { + socketPath = socketAddr[0] + statusPath = "" + } + + return socketPath, statusPath +} + +func isNetworkURL(addr string) bool { + return strings.HasPrefix(addr, "http://") || strings.HasPrefix(addr, "https://") || strings.HasPrefix(addr, "fcgi://") || strings.HasPrefix(addr, "cgi://") +} + func init() { inputs.Add("phpfpm", func() telegraf.Input { return &phpfpm{} diff --git a/plugins/inputs/phpfpm/phpfpm_test.go b/plugins/inputs/phpfpm/phpfpm_test.go index ba24b0f36..e7e36c360 100644 --- a/plugins/inputs/phpfpm/phpfpm_test.go +++ b/plugins/inputs/phpfpm/phpfpm_test.go @@ -44,6 +44,7 @@ func TestPhpFpmGeneratesMetrics_From_Http(t *testing.T) { } fields := map[string]interface{}{ + "start_since": int64(1991), "accepted_conn": int64(3), "listen_queue": int64(1), "max_listen_queue": int64(0), @@ -85,6 +86,7 @@ func TestPhpFpmGeneratesMetrics_From_Fcgi(t *testing.T) { } fields := map[string]interface{}{ + "start_since": int64(1991), "accepted_conn": int64(3), "listen_queue": int64(1), "max_listen_queue": int64(0), @@ -130,6 +132,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { } fields := map[string]interface{}{ + "start_since": int64(1991), "accepted_conn": int64(3), "listen_queue": int64(1), "max_listen_queue": int64(0), @@ -145,6 +148,71 @@ func TestPhpFpmGeneratesMetrics_From_Socket(t *testing.T) { acc.AssertContainsTaggedFields(t, "phpfpm", fields, tags) } +func TestPhpFpmGeneratesMetrics_From_Multiple_Sockets_With_Glob(t *testing.T) { + // Create a socket in /tmp because we always have write permission and if the + // removing of socket fail when system restart /tmp is clear so + // we don't have junk files around + var randomNumber int64 + binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + socket1 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) + tcp1, err := net.Listen("unix", socket1) + if err != nil { + t.Fatal("Cannot initialize server on port ") + } + defer tcp1.Close() + + binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + socket2 := fmt.Sprintf("/tmp/test-fpm%d.sock", randomNumber) + tcp2, err := net.Listen("unix", socket2) + if err != nil { + t.Fatal("Cannot initialize server on port ") + } + defer tcp2.Close() + + s := statServer{} + go fcgi.Serve(tcp1, s) + go fcgi.Serve(tcp2, s) + + r := &phpfpm{ + Urls: []string{"/tmp/test-fpm[\\-0-9]*.sock"}, + } + + var acc1, acc2 testutil.Accumulator + + err = acc1.GatherError(r.Gather) + require.NoError(t, err) + + err = acc2.GatherError(r.Gather) + require.NoError(t, err) + + tags1 := map[string]string{ + "pool": "www", + "url": socket1, + } + + tags2 := map[string]string{ + "pool": "www", 
+ "url": socket2, + } + + fields := map[string]interface{}{ + "start_since": int64(1991), + "accepted_conn": int64(3), + "listen_queue": int64(1), + "max_listen_queue": int64(0), + "listen_queue_len": int64(0), + "idle_processes": int64(1), + "active_processes": int64(1), + "total_processes": int64(2), + "max_active_processes": int64(1), + "max_children_reached": int64(2), + "slow_requests": int64(1), + } + + acc1.AssertContainsTaggedFields(t, "phpfpm", fields, tags1) + acc2.AssertContainsTaggedFields(t, "phpfpm", fields, tags2) +} + func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { // Create a socket in /tmp because we always have write permission. If the // removing of socket fail we won't have junk files around. Cuz when system @@ -175,6 +243,7 @@ func TestPhpFpmGeneratesMetrics_From_Socket_Custom_Status_Path(t *testing.T) { } fields := map[string]interface{}{ + "start_since": int64(1991), "accepted_conn": int64(3), "listen_queue": int64(1), "max_listen_queue": int64(0), @@ -211,7 +280,8 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Fpm_Status_Is_Not_Responding(t err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone': Get http://aninvalidone: dial tcp: lookup aninvalidone`) + assert.Contains(t, err.Error(), `Unable to connect to phpfpm status page 'http://aninvalidone'`) + assert.Contains(t, err.Error(), `lookup aninvalidone`) } func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testing.T) { @@ -223,7 +293,7 @@ func TestPhpFpmGeneratesMetrics_Throw_Error_When_Socket_Path_Is_Invalid(t *testi err := acc.GatherError(r.Gather) require.Error(t, err) - assert.Equal(t, `Socket doesn't exist '/tmp/invalid.sock': stat /tmp/invalid.sock: no such file or directory`, err.Error()) + assert.Equal(t, `dial unix /tmp/invalid.sock: connect: no such file or directory`, err.Error()) } diff --git a/plugins/inputs/ping/README.md b/plugins/inputs/ping/README.md index eadc60ab7..91af1b2ae 100644 --- a/plugins/inputs/ping/README.md +++ b/plugins/inputs/ping/README.md @@ -1,54 +1,174 @@ -# Ping Input plugin +# Ping Input Plugin -This input plugin will measures the round-trip +Sends a ping message by executing the system ping command and reports the results. + +This plugin has two main methods of operation: `exec` and `native`. The +recommended method is `native`, which has greater system compatibility and +performance. However, for backwards compatibility the `exec` method is the +default. + +When using `method = "exec"`, the systems ping utility is executed to send the +ping packets. + +Most ping command implementations are supported, one notable exception being +that there is currently no support for GNU Inetutils ping. You may instead use +the iputils-ping implementation: +``` +apt-get install iputils-ping +``` + +When using `method = "native"` a ping is sent and the results are reported in +native Go by the Telegraf process, eliminating the need to execute the system +`ping` command. ### Configuration: -``` -# NOTE: this plugin forks the ping command. You may need to set capabilities -# via setcap cap_net_raw+p /bin/ping +```toml [[inputs.ping]] -## List of urls to ping -urls = ["www.google.com"] # required -## number of pings to send per collection (ping -c ) -# count = 1 -## interval, in s, at which to ping. 0 == default (ping -i ) -## Not available in Windows. -# ping_interval = 1.0 -## per-ping timeout, in s. 
0 == no timeout (ping -W ) -# timeout = 1.0 -## total-ping deadline, in s. 0 == no deadline (ping -w ) -# deadline = 10 -## interface or source address to send ping from (ping -I ) -## on Darwin and Freebsd only source address possible: (ping -S ) -# interface = "" + ## Hosts to send ping packets to. + urls = ["example.org"] + + ## Method used for sending pings, can be either "exec" or "native". When set + ## to "exec" the systems ping command will be executed. When set to "native" + ## the plugin will send pings directly. + ## + ## While the default is "exec" for backwards compatibility, new deployments + ## are encouraged to use the "native" method for improved compatibility and + ## performance. + # method = "exec" + + ## Number of ping packets to send per interval. Corresponds to the "-c" + ## option of the ping command. + # count = 1 + + ## Time to wait between sending ping packets in seconds. Operates like the + ## "-i" option of the ping command. + # ping_interval = 1.0 + + ## If set, the time to wait for a ping response in seconds. Operates like + ## the "-W" option of the ping command. + # timeout = 1.0 + + ## If set, the total ping deadline, in seconds. Operates like the -w option + ## of the ping command. + # deadline = 10 + + ## Interface or source address to send ping from. Operates like the -I or -S + ## option of the ping command. + # interface = "" + + ## Specify the ping executable binary. + # binary = "ping" + + ## Arguments for ping command. When arguments is not empty, the command from + ## the binary option will be used and other options (ping_interval, timeout, + ## etc) will be ignored. + # arguments = ["-c", "3"] + + ## Use only IPv6 addresses when resolving a hostname. + # ipv6 = false ``` -### Measurements & Fields: +#### File Limit -- packets_transmitted ( from ping output ) -- reply_received ( increasing only on valid metric from echo replay, eg. 'Destination net unreachable' reply will increment packets_received but not reply_received ) -- packets_received ( from ping output ) -- percent_reply_loss ( compute from packets_transmitted and reply_received ) -- percent_packets_loss ( compute from packets_transmitted and packets_received ) -- errors ( when host can not be found or wrong parameters is passed to application ) -- response time - - average_response_ms ( compute from minimum_response_ms and maximum_response_ms ) - - minimum_response_ms ( from ping output ) - - maximum_response_ms ( from ping output ) -- result_code - - 0: success - - 1: no such host +Since this plugin runs the ping command, it may need to open multiple files per +host. The number of files used is lessened with the `native` option but still +many files are used. With a large host list you may receive a `too many open +files` error. -### Tags: +To increase this limit on platforms using systemd the recommended method is to +use the "drop-in directory", usually located at +`/etc/systemd/system/telegraf.service.d`. -- host -- url +You can create or edit a drop-in file in the correct location using: +```sh +$ systemctl edit telegraf +``` -### Example Output: +Increase the number of open files: +```ini +[Service] +LimitNOFILE=8192 +``` + +Restart Telegraf: +```sh +$ systemctl edit telegraf +``` + +#### Linux Permissions + +When using `method = "native"`, Telegraf will attempt to use privileged raw +ICMP sockets. On most systems, doing so requires `CAP_NET_RAW` capabilities. 
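The privileged step is opening a raw ICMP socket; the plugin delegates the actual probing to a ping library, but a minimal sketch along the following lines (using `golang.org/x/net/icmp` purely as an illustration, not the plugin's own dependency) shows the call that fails with "operation not permitted" when the capability is missing:

```go
package main

import (
	"log"

	"golang.org/x/net/icmp"
)

func main() {
	// Opening a raw ICMP socket is the privileged operation; without
	// CAP_NET_RAW (or root) it fails with "operation not permitted".
	conn, err := icmp.ListenPacket("ip4:icmp", "0.0.0.0")
	if err != nil {
		log.Fatalf("raw ICMP socket unavailable: %v", err)
	}
	defer conn.Close()
	log.Println("raw ICMP socket opened; CAP_NET_RAW (or root) is available")
}
```

The unprivileged fallback described below corresponds to listening on the `"udp4"` network instead of `"ip4:icmp"`, which is what the `ping_group_range` sysctl governs.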
+ +With systemd: +```sh +$ systemctl edit telegraf +``` +```ini +[Service] +CapabilityBoundingSet=CAP_NET_RAW +AmbientCapabilities=CAP_NET_RAW +``` +```sh +$ systemctl restart telegraf +``` + +Without systemd: +```sh +$ setcap cap_net_raw=eip /usr/bin/telegraf +``` + +Reference [`man 7 capabilities`][man 7 capabilities] for more information about +setting capabilities. + +[man 7 capabilities]: http://man7.org/linux/man-pages/man7/capabilities.7.html + +When Telegraf cannot listen on a privileged ICMP socket it will attempt to use +ICMP echo sockets. If you wish to use this method you must ensure Telegraf's +group, usually `telegraf`, is allowed to use ICMP echo sockets: + +```sh +$ sysctl -w net.ipv4.ping_group_range="GROUP_ID_LOW GROUP_ID_HIGH" +``` + +Reference [`man 7 icmp`][man 7 icmp] for more information about ICMP echo +sockets and the `ping_group_range` setting. + +[man 7 icmp]: http://man7.org/linux/man-pages/man7/icmp.7.html + +### Metrics + +- ping + - tags: + - url + - fields: + - packets_transmitted (integer) + - packets_received (integer) + - percent_packet_loss (float) + - ttl (integer, Not available on Windows) + - average_response_ms (integer) + - minimum_response_ms (integer) + - maximum_response_ms (integer) + - standard_deviation_ms (integer, Available on Windows only with native ping) + - errors (float, Windows only) + - reply_received (integer, Windows with method = "exec" only) + - percent_reply_loss (float, Windows with method = "exec" only) + - result_code (int, success = 0, no such host = 1, ping error = 2) + +##### reply_received vs packets_received + +On Windows systems with `method = "exec"`, the "Destination net unreachable" reply will increment `packets_received` but not `reply_received`*. + +##### ttl + +There is currently no support for TTL on windows with `"native"`; track +progress at https://github.com/golang/go/issues/7175 and +https://github.com/golang/go/issues/7174 + + +### Example Output ``` -$ ./telegraf --config telegraf.conf --input-filter ping --test -* Plugin: ping, Collection 1 -ping,host=WIN-PBAPLP511R7,url=www.google.com result_code=0i,average_response_ms=7i,maximum_response_ms=9i,minimum_response_ms=7i,packets_received=4i,packets_transmitted=4i,percent_packet_loss=0,percent_reply_loss=0,reply_received=4i 1469879119000000000 +ping,url=example.org average_response_ms=23.066,ttl=63,maximum_response_ms=24.64,minimum_response_ms=22.451,packets_received=5i,packets_transmitted=5i,percent_packet_loss=0,result_code=0i,standard_deviation_ms=0.809 1535747258000000000 ``` diff --git a/plugins/inputs/ping/ping.go b/plugins/inputs/ping/ping.go index 492474786..008cfceac 100644 --- a/plugins/inputs/ping/ping.go +++ b/plugins/inputs/ping/ping.go @@ -1,19 +1,18 @@ -// +build !windows - package ping import ( + "context" "errors" - "fmt" + "log" + "math" "net" "os/exec" "runtime" - "strconv" "strings" "sync" - "syscall" "time" + "github.com/glinton/ping" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" @@ -22,16 +21,22 @@ import ( // HostPinger is a function that runs the "ping" function using a list of // passed arguments. 
This can be easily switched with a mocked ping function // for unit test purposes (see ping_test.go) -type HostPinger func(timeout float64, args ...string) (string, error) +type HostPinger func(binary string, timeout float64, args ...string) (string, error) + +type HostResolver func(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) + +type IsCorrectNetwork func(ip net.IPAddr) bool type Ping struct { + wg sync.WaitGroup + // Interval at which to ping (ping -i ) PingInterval float64 `toml:"ping_interval"` // Number of pings to send (ping -c ) Count int - // Ping timeout, in seconds. 0 means no timeout (ping -W ) + // Per-ping timeout, in seconds. 0 means no timeout (ping -W ) Timeout float64 // Ping deadline, in seconds. 0 means no deadline. (ping -w ) @@ -43,120 +48,143 @@ type Ping struct { // URLs to ping Urls []string + // Method defines how to ping (native or exec) + Method string + + // Ping executable binary + Binary string + + // Arguments for ping command. When arguments is not empty, system binary will be used and + // other options (ping_interval, timeout, etc) will be ignored + Arguments []string + + // Whether to resolve addresses using ipv6 or not. + IPv6 bool + // host ping function pingHost HostPinger + + // resolve host function + resolveHost HostResolver + + // listenAddr is the address associated with the interface defined. + listenAddr string } -func (_ *Ping) Description() string { +func (*Ping) Description() string { return "Ping given url(s) and return statistics" } const sampleConfig = ` - ## NOTE: this plugin forks the ping command. You may need to set capabilities - ## via setcap cap_net_raw+p /bin/ping - # - ## List of urls to ping - urls = ["www.google.com"] # required - ## number of pings to send per collection (ping -c ) + ## Hosts to send ping packets to. + urls = ["example.org"] + + ## Method used for sending pings, can be either "exec" or "native". When set + ## to "exec" the systems ping command will be executed. When set to "native" + ## the plugin will send pings directly. + ## + ## While the default is "exec" for backwards compatibility, new deployments + ## are encouraged to use the "native" method for improved compatibility and + ## performance. + # method = "exec" + + ## Number of ping packets to send per interval. Corresponds to the "-c" + ## option of the ping command. # count = 1 - ## interval, in s, at which to ping. 0 == default (ping -i ) + + ## Time to wait between sending ping packets in seconds. Operates like the + ## "-i" option of the ping command. # ping_interval = 1.0 - ## per-ping timeout, in s. 0 == no timeout (ping -W ) + + ## If set, the time to wait for a ping response in seconds. Operates like + ## the "-W" option of the ping command. # timeout = 1.0 - ## total-ping deadline, in s. 0 == no deadline (ping -w ) + + ## If set, the total ping deadline, in seconds. Operates like the -w option + ## of the ping command. # deadline = 10 - ## interface or source address to send ping from (ping -I ) - ## on Darwin and Freebsd only source address possible: (ping -S ) + + ## Interface or source address to send ping from. Operates like the -I or -S + ## option of the ping command. # interface = "" + + ## Specify the ping executable binary. + # binary = "ping" + + ## Arguments for ping command. When arguments is not empty, the command from + ## the binary option will be used and other options (ping_interval, timeout, + ## etc) will be ignored. + # arguments = ["-c", "3"] + + ## Use only IPv6 addresses when resolving a hostname. 
+ # ipv6 = false ` -func (_ *Ping) SampleConfig() string { +func (*Ping) SampleConfig() string { return sampleConfig } func (p *Ping) Gather(acc telegraf.Accumulator) error { - - var wg sync.WaitGroup - - // Spin off a go routine for each url to ping - for _, url := range p.Urls { - wg.Add(1) - go func(u string) { - defer wg.Done() - tags := map[string]string{"url": u} - fields := map[string]interface{}{"result_code": 0} - - _, err := net.LookupHost(u) - if err != nil { - acc.AddError(err) - fields["result_code"] = 1 - acc.AddFields("ping", fields, tags) - return - } - - args := p.args(u) - totalTimeout := float64(p.Count)*p.Timeout + float64(p.Count-1)*p.PingInterval - - out, err := p.pingHost(totalTimeout, args...) - if err != nil { - // Some implementations of ping return a 1 exit code on - // timeout, if this occurs we will not exit and try to parse - // the output. - status := -1 - if exitError, ok := err.(*exec.ExitError); ok { - if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { - status = ws.ExitStatus() - } - } - - if status != 1 { - // Combine go err + stderr output - out = strings.TrimSpace(out) - if len(out) > 0 { - acc.AddError(fmt.Errorf("host %s: %s, %s", u, out, err)) - } else { - acc.AddError(fmt.Errorf("host %s: %s", u, err)) - } - acc.AddFields("ping", fields, tags) - return - } - } - - trans, rec, min, avg, max, stddev, err := processPingOutput(out) - if err != nil { - // fatal error - acc.AddError(fmt.Errorf("%s: %s", err, u)) - acc.AddFields("ping", fields, tags) - return - } - // Calculate packet loss percentage - loss := float64(trans-rec) / float64(trans) * 100.0 - fields["packets_transmitted"] = trans - fields["packets_received"] = rec - fields["percent_packet_loss"] = loss - if min >= 0 { - fields["minimum_response_ms"] = min - } - if avg >= 0 { - fields["average_response_ms"] = avg - } - if max >= 0 { - fields["maximum_response_ms"] = max - } - if stddev >= 0 { - fields["standard_deviation_ms"] = stddev - } - acc.AddFields("ping", fields, tags) - }(url) + if p.Interface != "" && p.listenAddr == "" { + p.listenAddr = getAddr(p.Interface) } - wg.Wait() + for _, host := range p.Urls { + p.wg.Add(1) + go func(host string) { + defer p.wg.Done() + + switch p.Method { + case "native": + p.pingToURLNative(host, acc) + default: + p.pingToURL(host, acc) + } + }(host) + } + + p.wg.Wait() return nil } -func hostPinger(timeout float64, args ...string) (string, error) { - bin, err := exec.LookPath("ping") +func getAddr(iface string) string { + if addr := net.ParseIP(iface); addr != nil { + return addr.String() + } + + ifaces, err := net.Interfaces() + if err != nil { + return "" + } + + var ip net.IP + for i := range ifaces { + if ifaces[i].Name == iface { + addrs, err := ifaces[i].Addrs() + if err != nil { + return "" + } + if len(addrs) > 0 { + switch v := addrs[0].(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + if len(ip) == 0 { + return "" + } + return ip.String() + } + } + } + + return "" +} + +func hostPinger(binary string, timeout float64, args ...string) (string, error) { + bin, err := exec.LookPath(binary) if err != nil { return "", err } @@ -166,115 +194,245 @@ func hostPinger(timeout float64, args ...string) (string, error) { return string(out), err } -// args returns the arguments for the 'ping' executable -func (p *Ping) args(url string) []string { - // Build the ping command args based on toml config - args := []string{"-c", strconv.Itoa(p.Count), "-n", "-s", "16"} - if p.PingInterval > 0 { - args = append(args, "-i", 
strconv.FormatFloat(p.PingInterval, 'f', -1, 64)) - } - if p.Timeout > 0 { - switch runtime.GOOS { - case "darwin", "freebsd", "netbsd", "openbsd": - args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) - case "linux": - args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) - default: - // Not sure the best option here, just assume GNU ping? - args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) +func filterIPs(addrs []net.IPAddr, filterFunc IsCorrectNetwork) []net.IPAddr { + n := 0 + for _, x := range addrs { + if filterFunc(x) { + addrs[n] = x + n++ } } - if p.Deadline > 0 { - switch runtime.GOOS { - case "darwin", "freebsd", "netbsd", "openbsd": - args = append(args, "-t", strconv.Itoa(p.Deadline)) - case "linux": - args = append(args, "-w", strconv.Itoa(p.Deadline)) - default: - // Not sure the best option here, just assume GNU ping? - args = append(args, "-w", strconv.Itoa(p.Deadline)) - } - } - if p.Interface != "" { - switch runtime.GOOS { - case "darwin", "freebsd", "netbsd", "openbsd": - args = append(args, "-S", p.Interface) - case "linux": - args = append(args, "-I", p.Interface) - default: - // Not sure the best option here, just assume GNU ping? - args = append(args, "-I", p.Interface) - } - } - args = append(args, url) - return args + return addrs[:n] } -// processPingOutput takes in a string output from the ping command, like: -// -// PING www.google.com (173.194.115.84): 56 data bytes -// 64 bytes from 173.194.115.84: icmp_seq=0 ttl=54 time=52.172 ms -// 64 bytes from 173.194.115.84: icmp_seq=1 ttl=54 time=34.843 ms -// -// --- www.google.com ping statistics --- -// 2 packets transmitted, 2 packets received, 0.0% packet loss -// round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms -// -// It returns (, , ) -func processPingOutput(out string) (int, int, float64, float64, float64, float64, error) { - var trans, recv int - var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0 - // Set this error to nil if we find a 'transmitted' line - err := errors.New("Fatal error processing ping output") - lines := strings.Split(out, "\n") - for _, line := range lines { - if strings.Contains(line, "transmitted") && - strings.Contains(line, "received") { - stats := strings.Split(line, ", ") - // Transmitted packets - trans, err = strconv.Atoi(strings.Split(stats[0], " ")[0]) - if err != nil { - return trans, recv, min, avg, max, stddev, err +func hostResolver(ctx context.Context, ipv6 bool, destination string) (*net.IPAddr, error) { + resolver := &net.Resolver{} + ips, err := resolver.LookupIPAddr(ctx, destination) + + if err != nil { + return nil, err + } + + if ipv6 { + ips = filterIPs(ips, isV6) + } else { + ips = filterIPs(ips, isV4) + } + + if len(ips) == 0 { + return nil, errors.New("Cannot resolve ip address") + } + return &ips[0], err +} + +func isV4(ip net.IPAddr) bool { + return ip.IP.To4() != nil +} + +func isV6(ip net.IPAddr) bool { + return !isV4(ip) +} + +func (p *Ping) pingToURLNative(destination string, acc telegraf.Accumulator) { + ctx := context.Background() + interval := p.PingInterval + if interval < 0.2 { + interval = 0.2 + } + + timeout := p.Timeout + if timeout == 0 { + timeout = 5 + } + + tick := time.NewTicker(time.Duration(interval * float64(time.Second))) + defer tick.Stop() + + if p.Deadline > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, time.Duration(p.Deadline)*time.Second) + defer cancel() + } + + host, err := p.resolveHost(ctx, p.IPv6, destination) + if err 
!= nil { + acc.AddFields( + "ping", + map[string]interface{}{"result_code": 1}, + map[string]string{"url": destination}, + ) + acc.AddError(err) + return + } + + resps := make(chan *ping.Response) + rsps := []*ping.Response{} + + r := &sync.WaitGroup{} + r.Add(1) + go func() { + for res := range resps { + rsps = append(rsps, res) + } + r.Done() + }() + + wg := &sync.WaitGroup{} + c := ping.Client{} + + var doErr error + var packetsSent int + + type sentReq struct { + err error + sent bool + } + sents := make(chan sentReq) + + r.Add(1) + go func() { + for sent := range sents { + if sent.err != nil { + doErr = sent.err } - // Received packets - recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0]) - if err != nil { - return trans, recv, min, avg, max, stddev, err - } - } else if strings.Contains(line, "min/avg/max") { - stats := strings.Split(line, " ")[3] - data := strings.Split(stats, "/") - min, err = strconv.ParseFloat(data[0], 64) - if err != nil { - return trans, recv, min, avg, max, stddev, err - } - avg, err = strconv.ParseFloat(data[1], 64) - if err != nil { - return trans, recv, min, avg, max, stddev, err - } - max, err = strconv.ParseFloat(data[2], 64) - if err != nil { - return trans, recv, min, avg, max, stddev, err - } - if len(data) == 4 { - stddev, err = strconv.ParseFloat(data[3], 64) - if err != nil { - return trans, recv, min, avg, max, stddev, err - } + if sent.sent { + packetsSent++ } } + r.Done() + }() + + for i := 0; i < p.Count; i++ { + select { + case <-ctx.Done(): + goto finish + case <-tick.C: + ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout*float64(time.Second))) + defer cancel() + + wg.Add(1) + go func(seq int) { + defer wg.Done() + resp, err := c.Do(ctx, &ping.Request{ + Dst: net.ParseIP(host.String()), + Src: net.ParseIP(p.listenAddr), + Seq: seq, + }) + + sent := sentReq{err: err, sent: true} + if err != nil { + if strings.Contains(err.Error(), "not permitted") { + sent.sent = false + } + sents <- sent + return + } + + resps <- resp + sents <- sent + }(i + 1) + } } - return trans, recv, min, avg, max, stddev, err + +finish: + wg.Wait() + close(resps) + close(sents) + + r.Wait() + + if doErr != nil && strings.Contains(doErr.Error(), "not permitted") { + log.Printf("D! 
[inputs.ping] %s", doErr.Error()) + } + + tags, fields := onFin(packetsSent, rsps, doErr, destination) + acc.AddFields("ping", fields, tags) +} + +func onFin(packetsSent int, resps []*ping.Response, err error, destination string) (map[string]string, map[string]interface{}) { + packetsRcvd := len(resps) + + tags := map[string]string{"url": destination} + fields := map[string]interface{}{ + "result_code": 0, + "packets_transmitted": packetsSent, + "packets_received": packetsRcvd, + } + + if packetsSent == 0 { + if err != nil { + fields["result_code"] = 2 + } + return tags, fields + } + + if packetsRcvd == 0 { + if err != nil { + fields["result_code"] = 1 + } + fields["percent_packet_loss"] = float64(100) + return tags, fields + } + + fields["percent_packet_loss"] = float64(packetsSent-packetsRcvd) / float64(packetsSent) * 100 + ttl := resps[0].TTL + + var min, max, avg, total time.Duration + min = resps[0].RTT + max = resps[0].RTT + + for _, res := range resps { + if res.RTT < min { + min = res.RTT + } + if res.RTT > max { + max = res.RTT + } + total += res.RTT + } + + avg = total / time.Duration(packetsRcvd) + var sumsquares time.Duration + for _, res := range resps { + sumsquares += (res.RTT - avg) * (res.RTT - avg) + } + stdDev := time.Duration(math.Sqrt(float64(sumsquares / time.Duration(packetsRcvd)))) + + // Set TTL only on supported platform. See golang.org/x/net/ipv4/payload_cmsg.go + switch runtime.GOOS { + case "aix", "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + fields["ttl"] = ttl + } + + fields["minimum_response_ms"] = float64(min.Nanoseconds()) / float64(time.Millisecond) + fields["average_response_ms"] = float64(avg.Nanoseconds()) / float64(time.Millisecond) + fields["maximum_response_ms"] = float64(max.Nanoseconds()) / float64(time.Millisecond) + fields["standard_deviation_ms"] = float64(stdDev.Nanoseconds()) / float64(time.Millisecond) + + return tags, fields +} + +// Init ensures the plugin is configured correctly. +func (p *Ping) Init() error { + if p.Count < 1 { + return errors.New("bad number of packets to transmit") + } + + return nil } func init() { inputs.Add("ping", func() telegraf.Input { return &Ping{ pingHost: hostPinger, + resolveHost: hostResolver, PingInterval: 1.0, Count: 1, Timeout: 1.0, Deadline: 10, + Method: "exec", + Binary: "ping", + Arguments: []string{}, } }) } diff --git a/plugins/inputs/ping/ping_notwindows.go b/plugins/inputs/ping/ping_notwindows.go new file mode 100644 index 000000000..b39ffdd8f --- /dev/null +++ b/plugins/inputs/ping/ping_notwindows.go @@ -0,0 +1,212 @@ +// +build !windows + +package ping + +import ( + "errors" + "fmt" + "os/exec" + "regexp" + "runtime" + "strconv" + "strings" + "syscall" + + "github.com/influxdata/telegraf" +) + +func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { + tags := map[string]string{"url": u} + fields := map[string]interface{}{"result_code": 0} + + out, err := p.pingHost(p.Binary, 60.0, p.args(u, runtime.GOOS)...) + if err != nil { + // Some implementations of ping return a 1 exit code on + // timeout, if this occurs we will not exit and try to parse + // the output. 
+ status := -1 + if exitError, ok := err.(*exec.ExitError); ok { + if ws, ok := exitError.Sys().(syscall.WaitStatus); ok { + status = ws.ExitStatus() + fields["result_code"] = status + } + } + + if status != 1 { + // Combine go err + stderr output + out = strings.TrimSpace(out) + if len(out) > 0 { + acc.AddError(fmt.Errorf("host %s: %s, %s", u, out, err)) + } else { + acc.AddError(fmt.Errorf("host %s: %s", u, err)) + } + fields["result_code"] = 2 + acc.AddFields("ping", fields, tags) + return + } + } + trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(out) + if err != nil { + // fatal error + acc.AddError(fmt.Errorf("%s: %s", err, u)) + fields["result_code"] = 2 + acc.AddFields("ping", fields, tags) + return + } + + // Calculate packet loss percentage + loss := float64(trans-rec) / float64(trans) * 100.0 + + fields["packets_transmitted"] = trans + fields["packets_received"] = rec + fields["percent_packet_loss"] = loss + if ttl >= 0 { + fields["ttl"] = ttl + } + if min >= 0 { + fields["minimum_response_ms"] = min + } + if avg >= 0 { + fields["average_response_ms"] = avg + } + if max >= 0 { + fields["maximum_response_ms"] = max + } + if stddev >= 0 { + fields["standard_deviation_ms"] = stddev + } + acc.AddFields("ping", fields, tags) +} + +// args returns the arguments for the 'ping' executable +func (p *Ping) args(url string, system string) []string { + if len(p.Arguments) > 0 { + return append(p.Arguments, url) + } + + // build the ping command args based on toml config + args := []string{"-c", strconv.Itoa(p.Count), "-n", "-s", "16"} + if p.PingInterval > 0 { + args = append(args, "-i", strconv.FormatFloat(p.PingInterval, 'f', -1, 64)) + } + if p.Timeout > 0 { + switch system { + case "darwin": + args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) + case "freebsd", "netbsd", "openbsd": + args = append(args, "-W", strconv.FormatFloat(p.Timeout*1000, 'f', -1, 64)) + case "linux": + args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) + default: + // Not sure the best option here, just assume GNU ping? + args = append(args, "-W", strconv.FormatFloat(p.Timeout, 'f', -1, 64)) + } + } + if p.Deadline > 0 { + switch system { + case "darwin", "freebsd", "netbsd", "openbsd": + args = append(args, "-t", strconv.Itoa(p.Deadline)) + case "linux": + args = append(args, "-w", strconv.Itoa(p.Deadline)) + default: + // not sure the best option here, just assume gnu ping? + args = append(args, "-w", strconv.Itoa(p.Deadline)) + } + } + if p.Interface != "" { + switch system { + case "darwin": + args = append(args, "-I", p.Interface) + case "freebsd", "netbsd", "openbsd": + args = append(args, "-S", p.Interface) + case "linux": + args = append(args, "-I", p.Interface) + default: + // not sure the best option here, just assume gnu ping? 
+ args = append(args, "-i", p.Interface) + } + } + args = append(args, url) + return args +} + +// processPingOutput takes in a string output from the ping command, like: +// +// ping www.google.com (173.194.115.84): 56 data bytes +// 64 bytes from 173.194.115.84: icmp_seq=0 ttl=54 time=52.172 ms +// 64 bytes from 173.194.115.84: icmp_seq=1 ttl=54 time=34.843 ms +// +// --- www.google.com ping statistics --- +// 2 packets transmitted, 2 packets received, 0.0% packet loss +// round-trip min/avg/max/stddev = 34.843/43.508/52.172/8.664 ms +// +// It returns (, , ) +func processPingOutput(out string) (int, int, int, float64, float64, float64, float64, error) { + var trans, recv, ttl int = 0, 0, -1 + var min, avg, max, stddev float64 = -1.0, -1.0, -1.0, -1.0 + // Set this error to nil if we find a 'transmitted' line + err := errors.New("Fatal error processing ping output") + lines := strings.Split(out, "\n") + for _, line := range lines { + // Reading only first TTL, ignoring other TTL messages + if ttl == -1 && strings.Contains(line, "ttl=") { + ttl, err = getTTL(line) + } else if strings.Contains(line, "transmitted") && + strings.Contains(line, "received") { + trans, recv, err = getPacketStats(line, trans, recv) + if err != nil { + return trans, recv, ttl, min, avg, max, stddev, err + } + } else if strings.Contains(line, "min/avg/max") { + min, avg, max, stddev, err = checkRoundTripTimeStats(line, min, avg, max, stddev) + if err != nil { + return trans, recv, ttl, min, avg, max, stddev, err + } + } + } + return trans, recv, ttl, min, avg, max, stddev, err +} + +func getPacketStats(line string, trans, recv int) (int, int, error) { + stats := strings.Split(line, ", ") + // Transmitted packets + trans, err := strconv.Atoi(strings.Split(stats[0], " ")[0]) + if err != nil { + return trans, recv, err + } + // Received packets + recv, err = strconv.Atoi(strings.Split(stats[1], " ")[0]) + return trans, recv, err +} + +func getTTL(line string) (int, error) { + ttlLine := regexp.MustCompile(`ttl=(\d+)`) + ttlMatch := ttlLine.FindStringSubmatch(line) + return strconv.Atoi(ttlMatch[1]) +} + +func checkRoundTripTimeStats(line string, min, avg, max, + stddev float64) (float64, float64, float64, float64, error) { + stats := strings.Split(line, " ")[3] + data := strings.Split(stats, "/") + + min, err := strconv.ParseFloat(data[0], 64) + if err != nil { + return min, avg, max, stddev, err + } + avg, err = strconv.ParseFloat(data[1], 64) + if err != nil { + return min, avg, max, stddev, err + } + max, err = strconv.ParseFloat(data[2], 64) + if err != nil { + return min, avg, max, stddev, err + } + if len(data) == 4 { + stddev, err = strconv.ParseFloat(data[3], 64) + if err != nil { + return min, avg, max, stddev, err + } + } + return min, avg, max, stddev, err +} diff --git a/plugins/inputs/ping/ping_test.go b/plugins/inputs/ping/ping_test.go index 9817d07c6..d6f78bb79 100644 --- a/plugins/inputs/ping/ping_test.go +++ b/plugins/inputs/ping/ping_test.go @@ -3,14 +3,16 @@ package ping import ( + "context" "errors" + "net" "reflect" - "runtime" "sort" "testing" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // BSD/Darwin ping output @@ -61,8 +63,9 @@ ping: -i interval too short: Operation not permitted // Test that ping command output is processed properly func TestProcessPingOutput(t *testing.T) { - trans, rec, min, avg, max, stddev, err := processPingOutput(bsdPingOutput) + trans, rec, ttl, min, avg, max, stddev, err := 
processPingOutput(bsdPingOutput) assert.NoError(t, err) + assert.Equal(t, 55, ttl, "ttl value is 55") assert.Equal(t, 5, trans, "5 packets were transmitted") assert.Equal(t, 5, rec, "5 packets were transmitted") assert.InDelta(t, 15.087, min, 0.001) @@ -70,8 +73,9 @@ func TestProcessPingOutput(t *testing.T) { assert.InDelta(t, 27.263, max, 0.001) assert.InDelta(t, 4.076, stddev, 0.001) - trans, rec, min, avg, max, stddev, err = processPingOutput(linuxPingOutput) + trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(linuxPingOutput) assert.NoError(t, err) + assert.Equal(t, 63, ttl, "ttl value is 63") assert.Equal(t, 5, trans, "5 packets were transmitted") assert.Equal(t, 5, rec, "5 packets were transmitted") assert.InDelta(t, 35.225, min, 0.001) @@ -79,8 +83,9 @@ func TestProcessPingOutput(t *testing.T) { assert.InDelta(t, 51.806, max, 0.001) assert.InDelta(t, 5.325, stddev, 0.001) - trans, rec, min, avg, max, stddev, err = processPingOutput(busyBoxPingOutput) + trans, rec, ttl, min, avg, max, stddev, err = processPingOutput(busyBoxPingOutput) assert.NoError(t, err) + assert.Equal(t, 56, ttl, "ttl value is 56") assert.Equal(t, 4, trans, "4 packets were transmitted") assert.Equal(t, 4, rec, "4 packets were transmitted") assert.InDelta(t, 15.810, min, 0.001) @@ -89,81 +94,87 @@ func TestProcessPingOutput(t *testing.T) { assert.InDelta(t, -1.0, stddev, 0.001) } +// Linux ping output with varying TTL +var linuxPingOutputWithVaryingTTL = ` +PING www.google.com (216.58.218.164) 56(84) bytes of data. +64 bytes from host.net (216.58.218.164): icmp_seq=1 ttl=63 time=35.2 ms +64 bytes from host.net (216.58.218.164): icmp_seq=2 ttl=255 time=42.3 ms +64 bytes from host.net (216.58.218.164): icmp_seq=3 ttl=64 time=45.1 ms +64 bytes from host.net (216.58.218.164): icmp_seq=4 ttl=64 time=43.5 ms +64 bytes from host.net (216.58.218.164): icmp_seq=5 ttl=255 time=51.8 ms + +--- www.google.com ping statistics --- +5 packets transmitted, 5 received, 0% packet loss, time 4010ms +rtt min/avg/max/mdev = 35.225/43.628/51.806/5.325 ms +` + +// Test that ping command output is processed properly +func TestProcessPingOutputWithVaryingTTL(t *testing.T) { + trans, rec, ttl, min, avg, max, stddev, err := processPingOutput(linuxPingOutputWithVaryingTTL) + assert.NoError(t, err) + assert.Equal(t, 63, ttl, "ttl value is 63") + assert.Equal(t, 5, trans, "5 packets were transmitted") + assert.Equal(t, 5, rec, "5 packets were transmitted") + assert.InDelta(t, 35.225, min, 0.001) + assert.InDelta(t, 43.628, avg, 0.001) + assert.InDelta(t, 51.806, max, 0.001) + assert.InDelta(t, 5.325, stddev, 0.001) +} + // Test that processPingOutput returns an error when 'ping' fails to run, such // as when an invalid argument is provided func TestErrorProcessPingOutput(t *testing.T) { - _, _, _, _, _, _, err := processPingOutput(fatalPingOutput) + _, _, _, _, _, _, _, err := processPingOutput(fatalPingOutput) assert.Error(t, err, "Error was expected from processPingOutput") } // Test that arg lists and created correctly func TestArgs(t *testing.T) { p := Ping{ - Count: 2, + Count: 2, + Interface: "eth0", + Timeout: 12.0, + Deadline: 24, + PingInterval: 1.2, } - // Actual and Expected arg lists must be sorted for reflect.DeepEqual - - actual := p.args("www.google.com") - expected := []string{"-c", "2", "-n", "-s", "16", "www.google.com"} - sort.Strings(actual) - sort.Strings(expected) - assert.True(t, reflect.DeepEqual(expected, actual), - "Expected: %s Actual: %s", expected, actual) - - p.Interface = "eth0" - actual = 
p.args("www.google.com") - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", - "www.google.com"} - sort.Strings(actual) - sort.Strings(expected) - assert.True(t, reflect.DeepEqual(expected, actual), - "Expected: %s Actual: %s", expected, actual) - - p.Timeout = 12.0 - actual = p.args("www.google.com") - switch runtime.GOOS { - case "darwin": - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12000.0", "www.google.com"} - default: - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12", "www.google.com"} + var systemCases = []struct { + system string + output []string + }{ + {"darwin", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12000", "-t", "24", "-I", "eth0", "www.google.com"}}, + {"linux", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12", "-w", "24", "-I", "eth0", "www.google.com"}}, + {"anything else", []string{"-c", "2", "-n", "-s", "16", "-i", "1.2", "-W", "12", "-w", "24", "-i", "eth0", "www.google.com"}}, } - - p.Deadline = 24 - actual = p.args("www.google.com") - switch runtime.GOOS { - case "darwin": - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12000.0", "-t", "24", "www.google.com"} - default: - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12", "-w", "24", "www.google.com"} + for i := range systemCases { + actual := p.args("www.google.com", systemCases[i].system) + expected := systemCases[i].output + sort.Strings(actual) + sort.Strings(expected) + require.True(t, reflect.DeepEqual(expected, actual), + "Expected: %s Actual: %s", expected, actual) } - - sort.Strings(actual) - sort.Strings(expected) - assert.True(t, reflect.DeepEqual(expected, actual), - "Expected: %s Actual: %s", expected, actual) - - p.PingInterval = 1.2 - actual = p.args("www.google.com") - switch runtime.GOOS { - case "darwin": - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12000.0", "-t", "24", "-i", "1.2", "www.google.com"} - default: - expected = []string{"-c", "2", "-n", "-s", "16", "-I", "eth0", "-W", - "12", "-w", "24", "-i", "1.2", "www.google.com"} - } - sort.Strings(actual) - sort.Strings(expected) - assert.True(t, reflect.DeepEqual(expected, actual), - "Expected: %s Actual: %s", expected, actual) } -func mockHostPinger(timeout float64, args ...string) (string, error) { +func TestArguments(t *testing.T) { + arguments := []string{"-c", "3"} + expected := append(arguments, "www.google.com") + p := Ping{ + Count: 2, + Interface: "eth0", + Timeout: 12.0, + Deadline: 24, + PingInterval: 1.2, + Arguments: arguments, + } + + for _, system := range []string{"darwin", "linux", "anything else"} { + actual := p.args("www.google.com", system) + require.True(t, reflect.DeepEqual(actual, expected), "Expected: %s Actual: %s", expected, actual) + } +} + +func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { return linuxPingOutput, nil } @@ -171,16 +182,17 @@ func mockHostPinger(timeout float64, args ...string) (string, error) { func TestPingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ - Urls: []string{"www.google.com", "www.reddit.com"}, + Urls: []string{"localhost", "influxdata.com"}, pingHost: mockHostPinger, } acc.GatherError(p.Gather) - tags := map[string]string{"url": "www.google.com"} + tags := map[string]string{"url": "localhost"} fields := map[string]interface{}{ "packets_transmitted": 5, "packets_received": 5, "percent_packet_loss": 0.0, + "ttl": 63, "minimum_response_ms": 35.225, 
"average_response_ms": 43.628, "maximum_response_ms": 51.806, @@ -189,7 +201,7 @@ func TestPingGather(t *testing.T) { } acc.AssertContainsTaggedFields(t, "ping", fields, tags) - tags = map[string]string{"url": "www.reddit.com"} + tags = map[string]string{"url": "influxdata.com"} acc.AssertContainsTaggedFields(t, "ping", fields, tags) } @@ -204,7 +216,7 @@ PING www.google.com (216.58.218.164) 56(84) bytes of data. rtt min/avg/max/mdev = 35.225/44.033/51.806/5.325 ms ` -func mockLossyHostPinger(timeout float64, args ...string) (string, error) { +func mockLossyHostPinger(binary string, timeout float64, args ...string) (string, error) { return lossyPingOutput, nil } @@ -222,6 +234,7 @@ func TestLossyPingGather(t *testing.T) { "packets_transmitted": 5, "packets_received": 3, "percent_packet_loss": 40.0, + "ttl": 63, "minimum_response_ms": 35.225, "average_response_ms": 44.033, "maximum_response_ms": 51.806, @@ -239,7 +252,7 @@ Request timeout for icmp_seq 0 2 packets transmitted, 0 packets received, 100.0% packet loss ` -func mockErrorHostPinger(timeout float64, args ...string) (string, error) { +func mockErrorHostPinger(binary string, timeout float64, args ...string) (string, error) { // This error will not trigger correct error paths return errorPingOutput, nil } @@ -264,7 +277,7 @@ func TestBadPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } -func mockFatalHostPinger(timeout float64, args ...string) (string, error) { +func mockFatalHostPinger(binary string, timeout float64, args ...string) (string, error) { return fatalPingOutput, errors.New("So very bad") } @@ -283,6 +296,8 @@ func TestFatalPingGather(t *testing.T) { "Fatal ping should not have packet measurements") assert.False(t, acc.HasMeasurement("percent_packet_loss"), "Fatal ping should not have packet measurements") + assert.False(t, acc.HasMeasurement("ttl"), + "Fatal ping should not have packet measurements") assert.False(t, acc.HasMeasurement("minimum_response_ms"), "Fatal ping should not have packet measurements") assert.False(t, acc.HasMeasurement("average_response_ms"), @@ -304,7 +319,7 @@ func TestErrorWithHostNamePingGather(t *testing.T) { var acc testutil.Accumulator p := Ping{ Urls: []string{"www.amazon.com"}, - pingHost: func(timeout float64, args ...string) (string, error) { + pingHost: func(binary string, timeout float64, args ...string) (string, error) { return param.out, errors.New("So very bad") }, } @@ -313,3 +328,63 @@ func TestErrorWithHostNamePingGather(t *testing.T) { assert.Contains(t, acc.Errors, param.error) } } + +func TestPingBinary(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.google.com"}, + Binary: "ping6", + pingHost: func(binary string, timeout float64, args ...string) (string, error) { + assert.True(t, binary == "ping6") + return "", nil + }, + } + acc.GatherError(p.Gather) +} + +func mockHostResolver(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { + ipaddr := net.IPAddr{} + ipaddr.IP = net.IPv4(127, 0, 0, 1) + return &ipaddr, nil +} + +// Test that Gather function works using native ping +func TestPingGatherNative(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test due to permission requirements.") + } + + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"localhost", "127.0.0.2"}, + Method: "native", + Count: 5, + resolveHost: mockHostResolver, + } + + assert.NoError(t, acc.GatherError(p.Gather)) + assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, 
"packets_transmitted", 5)) + assert.True(t, acc.HasPoint("ping", map[string]string{"url": "localhost"}, "packets_received", 5)) +} + +func mockHostResolverError(ctx context.Context, ipv6 bool, host string) (*net.IPAddr, error) { + return nil, errors.New("myMock error") +} + +// Test failed DNS resolutions +func TestDNSLookupError(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test due to permission requirements.") + } + + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"localhost"}, + Method: "native", + IPv6: false, + resolveHost: mockHostResolverError, + } + + acc.GatherError(p.Gather) + assert.True(t, len(acc.Errors) > 0) +} diff --git a/plugins/inputs/ping/ping_windows.go b/plugins/inputs/ping/ping_windows.go index 06a7f590e..adfd60480 100644 --- a/plugins/inputs/ping/ping_windows.go +++ b/plugins/inputs/ping/ping_windows.go @@ -4,66 +4,86 @@ package ping import ( "errors" - "net" - "os/exec" + "fmt" "regexp" "strconv" "strings" - "sync" - "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/plugins/inputs" ) -// HostPinger is a function that runs the "ping" function using a list of -// passed arguments. This can be easily switched with a mocked ping function -// for unit test purposes (see ping_test.go) -type HostPinger func(timeout float64, args ...string) (string, error) - -type Ping struct { - // Number of pings to send (ping -c ) - Count int - - // Ping timeout, in seconds. 0 means no timeout (ping -W ) - Timeout float64 - - // URLs to ping - Urls []string - - // host ping function - pingHost HostPinger -} - -func (s *Ping) Description() string { - return "Ping given url(s) and return statistics" -} - -const sampleConfig = ` - ## List of urls to ping - urls = ["www.google.com"] - - ## number of pings to send per collection (ping -n ) - # count = 1 - - ## Ping timeout, in seconds. 0.0 means default timeout (ping -w ) - # timeout = 0.0 -` - -func (s *Ping) SampleConfig() string { - return sampleConfig -} - -func hostPinger(timeout float64, args ...string) (string, error) { - bin, err := exec.LookPath("ping") - if err != nil { - return "", err +func (p *Ping) pingToURL(u string, acc telegraf.Accumulator) { + if p.Count < 1 { + p.Count = 1 } - c := exec.Command(bin, args...) - out, err := internal.CombinedOutputTimeout(c, - time.Second*time.Duration(timeout+1)) - return string(out), err + + tags := map[string]string{"url": u} + fields := map[string]interface{}{"result_code": 0} + + args := p.args(u) + totalTimeout := 60.0 + if len(p.Arguments) == 0 { + totalTimeout = p.timeout() * float64(p.Count) + } + + out, err := p.pingHost(p.Binary, totalTimeout, args...) 
+ // ping host return exitcode != 0 also when there was no response from host + // but command was execute successfully + var pendingError error + if err != nil { + // Combine go err + stderr output + pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error()) + } + trans, recReply, receivePacket, avg, min, max, err := processPingOutput(out) + if err != nil { + // fatal error + if pendingError != nil { + acc.AddError(fmt.Errorf("%s: %s", pendingError, u)) + } else { + acc.AddError(fmt.Errorf("%s: %s", err, u)) + } + + fields["result_code"] = 2 + fields["errors"] = 100.0 + acc.AddFields("ping", fields, tags) + return + } + // Calculate packet loss percentage + lossReply := float64(trans-recReply) / float64(trans) * 100.0 + lossPackets := float64(trans-receivePacket) / float64(trans) * 100.0 + + fields["packets_transmitted"] = trans + fields["reply_received"] = recReply + fields["packets_received"] = receivePacket + fields["percent_packet_loss"] = lossPackets + fields["percent_reply_loss"] = lossReply + if avg >= 0 { + fields["average_response_ms"] = float64(avg) + } + if min >= 0 { + fields["minimum_response_ms"] = float64(min) + } + if max >= 0 { + fields["maximum_response_ms"] = float64(max) + } + acc.AddFields("ping", fields, tags) +} + +// args returns the arguments for the 'ping' executable +func (p *Ping) args(url string) []string { + if len(p.Arguments) > 0 { + return p.Arguments + } + + args := []string{"-n", strconv.Itoa(p.Count)} + + if p.Timeout > 0 { + args = append(args, "-w", strconv.FormatFloat(p.Timeout*1000, 'f', 0, 64)) + } + + args = append(args, url) + + return args } // processPingOutput takes in a string output from the ping command @@ -133,107 +153,3 @@ func (p *Ping) timeout() float64 { } return 4 + 1 } - -// args returns the arguments for the 'ping' executable -func (p *Ping) args(url string) []string { - args := []string{"-n", strconv.Itoa(p.Count)} - - if p.Timeout > 0 { - args = append(args, "-w", strconv.FormatFloat(p.Timeout*1000, 'f', 0, 64)) - } - - args = append(args, url) - - return args -} - -func (p *Ping) Gather(acc telegraf.Accumulator) error { - if p.Count < 1 { - p.Count = 1 - } - var wg sync.WaitGroup - errorChannel := make(chan error, len(p.Urls)*2) - var pendingError error = nil - // Spin off a go routine for each url to ping - for _, url := range p.Urls { - wg.Add(1) - go func(u string) { - defer wg.Done() - - tags := map[string]string{"url": u} - fields := map[string]interface{}{"result_code": 0} - - _, err := net.LookupHost(u) - if err != nil { - errorChannel <- err - fields["result_code"] = 1 - acc.AddFields("ping", fields, tags) - return - } - - args := p.args(u) - totalTimeout := p.timeout() * float64(p.Count) - out, err := p.pingHost(totalTimeout, args...) 
- // ping host return exitcode != 0 also when there was no response from host - // but command was execute successfully - if err != nil { - // Combine go err + stderr output - pendingError = errors.New(strings.TrimSpace(out) + ", " + err.Error()) - } - trans, recReply, receivePacket, avg, min, max, err := processPingOutput(out) - if err != nil { - // fatal error - if pendingError != nil { - errorChannel <- pendingError - } - errorChannel <- err - - fields["errors"] = 100.0 - acc.AddFields("ping", fields, tags) - return - } - // Calculate packet loss percentage - lossReply := float64(trans-recReply) / float64(trans) * 100.0 - lossPackets := float64(trans-receivePacket) / float64(trans) * 100.0 - - fields["packets_transmitted"] = trans - fields["reply_received"] = recReply - fields["packets_received"] = receivePacket - fields["percent_packet_loss"] = lossPackets - fields["percent_reply_loss"] = lossReply - if avg >= 0 { - fields["average_response_ms"] = float64(avg) - } - if min >= 0 { - fields["minimum_response_ms"] = float64(min) - } - if max >= 0 { - fields["maximum_response_ms"] = float64(max) - } - acc.AddFields("ping", fields, tags) - }(url) - } - - wg.Wait() - close(errorChannel) - - // Get all errors and return them as one giant error - errorStrings := []string{} - for err := range errorChannel { - errorStrings = append(errorStrings, err.Error()) - } - - if len(errorStrings) == 0 { - return nil - } - return errors.New(strings.Join(errorStrings, "\n")) -} - -func init() { - inputs.Add("ping", func() telegraf.Input { - return &Ping{ - pingHost: hostPinger, - Count: 1, - } - }) -} diff --git a/plugins/inputs/ping/ping_windows_test.go b/plugins/inputs/ping/ping_windows_test.go index 178e42fcb..4618ec4db 100644 --- a/plugins/inputs/ping/ping_windows_test.go +++ b/plugins/inputs/ping/ping_windows_test.go @@ -4,10 +4,12 @@ package ping import ( "errors" + "reflect" "testing" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Windows ping format ( should support multilanguage ?) @@ -59,7 +61,7 @@ func TestHost(t *testing.T) { assert.Equal(t, 52, max, "Max 52") } -func mockHostPinger(timeout float64, args ...string) (string, error) { +func mockHostPinger(binary string, timeout float64, args ...string) (string, error) { return winENPingOutput, nil } @@ -102,7 +104,7 @@ Statystyka badania ping dla 195.187.242.157: (100% straty), ` -func mockErrorHostPinger(timeout float64, args ...string) (string, error) { +func mockErrorHostPinger(binary string, timeout float64, args ...string) (string, error) { return errorPingOutput, errors.New("No packets received") } @@ -128,6 +130,18 @@ func TestBadPingGather(t *testing.T) { acc.AssertContainsTaggedFields(t, "ping", fields, tags) } +func TestArguments(t *testing.T) { + arguments := []string{"-c", "3"} + p := Ping{ + Count: 2, + Timeout: 12.0, + Arguments: arguments, + } + + actual := p.args("www.google.com") + require.True(t, reflect.DeepEqual(actual, arguments), "Expected : %s Actual: %s", arguments, actual) +} + var lossyPingOutput = ` Badanie thecodinglove.com [66.6.44.4] z 9800 bajtami danych: Upłynął limit czasu żądania. 
@@ -147,7 +161,7 @@ Szacunkowy czas błądzenia pakietów w millisekundach: Minimum = 114 ms, Maksimum = 119 ms, Czas średni = 115 ms ` -func mockLossyHostPinger(timeout float64, args ...string) (string, error) { +func mockLossyHostPinger(binary string, timeout float64, args ...string) (string, error) { return lossyPingOutput, nil } @@ -207,7 +221,7 @@ Options: ` -func mockFatalHostPinger(timeout float64, args ...string) (string, error) { +func mockFatalHostPinger(binary string, timeout float64, args ...string) (string, error) { return fatalPingOutput, errors.New("So very bad") } @@ -249,7 +263,7 @@ Ping statistics for 8.8.8.8: Packets: Sent = 4, Received = 1, Lost = 3 (75% loss), ` -func mockUnreachableHostPinger(timeout float64, args ...string) (string, error) { +func mockUnreachableHostPinger(binary string, timeout float64, args ...string) (string, error) { return UnreachablePingOutput, errors.New("So very bad") } @@ -298,7 +312,7 @@ Ping statistics for 8.8.8.8: Packets: Sent = 4, Received = 1, Lost = 3 (75% loss), ` -func mockTTLExpiredPinger(timeout float64, args ...string) (string, error) { +func mockTTLExpiredPinger(binary string, timeout float64, args ...string) (string, error) { return TTLExpiredPingOutput, errors.New("So very bad") } @@ -333,3 +347,16 @@ func TestTTLExpiredPingGather(t *testing.T) { assert.False(t, acc.HasInt64Field("ping", "minimum_response_ms"), "Fatal ping should not have packet measurements") } + +func TestPingBinary(t *testing.T) { + var acc testutil.Accumulator + p := Ping{ + Urls: []string{"www.google.com"}, + Binary: "ping6", + pingHost: func(binary string, timeout float64, args ...string) (string, error) { + assert.True(t, binary == "ping6") + return "", nil + }, + } + acc.GatherError(p.Gather) +} diff --git a/plugins/inputs/postfix/README.md b/plugins/inputs/postfix/README.md index 3dab2b39d..2fdfacd9d 100644 --- a/plugins/inputs/postfix/README.md +++ b/plugins/inputs/postfix/README.md @@ -2,7 +2,10 @@ The postfix plugin reports metrics on the postfix queues. -For each of the active, hold, incoming, maildrop, and deferred queues (http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue length (number of items), size (bytes used by items), and age (age of oldest item in seconds). +For each of the active, hold, incoming, maildrop, and deferred queues +(http://www.postfix.org/QSHAPE_README.html#queues), it will report the queue +length (number of items), size (bytes used by items), and age (age of oldest +item in seconds). ### Configuration @@ -13,12 +16,15 @@ For each of the active, hold, incoming, maildrop, and deferred queues (http://ww # queue_directory = "/var/spool/postfix" ``` -#### Permissions: +#### Permissions Telegraf will need read access to the files in the queue directory. You may need to alter the permissions of these directories to provide access to the telegraf user. 
+This can be setup either using standard unix permissions or with Posix ACLs, +you will only need to use one method: + Unix permissions: ```sh $ sudo chgrp -R telegraf /var/spool/postfix/{active,hold,incoming,deferred} @@ -29,20 +35,20 @@ $ sudo chmod g+r /var/spool/postfix/maildrop Posix ACL: ```sh -$ sudo setfacl -Rdm u:telegraf:rX /var/spool/postfix/{active,hold,incoming,deferred,maildrop} +$ sudo setfacl -Rm g:telegraf:rX /var/spool/postfix/ +$ sudo setfacl -dm g:telegraf:rX /var/spool/postfix/ ``` -### Measurements & Fields: +### Metrics - postfix_queue + - tags: + - queue + - fields: - length (integer) - size (integer, bytes) - age (integer, seconds) -### Tags: - -- postfix_queue - - queue ### Example Output diff --git a/plugins/inputs/postgresql/README.md b/plugins/inputs/postgresql/README.md index e31fcff69..2ebc33ad6 100644 --- a/plugins/inputs/postgresql/README.md +++ b/plugins/inputs/postgresql/README.md @@ -31,9 +31,13 @@ _* value ignored and therefore not recorded._ More information about the meaning of these metrics can be found in the [PostgreSQL Documentation](http://www.postgresql.org/docs/9.2/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW) ## Configuration -Specify address via a url matching: +Specify address via a postgresql connection string: - `postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=[disable|verify-ca|verify-full]` + `host=localhost port=5432 user=telegraf database=telegraf` + +Or via an url matching: + + `postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=[disable|verify-ca|verify-full]` All connection parameters are optional. Without the dbname parameter, the driver will default to a database with the same name as the user. This dbname is just for instantiating a connection with the server and doesn't restrict the databases we are trying to grab metrics for. @@ -45,6 +49,13 @@ A list of databases to pull metrics about. If not specified, metrics for all dat `databases = ["app_production", "testing"]` +### TLS Configuration + +Add the `sslkey`, `sslcert` and `sslrootcert` options to your DSN: +``` +host=localhost user=pgotest dbname=app_production sslmode=require sslkey=/etc/telegraf/key.pem sslcert=/etc/telegraf/cert.pem sslrootcert=/etc/telegraf/ca.pem +``` + ### Configuration example ``` [[inputs.postgresql]] diff --git a/plugins/inputs/postgresql/postgresql.go b/plugins/inputs/postgresql/postgresql.go index 19c9db9ce..0911b20ce 100644 --- a/plugins/inputs/postgresql/postgresql.go +++ b/plugins/inputs/postgresql/postgresql.go @@ -26,7 +26,7 @@ var sampleConfig = ` ## postgres://[pqgotest[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production ## ## All connection parameters are optional. 
## @@ -155,7 +155,12 @@ func (p *Postgresql) accRow(row scanner, acc telegraf.Accumulator, columns []str } if columnMap["datname"] != nil { // extract the database name from the column map - dbname.WriteString((*columnMap["datname"]).(string)) + if dbNameStr, ok := (*columnMap["datname"]).(string); ok { + dbname.WriteString(dbNameStr) + } else { + // PG 12 adds tracking of global objects to pg_stat_database + dbname.WriteString("postgres_global") + } } else { dbname.WriteString("postgres") } @@ -189,6 +194,7 @@ func init() { MaxLifetime: internal.Duration{ Duration: 0, }, + IsPgBouncer: false, }, } }) diff --git a/plugins/inputs/postgresql/postgresql_test.go b/plugins/inputs/postgresql/postgresql_test.go index 306dca3b6..b23321019 100644 --- a/plugins/inputs/postgresql/postgresql_test.go +++ b/plugins/inputs/postgresql/postgresql_test.go @@ -20,6 +20,7 @@ func TestPostgresqlGeneratesMetrics(t *testing.T) { "host=%s user=postgres sslmode=disable", testutil.GetLocalHost(), ), + IsPgBouncer: false, }, Databases: []string{"postgres"}, } diff --git a/plugins/inputs/postgresql/service.go b/plugins/inputs/postgresql/service.go index 4f7b21e54..96a9a6317 100644 --- a/plugins/inputs/postgresql/service.go +++ b/plugins/inputs/postgresql/service.go @@ -3,6 +3,9 @@ package postgresql import ( "database/sql" "fmt" + "github.com/jackc/pgx" + "github.com/jackc/pgx/pgtype" + "github.com/jackc/pgx/stdlib" "net" "net/url" "regexp" @@ -90,6 +93,7 @@ type Service struct { MaxOpen int MaxLifetime internal.Duration DB *sql.DB + IsPgBouncer bool } // Start starts the ServiceInput's service, whatever that may be @@ -100,7 +104,41 @@ func (p *Service) Start(telegraf.Accumulator) (err error) { p.Address = localhost } - if p.DB, err = sql.Open("pgx", p.Address); err != nil { + connectionString := p.Address + + // Specific support to make it work with PgBouncer too + // See https://github.com/influxdata/telegraf/issues/3253#issuecomment-357505343 + if p.IsPgBouncer { + d := &stdlib.DriverConfig{ + ConnConfig: pgx.ConnConfig{ + PreferSimpleProtocol: true, + RuntimeParams: map[string]string{ + "client_encoding": "UTF8", + }, + CustomConnInfo: func(c *pgx.Conn) (*pgtype.ConnInfo, error) { + info := c.ConnInfo.DeepCopy() + info.RegisterDataType(pgtype.DataType{ + Value: &pgtype.OIDValue{}, + Name: "int8OID", + OID: pgtype.Int8OID, + }) + // Newer versions of pgbouncer need this defined. See the discussion here: + // https://github.com/jackc/pgx/issues/649 + info.RegisterDataType(pgtype.DataType{ + Value: &pgtype.OIDValue{}, + Name: "numericOID", + OID: pgtype.NumericOID, + }) + + return info, nil + }, + }, + } + stdlib.RegisterDriverConfig(d) + connectionString = d.ConnectionString(p.Address) + } + + if p.DB, err = sql.Open("pgx", connectionString); err != nil { return err } diff --git a/plugins/inputs/postgresql_extensible/README.md b/plugins/inputs/postgresql_extensible/README.md index 29c5e36d8..5b121b66b 100644 --- a/plugins/inputs/postgresql_extensible/README.md +++ b/plugins/inputs/postgresql_extensible/README.md @@ -14,11 +14,11 @@ The example below has two queries are specified, with the following parameters: ``` [[inputs.postgresql_extensible]] # specify address via a url matching: - # postgres://[pqgotest[:password]]@localhost[/dbname]?sslmode=... + # postgres://[pqgotest[:password]]@host:port[/dbname]?sslmode=... # or a simple string: - # host=localhost user=pqotest password=... sslmode=... dbname=app_production + # host=localhost port=5432 user=pqgotest password=... sslmode=... 
dbname=app_production # - # All connection parameters are optional. + # All connection parameters are optional. # Without the dbname parameter, the driver will default to a database # with the same name as the user. This dbname is just for instantiating a # connection with the server and doesn't restrict the databases we are trying @@ -44,6 +44,9 @@ The example below has two queries are specified, with the following parameters: # Be careful that if the withdbname is set to false you don't have to define # the where clause (aka with the dbname) # + # The script option can be used to specify the .sql file path. + # If script and sqlquery options specified at same time, sqlquery will be used + # # the tagvalue field is used to define custom tags (separated by comas). # the query is expected to return columns which match the names of the # defined tags. The values in these columns must be of a string-type, @@ -61,14 +64,14 @@ The example below has two queries are specified, with the following parameters: withdbname=false tagvalue="" [[inputs.postgresql_extensible.query]] - sqlquery="SELECT * FROM pg_stat_bgwriter" + script="your_sql-filepath.sql" version=901 withdbname=false tagvalue="" ``` The system can be easily extended using homemade metrics collection tools or -using postgreql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/)) +using postgresql extensions ([pg_stat_statements](http://www.postgresql.org/docs/current/static/pgstatstatements.html), [pg_proctab](https://github.com/markwkm/pg_proctab) or [powa](http://dalibo.github.io/powa/)) # Sample Queries : - telegraf.conf postgresql_extensible queries (assuming that you have configured diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible.go b/plugins/inputs/postgresql_extensible/postgresql_extensible.go index 056f4afc8..f91feaf40 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible.go @@ -3,10 +3,10 @@ package postgresql_extensible import ( "bytes" "fmt" - "log" + "io/ioutil" + "os" "strings" - // register in driver. _ "github.com/jackc/pgx/stdlib" "github.com/influxdata/telegraf" @@ -19,18 +19,15 @@ type Postgresql struct { postgresql.Service Databases []string AdditionalTags []string - Query []struct { - Sqlquery string - Version int - Withdbname bool - Tagvalue string - Measurement string - } - Debug bool + Query query + Debug bool + + Log telegraf.Logger } type query []struct { Sqlquery string + Script string Version int Withdbname bool Tagvalue string @@ -44,7 +41,7 @@ var sampleConfig = ` ## postgres://[pqgotest[:password]]@localhost[/dbname]\ ## ?sslmode=[disable|verify-ca|verify-full] ## or a simple string: - ## host=localhost user=pqotest password=... sslmode=... dbname=app_production + ## host=localhost user=pqgotest password=... sslmode=... dbname=app_production # ## All connection parameters are optional. # ## Without the dbname parameter, the driver will default to a database @@ -81,7 +78,10 @@ var sampleConfig = ` ## field is used to define custom tags (separated by commas) ## The optional "measurement" value can be used to override the default ## output measurement name ("postgresql"). - # + ## + ## The script option can be used to specify the .sql file path. 
+ ## If script and sqlquery options specified at same time, sqlquery will be used + ## ## Structure : ## [[inputs.postgresql_extensible.query]] ## sqlquery string @@ -102,6 +102,19 @@ var sampleConfig = ` tagvalue="postgresql.stats" ` +func (p *Postgresql) Init() error { + var err error + for i := range p.Query { + if p.Query[i].Sqlquery == "" { + p.Query[i].Sqlquery, err = ReadQueryFromFile(p.Query[i].Script) + if err != nil { + return err + } + } + } + return nil +} + func (p *Postgresql) SampleConfig() string { return sampleConfig } @@ -114,6 +127,20 @@ func (p *Postgresql) IgnoredColumns() map[string]bool { return ignoredColumns } +func ReadQueryFromFile(filePath string) (string, error) { + file, err := os.Open(filePath) + if err != nil { + return "", err + } + defer file.Close() + + query, err := ioutil.ReadAll(file) + if err != nil { + return "", err + } + return string(query), err +} + func (p *Postgresql) Gather(acc telegraf.Accumulator) error { var ( err error @@ -126,19 +153,18 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { columns []string ) - // Retreiving the database version - - query = `select substring(setting from 1 for 3) as version from pg_settings where name='server_version_num'` + // Retrieving the database version + query = `SELECT setting::integer / 100 AS version FROM pg_settings WHERE name = 'server_version_num'` if err = p.DB.QueryRow(query).Scan(&db_version); err != nil { db_version = 0 } // We loop in order to process each query // Query is not run if Database version does not match the query version. - for i := range p.Query { sql_query = p.Query[i].Sqlquery tag_value = p.Query[i].Tagvalue + if p.Query[i].Measurement != "" { meas_name = p.Query[i].Measurement } else { @@ -160,7 +186,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { if p.Query[i].Version <= db_version { rows, err := p.DB.Query(sql_query) if err != nil { - acc.AddError(err) + p.Log.Error(err.Error()) continue } @@ -168,7 +194,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { // grab the column information from the result if columns, err = rows.Columns(); err != nil { - acc.AddError(err) + p.Log.Error(err.Error()) continue } @@ -183,7 +209,7 @@ func (p *Postgresql) Gather(acc telegraf.Accumulator) error { for rows.Next() { err = p.accRow(meas_name, rows, acc, columns) if err != nil { - acc.AddError(err) + p.Log.Error(err.Error()) break } } @@ -221,9 +247,14 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula return err } - if columnMap["datname"] != nil { + if c, ok := columnMap["datname"]; ok && *c != nil { // extract the database name from the column map - dbname.WriteString((*columnMap["datname"]).(string)) + switch datname := (*c).(type) { + case string: + dbname.WriteString(datname) + default: + dbname.WriteString("postgres") + } } else { dbname.WriteString("postgres") } @@ -241,7 +272,7 @@ func (p *Postgresql) accRow(meas_name string, row scanner, acc telegraf.Accumula fields := make(map[string]interface{}) COLUMN: for col, val := range columnMap { - log.Printf("D! 
postgresql_extensible: column: %s = %T: %v\n", col, *val, *val) + p.Log.Debugf("Column: %s = %T: %v\n", col, *val, *val) _, ignore := ignoredColumns[col] if ignore || *val == nil { continue @@ -259,7 +290,7 @@ COLUMN: case int64, int32, int: tags[col] = fmt.Sprintf("%d", v) default: - log.Println("failed to add additional tag", col) + p.Log.Debugf("Failed to add %q as additional tag", col) } continue COLUMN } @@ -283,6 +314,7 @@ func init() { MaxLifetime: internal.Duration{ Duration: 0, }, + IsPgBouncer: false, }, } }) diff --git a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go index 77db5feb5..bca009f16 100644 --- a/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go +++ b/plugins/inputs/postgresql_extensible/postgresql_extensible_test.go @@ -1,6 +1,7 @@ package postgresql_extensible import ( + "errors" "fmt" "testing" @@ -12,18 +13,20 @@ import ( func queryRunner(t *testing.T, q query) *testutil.Accumulator { p := &Postgresql{ + Log: testutil.Logger{}, Service: postgresql.Service{ Address: fmt.Sprintf( "host=%s user=postgres sslmode=disable", testutil.GetLocalHost(), ), + IsPgBouncer: false, }, Databases: []string{"postgres"}, Query: q, } var acc testutil.Accumulator p.Start(&acc) - + p.Init() require.NoError(t, acc.GatherError(p.Gather)) return &acc } @@ -199,12 +202,39 @@ func TestPostgresqlFieldOutput(t *testing.T) { } } +func TestPostgresqlSqlScript(t *testing.T) { + q := query{{ + Script: "testdata/test.sql", + Version: 901, + Withdbname: false, + Tagvalue: "", + }} + p := &Postgresql{ + Log: testutil.Logger{}, + Service: postgresql.Service{ + Address: fmt.Sprintf( + "host=%s user=postgres sslmode=disable", + testutil.GetLocalHost(), + ), + IsPgBouncer: false, + }, + Databases: []string{"postgres"}, + Query: q, + } + var acc testutil.Accumulator + p.Start(&acc) + p.Init() + + require.NoError(t, acc.GatherError(p.Gather)) +} + func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } p := &Postgresql{ + Log: testutil.Logger{}, Service: postgresql.Service{ Address: fmt.Sprintf( "host=%s user=postgres sslmode=disable", @@ -222,3 +252,44 @@ func TestPostgresqlIgnoresUnwantedColumns(t *testing.T) { assert.False(t, acc.HasMeasurement(col)) } } + +func TestAccRow(t *testing.T) { + p := Postgresql{ + Log: testutil.Logger{}, + } + + var acc testutil.Accumulator + columns := []string{"datname", "cat"} + + testRows := []fakeRow{ + {fields: []interface{}{1, "gato"}}, + {fields: []interface{}{nil, "gato"}}, + {fields: []interface{}{"name", "gato"}}, + } + for i := range testRows { + err := p.accRow("pgTEST", testRows[i], &acc, columns) + if err != nil { + t.Fatalf("Scan failed: %s", err) + } + } +} + +type fakeRow struct { + fields []interface{} +} + +func (f fakeRow) Scan(dest ...interface{}) error { + if len(f.fields) != len(dest) { + return errors.New("Nada matchy buddy") + } + + for i, d := range dest { + switch d.(type) { + case (*interface{}): + *d.(*interface{}) = f.fields[i] + default: + return fmt.Errorf("Bad type %T", d) + } + } + return nil +} diff --git a/plugins/inputs/postgresql_extensible/testdata/test.sql b/plugins/inputs/postgresql_extensible/testdata/test.sql new file mode 100644 index 000000000..49ec02b25 --- /dev/null +++ b/plugins/inputs/postgresql_extensible/testdata/test.sql @@ -0,0 +1 @@ +select * from pg_stat_database \ No newline at end of file diff --git a/plugins/inputs/powerdns/README.md 
b/plugins/inputs/powerdns/README.md index 4b1732782..2e245eeff 100644 --- a/plugins/inputs/powerdns/README.md +++ b/plugins/inputs/powerdns/README.md @@ -14,6 +14,16 @@ The powerdns plugin gathers metrics about PowerDNS using unix socket. unix_sockets = ["/var/run/pdns.controlsocket"] ``` +#### Permissions + +Telegraf will need read access to the powerdns control socket. + +On many systems this can be accomplished by adding the `telegraf` user to the +`pdns` group: +``` +usermod telegraf -a -G pdns +``` + ### Measurements & Fields: - powerdns diff --git a/plugins/inputs/powerdns/powerdns.go b/plugins/inputs/powerdns/powerdns.go index e53373baf..3c661990c 100644 --- a/plugins/inputs/powerdns/powerdns.go +++ b/plugins/inputs/powerdns/powerdns.go @@ -110,8 +110,8 @@ func parseResponse(metrics string) map[string]interface{} { i, err := strconv.ParseInt(m[1], 10, 64) if err != nil { - log.Printf("E! powerdns: Error parsing integer for metric [%s]: %s", - metric, err) + log.Printf("E! [inputs.powerdns] error parsing integer for metric %q: %s", + metric, err.Error()) continue } values[m[0]] = i diff --git a/plugins/inputs/powerdns/powerdns_test.go b/plugins/inputs/powerdns/powerdns_test.go index 56666a886..fe64be5db 100644 --- a/plugins/inputs/powerdns/powerdns_test.go +++ b/plugins/inputs/powerdns/powerdns_test.go @@ -1,8 +1,6 @@ package powerdns import ( - "crypto/rand" - "encoding/binary" "fmt" "net" "testing" @@ -70,10 +68,9 @@ func (s statServer) serverSocket(l net.Listener) { } } -func TestMemcachedGeneratesMetrics(t *testing.T) { +func TestPowerdnsGeneratesMetrics(t *testing.T) { // We create a fake server to return test data - var randomNumber int64 - binary.Read(rand.Reader, binary.LittleEndian, &randomNumber) + randomNumber := int64(5239846799706671610) socket, err := net.Listen("unix", fmt.Sprintf("/tmp/pdns%d.controlsocket", randomNumber)) if err != nil { t.Fatal("Cannot initialize server on port ") diff --git a/plugins/inputs/powerdns_recursor/README.md b/plugins/inputs/powerdns_recursor/README.md new file mode 100644 index 000000000..09192db35 --- /dev/null +++ b/plugins/inputs/powerdns_recursor/README.md @@ -0,0 +1,163 @@ +# PowerDNS Recursor Input Plugin + +The `powerdns_recursor` plugin gathers metrics about PowerDNS Recursor using +the unix controlsocket. + +### Configuration + +```toml +[[inputs.powerdns_recursor]] + ## Path to the Recursor control socket. + unix_sockets = ["/var/run/pdns_recursor.controlsocket"] + + ## Directory to create receive socket. This default is likely not writable, + ## please reference the full plugin documentation for a recommended setup. + # socket_dir = "/var/run/" + ## Socket permissions for the receive socket. + # socket_mode = "0666" +``` + +#### Permissions + +Telegraf will need read/write access to the control socket and to the +`socket_dir`. PowerDNS will need to be able to write to the `socket_dir`. + +The setup described below was tested on a Debian Stretch system and may need +adapted for other systems. + +First change permissions on the controlsocket in the PowerDNS recursor +configuration, usually in `/etc/powerdns/recursor.conf`: +``` +socket-mode = 660 +``` + +Then place the `telegraf` user into the `pdns` group: +``` +usermod telegraf -a -G pdns +``` + +Since `telegraf` cannot write to to the default `/var/run` socket directory, +create a subdirectory and adjust permissions for this directory so that both +users can access it. 
+```sh +$ mkdir /var/run/pdns +$ chown root:pdns /var/run/pdns +$ chmod 770 /var/run/pdns +``` + +### Metrics + +- powerdns_recursor + - tags: + - server + - fields: + - all-outqueries + - answers-slow + - answers0-1 + - answers1-10 + - answers10-100 + - answers100-1000 + - auth-zone-queries + - auth4-answers-slow + - auth4-answers0-1 + - auth4-answers1-10 + - auth4-answers10-100 + - auth4-answers100-1000 + - auth6-answers-slow + - auth6-answers0-1 + - auth6-answers1-10 + - auth6-answers10-100 + - auth6-answers100-1000 + - cache-entries + - cache-hits + - cache-misses + - case-mismatches + - chain-resends + - client-parse-errors + - concurrent-queries + - dlg-only-drops + - dnssec-queries + - dnssec-result-bogus + - dnssec-result-indeterminate + - dnssec-result-insecure + - dnssec-result-nta + - dnssec-result-secure + - dnssec-validations + - dont-outqueries + - ecs-queries + - ecs-responses + - edns-ping-matches + - edns-ping-mismatches + - failed-host-entries + - fd-usage + - ignored-packets + - ipv6-outqueries + - ipv6-questions + - malloc-bytes + - max-cache-entries + - max-mthread-stack + - max-packetcache-entries + - negcache-entries + - no-packet-error + - noedns-outqueries + - noerror-answers + - noping-outqueries + - nsset-invalidations + - nsspeeds-entries + - nxdomain-answers + - outgoing-timeouts + - outgoing4-timeouts + - outgoing6-timeouts + - over-capacity-drops + - packetcache-entries + - packetcache-hits + - packetcache-misses + - policy-drops + - policy-result-custom + - policy-result-drop + - policy-result-noaction + - policy-result-nodata + - policy-result-nxdomain + - policy-result-truncate + - qa-latency + - query-pipe-full-drops + - questions + - real-memory-usage + - resource-limits + - security-status + - server-parse-errors + - servfail-answers + - spoof-prevents + - sys-msec + - tcp-client-overflow + - tcp-clients + - tcp-outqueries + - tcp-questions + - throttle-entries + - throttled-out + - throttled-outqueries + - too-old-drops + - udp-in-errors + - udp-noport-errors + - udp-recvbuf-errors + - udp-sndbuf-errors + - unauthorized-tcp + - unauthorized-udp + - unexpected-packets + - unreachables + - uptime + - user-msec + - x-our-latency + - x-ourtime-slow + - x-ourtime0-1 + - x-ourtime1-2 + - x-ourtime16-32 + - x-ourtime2-4 + - x-ourtime4-8 + - x-ourtime8-16 + +### Example Output + +``` +powerdns_recursor,server=/var/run/pdns_recursor.controlsocket 
all-outqueries=3631810i,answers-slow=36863i,answers0-1=179612i,answers1-10=1223305i,answers10-100=1252199i,answers100-1000=408357i,auth-zone-queries=4i,auth4-answers-slow=44758i,auth4-answers0-1=59721i,auth4-answers1-10=1766787i,auth4-answers10-100=1329638i,auth4-answers100-1000=430372i,auth6-answers-slow=0i,auth6-answers0-1=0i,auth6-answers1-10=0i,auth6-answers10-100=0i,auth6-answers100-1000=0i,cache-entries=296689i,cache-hits=150654i,cache-misses=2949682i,case-mismatches=0i,chain-resends=420004i,client-parse-errors=0i,concurrent-queries=0i,dlg-only-drops=0i,dnssec-queries=152970i,dnssec-result-bogus=0i,dnssec-result-indeterminate=0i,dnssec-result-insecure=0i,dnssec-result-nta=0i,dnssec-result-secure=47i,dnssec-validations=47i,dont-outqueries=62i,ecs-queries=0i,ecs-responses=0i,edns-ping-matches=0i,edns-ping-mismatches=0i,failed-host-entries=21i,fd-usage=32i,ignored-packets=0i,ipv6-outqueries=0i,ipv6-questions=0i,malloc-bytes=0i,max-cache-entries=1000000i,max-mthread-stack=33747i,max-packetcache-entries=500000i,negcache-entries=100019i,no-packet-error=0i,noedns-outqueries=73341i,noerror-answers=25453808i,noping-outqueries=0i,nsset-invalidations=2398i,nsspeeds-entries=3966i,nxdomain-answers=3341302i,outgoing-timeouts=44384i,outgoing4-timeouts=44384i,outgoing6-timeouts=0i,over-capacity-drops=0i,packetcache-entries=78258i,packetcache-hits=25999027i,packetcache-misses=3100179i,policy-drops=0i,policy-result-custom=0i,policy-result-drop=0i,policy-result-noaction=3100336i,policy-result-nodata=0i,policy-result-nxdomain=0i,policy-result-truncate=0i,qa-latency=6553i,query-pipe-full-drops=0i,questions=29099363i,real-memory-usage=280494080i,resource-limits=0i,security-status=1i,server-parse-errors=0i,servfail-answers=304253i,spoof-prevents=0i,sys-msec=1312600i,tcp-client-overflow=0i,tcp-clients=0i,tcp-outqueries=116i,tcp-questions=133i,throttle-entries=21i,throttled-out=13296i,throttled-outqueries=13296i,too-old-drops=2i,udp-in-errors=4i,udp-noport-errors=2918i,udp-recvbuf-errors=0i,udp-sndbuf-errors=0i,unauthorized-tcp=0i,unauthorized-udp=0i,unexpected-packets=0i,unreachables=1708i,uptime=167482i,user-msec=1282640i,x-our-latency=19i,x-ourtime-slow=642i,x-ourtime0-1=3095566i,x-ourtime1-2=3401i,x-ourtime16-32=201i,x-ourtime2-4=304i,x-ourtime4-8=198i,x-ourtime8-16=24i 1533903879000000000 +``` diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor.go b/plugins/inputs/powerdns_recursor/powerdns_recursor.go new file mode 100644 index 000000000..d040d8355 --- /dev/null +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor.go @@ -0,0 +1,167 @@ +package powerdns_recursor + +import ( + "bufio" + "errors" + "fmt" + "log" + "math/rand" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type PowerdnsRecursor struct { + UnixSockets []string `toml:"unix_sockets"` + SocketDir string `toml:"socket_dir"` + SocketMode string `toml:"socket_mode"` + + mode uint32 +} + +var defaultTimeout = 5 * time.Second + +var sampleConfig = ` + ## Path to the Recursor control socket. + unix_sockets = ["/var/run/pdns_recursor.controlsocket"] + + ## Directory to create receive socket. This default is likely not writable, + ## please reference the full plugin documentation for a recommended setup. + # socket_dir = "/var/run/" + ## Socket permissions for the receive socket. 
+ # socket_mode = "0666" +` + +func (p *PowerdnsRecursor) SampleConfig() string { + return sampleConfig +} + +func (p *PowerdnsRecursor) Description() string { + return "Read metrics from one or many PowerDNS Recursor servers" +} + +func (p *PowerdnsRecursor) Init() error { + if p.SocketMode != "" { + mode, err := strconv.ParseUint(p.SocketMode, 8, 32) + if err != nil { + return fmt.Errorf("could not parse socket_mode: %v", err) + } + + p.mode = uint32(mode) + } + return nil +} + +func (p *PowerdnsRecursor) Gather(acc telegraf.Accumulator) error { + if len(p.UnixSockets) == 0 { + return p.gatherServer("/var/run/pdns_recursor.controlsocket", acc) + } + + for _, serverSocket := range p.UnixSockets { + if err := p.gatherServer(serverSocket, acc); err != nil { + acc.AddError(err) + } + } + + return nil +} + +func (p *PowerdnsRecursor) gatherServer(address string, acc telegraf.Accumulator) error { + randomNumber := rand.Int63() + recvSocket := filepath.Join("/", "var", "run", fmt.Sprintf("pdns_recursor_telegraf%d", randomNumber)) + if p.SocketDir != "" { + recvSocket = filepath.Join(p.SocketDir, fmt.Sprintf("pdns_recursor_telegraf%d", randomNumber)) + } + + laddr, err := net.ResolveUnixAddr("unixgram", recvSocket) + if err != nil { + return err + } + defer os.Remove(recvSocket) + raddr, err := net.ResolveUnixAddr("unixgram", address) + if err != nil { + return err + } + conn, err := net.DialUnix("unixgram", laddr, raddr) + if err != nil { + return err + } + if err := os.Chmod(recvSocket, os.FileMode(p.mode)); err != nil { + return err + } + defer conn.Close() + + conn.SetDeadline(time.Now().Add(defaultTimeout)) + + // Read and write buffer + rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)) + + // Send command + if _, err := fmt.Fprint(rw, "get-all\n"); err != nil { + return nil + } + if err := rw.Flush(); err != nil { + return err + } + + // Read data + buf := make([]byte, 16384) + n, err := rw.Read(buf) + if err != nil { + return err + } + if n == 0 { + return errors.New("no data received") + } + + metrics := string(buf) + + // Process data + fields := parseResponse(metrics) + + // Add server socket as a tag + tags := map[string]string{"server": address} + + acc.AddFields("powerdns_recursor", fields, tags) + + conn.Close() + + return nil +} + +func parseResponse(metrics string) map[string]interface{} { + values := make(map[string]interface{}) + + s := strings.Split(metrics, "\n") + + for _, metric := range s[:len(s)-1] { + m := strings.Split(metric, "\t") + if len(m) < 2 { + continue + } + + i, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + log.Printf("E! 
[inputs.powerdns_recursor] error parsing integer for metric %q: %s", + metric, err.Error()) + continue + } + values[m[0]] = i + } + + return values +} + +func init() { + inputs.Add("powerdns_recursor", func() telegraf.Input { + return &PowerdnsRecursor{ + mode: uint32(0666), + } + }) +} diff --git a/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go new file mode 100644 index 000000000..d0f5690cc --- /dev/null +++ b/plugins/inputs/powerdns_recursor/powerdns_recursor_test.go @@ -0,0 +1,561 @@ +package powerdns_recursor + +import ( + "net" + "os" + "runtime" + "sync" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type statServer struct{} + +var metrics = "all-outqueries\t3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + + "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" + + "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" + + "auth4-answers100-1000\t424683\nauth6-answers-slow\t0\nauth6-answers0-1\t0\nauth6-answers1-10\t0\n" + + "auth6-answers10-100\t0\nauth6-answers100-1000\t0\ncache-entries\t295917\ncache-hits\t148630\n" + + "cache-misses\t2916149\ncase-mismatches\t0\nchain-resends\t418602\nclient-parse-errors\t0\n" + + "concurrent-queries\t0\ndlg-only-drops\t0\ndnssec-queries\t151536\ndnssec-result-bogus\t0\n" + + "dnssec-result-indeterminate\t0\ndnssec-result-insecure\t0\ndnssec-result-nta\t0\n" + + "dnssec-result-secure\t46\ndnssec-validations\t46\ndont-outqueries\t62\necs-queries\t0\n" + + "ecs-responses\t0\nedns-ping-matches\t0\nedns-ping-mismatches\t0\nfailed-host-entries\t33\n" + + "fd-usage\t32\nignored-packets\t0\nipv6-outqueries\t0\nipv6-questions\t0\nmalloc-bytes\t0\n" + + "max-cache-entries\t1000000\nmax-mthread-stack\t33747\nmax-packetcache-entries\t500000\n" + + "negcache-entries\t100070\nno-packet-error\t0\nnoedns-outqueries\t72409\nnoerror-answers\t25155259\n" + + "noping-outqueries\t0\nnsset-invalidations\t2385\nnsspeeds-entries\t3571\nnxdomain-answers\t3307768\n" + + "outgoing-timeouts\t43876\noutgoing4-timeouts\t43876\noutgoing6-timeouts\t0\nover-capacity-drops\t0\n" + + "packetcache-entries\t80756\npacketcache-hits\t25698497\npacketcache-misses\t3064625\npolicy-drops\t0\n" + + "policy-result-custom\t0\npolicy-result-drop\t0\npolicy-result-noaction\t3064779\npolicy-result-nodata\t0\n" + + "policy-result-nxdomain\t0\npolicy-result-truncate\t0\nqa-latency\t6587\nquery-pipe-full-drops\t0\n" + + "questions\t28763276\nreal-memory-usage\t280465408\nresource-limits\t0\nsecurity-status\t1\n" + + "server-parse-errors\t0\nservfail-answers\t300249\nspoof-prevents\t0\nsys-msec\t1296588\n" + + "tcp-client-overflow\t0\ntcp-clients\t0\ntcp-outqueries\t116\ntcp-questions\t130\nthrottle-entries\t33\n" + + "throttled-out\t13187\nthrottled-outqueries\t13187\ntoo-old-drops\t2\nudp-in-errors\t4\n" + + "udp-noport-errors\t2908\nudp-recvbuf-errors\t0\nudp-sndbuf-errors\t0\nunauthorized-tcp\t0\n" + + "unauthorized-udp\t0\nunexpected-packets\t0\nunreachables\t1695\nuptime\t165725\nuser-msec\t1266384\n" + + "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" + + "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" + +// first metric has no "\t" +var corruptMetrics = "all-outqueries3591637\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" 
+ + "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" + + "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" + + "auth4-answers100-1000\t424683\nauth6-answers-slow\t0\nauth6-answers0-1\t0\nauth6-answers1-10\t0\n" + + "auth6-answers10-100\t0\nauth6-answers100-1000\t0\ncache-entries\t295917\ncache-hits\t148630\n" + + "cache-misses\t2916149\ncase-mismatches\t0\nchain-resends\t418602\nclient-parse-errors\t0\n" + + "concurrent-queries\t0\ndlg-only-drops\t0\ndnssec-queries\t151536\ndnssec-result-bogus\t0\n" + + "dnssec-result-indeterminate\t0\ndnssec-result-insecure\t0\ndnssec-result-nta\t0\n" + + "dnssec-result-secure\t46\ndnssec-validations\t46\ndont-outqueries\t62\necs-queries\t0\n" + + "ecs-responses\t0\nedns-ping-matches\t0\nedns-ping-mismatches\t0\nfailed-host-entries\t33\n" + + "fd-usage\t32\nignored-packets\t0\nipv6-outqueries\t0\nipv6-questions\t0\nmalloc-bytes\t0\n" + + "max-cache-entries\t1000000\nmax-mthread-stack\t33747\nmax-packetcache-entries\t500000\n" + + "negcache-entries\t100070\nno-packet-error\t0\nnoedns-outqueries\t72409\nnoerror-answers\t25155259\n" + + "noping-outqueries\t0\nnsset-invalidations\t2385\nnsspeeds-entries\t3571\nnxdomain-answers\t3307768\n" + + "outgoing-timeouts\t43876\noutgoing4-timeouts\t43876\noutgoing6-timeouts\t0\nover-capacity-drops\t0\n" + + "packetcache-entries\t80756\npacketcache-hits\t25698497\npacketcache-misses\t3064625\npolicy-drops\t0\n" + + "policy-result-custom\t0\npolicy-result-drop\t0\npolicy-result-noaction\t3064779\npolicy-result-nodata\t0\n" + + "policy-result-nxdomain\t0\npolicy-result-truncate\t0\nqa-latency\t6587\nquery-pipe-full-drops\t0\n" + + "questions\t28763276\nreal-memory-usage\t280465408\nresource-limits\t0\nsecurity-status\t1\n" + + "server-parse-errors\t0\nservfail-answers\t300249\nspoof-prevents\t0\nsys-msec\t1296588\n" + + "tcp-client-overflow\t0\ntcp-clients\t0\ntcp-outqueries\t116\ntcp-questions\t130\nthrottle-entries\t33\n" + + "throttled-out\t13187\nthrottled-outqueries\t13187\ntoo-old-drops\t2\nudp-in-errors\t4\n" + + "udp-noport-errors\t2908\nudp-recvbuf-errors\t0\nudp-sndbuf-errors\t0\nunauthorized-tcp\t0\n" + + "unauthorized-udp\t0\nunexpected-packets\t0\nunreachables\t1695\nuptime\t165725\nuser-msec\t1266384\n" + + "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" + + "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" + +// integer overflow +var intOverflowMetrics = "all-outqueries\t18446744073709550195\nanswers-slow\t36451\nanswers0-1\t177297\nanswers1-10\t1209328\n" + + "answers10-100\t1238786\nanswers100-1000\t402917\nauth-zone-queries\t4\nauth4-answers-slow\t44248\n" + + "auth4-answers0-1\t59169\nauth4-answers1-10\t1747403\nauth4-answers10-100\t1315621\n" + + "auth4-answers100-1000\t424683\nauth6-answers-slow\t0\nauth6-answers0-1\t0\nauth6-answers1-10\t0\n" + + "auth6-answers10-100\t0\nauth6-answers100-1000\t0\ncache-entries\t295917\ncache-hits\t148630\n" + + "cache-misses\t2916149\ncase-mismatches\t0\nchain-resends\t418602\nclient-parse-errors\t0\n" + + "concurrent-queries\t0\ndlg-only-drops\t0\ndnssec-queries\t151536\ndnssec-result-bogus\t0\n" + + "dnssec-result-indeterminate\t0\ndnssec-result-insecure\t0\ndnssec-result-nta\t0\n" + + "dnssec-result-secure\t46\ndnssec-validations\t46\ndont-outqueries\t62\necs-queries\t0\n" + + "ecs-responses\t0\nedns-ping-matches\t0\nedns-ping-mismatches\t0\nfailed-host-entries\t33\n" + + 
"fd-usage\t32\nignored-packets\t0\nipv6-outqueries\t0\nipv6-questions\t0\nmalloc-bytes\t0\n" + + "max-cache-entries\t1000000\nmax-mthread-stack\t33747\nmax-packetcache-entries\t500000\n" + + "negcache-entries\t100070\nno-packet-error\t0\nnoedns-outqueries\t72409\nnoerror-answers\t25155259\n" + + "noping-outqueries\t0\nnsset-invalidations\t2385\nnsspeeds-entries\t3571\nnxdomain-answers\t3307768\n" + + "outgoing-timeouts\t43876\noutgoing4-timeouts\t43876\noutgoing6-timeouts\t0\nover-capacity-drops\t0\n" + + "packetcache-entries\t80756\npacketcache-hits\t25698497\npacketcache-misses\t3064625\npolicy-drops\t0\n" + + "policy-result-custom\t0\npolicy-result-drop\t0\npolicy-result-noaction\t3064779\npolicy-result-nodata\t0\n" + + "policy-result-nxdomain\t0\npolicy-result-truncate\t0\nqa-latency\t6587\nquery-pipe-full-drops\t0\n" + + "questions\t28763276\nreal-memory-usage\t280465408\nresource-limits\t0\nsecurity-status\t1\n" + + "server-parse-errors\t0\nservfail-answers\t300249\nspoof-prevents\t0\nsys-msec\t1296588\n" + + "tcp-client-overflow\t0\ntcp-clients\t0\ntcp-outqueries\t116\ntcp-questions\t130\nthrottle-entries\t33\n" + + "throttled-out\t13187\nthrottled-outqueries\t13187\ntoo-old-drops\t2\nudp-in-errors\t4\n" + + "udp-noport-errors\t2908\nudp-recvbuf-errors\t0\nudp-sndbuf-errors\t0\nunauthorized-tcp\t0\n" + + "unauthorized-udp\t0\nunexpected-packets\t0\nunreachables\t1695\nuptime\t165725\nuser-msec\t1266384\n" + + "x-our-latency\t19\nx-ourtime-slow\t632\nx-ourtime0-1\t3060079\nx-ourtime1-2\t3351\nx-ourtime16-32\t197\n" + + "x-ourtime2-4\t302\nx-ourtime4-8\t194\nx-ourtime8-16\t24\n" + +func TestPowerdnsRecursorGeneratesMetrics(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("Skipping test on darwin") + } + // We create a fake server to return test data + controlSocket := "/tmp/pdns5724354148158589552.controlsocket" + addr, err := net.ResolveUnixAddr("unixgram", controlSocket) + if err != nil { + t.Fatal("Cannot parse unix socket") + } + socket, err := net.ListenUnixgram("unixgram", addr) + if err != nil { + t.Fatal("Cannot initialize server on port") + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer func() { + socket.Close() + os.Remove(controlSocket) + wg.Done() + }() + + for { + buf := make([]byte, 1024) + n, remote, err := socket.ReadFromUnix(buf) + if err != nil { + socket.Close() + return + } + + data := buf[:n] + if string(data) == "get-all\n" { + socket.WriteToUnix([]byte(metrics), remote) + socket.Close() + } + + time.Sleep(100 * time.Millisecond) + } + }() + + p := &PowerdnsRecursor{ + UnixSockets: []string{controlSocket}, + SocketDir: "/tmp", + SocketMode: "0666", + } + err = p.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + + err = acc.GatherError(p.Gather) + require.NoError(t, err) + + wg.Wait() + + intMetrics := []string{"all-outqueries", "answers-slow", "answers0-1", "answers1-10", + "answers10-100", "answers100-1000", "auth-zone-queries", "auth4-answers-slow", + "auth4-answers0-1", "auth4-answers1-10", "auth4-answers10-100", "auth4-answers100-1000", + "auth6-answers-slow", "auth6-answers0-1", "auth6-answers1-10", "auth6-answers10-100", + "auth6-answers100-1000", "cache-entries", "cache-hits", "cache-misses", "case-mismatches", + "chain-resends", "client-parse-errors", "concurrent-queries", "dlg-only-drops", "dnssec-queries", + "dnssec-result-bogus", "dnssec-result-indeterminate", "dnssec-result-insecure", "dnssec-result-nta", + "dnssec-result-secure", "dnssec-validations", "dont-outqueries", "ecs-queries", "ecs-responses", + 
"edns-ping-matches", "edns-ping-mismatches", "failed-host-entries", "fd-usage", "ignored-packets", + "ipv6-outqueries", "ipv6-questions", "malloc-bytes", "max-cache-entries", "max-mthread-stack", + "max-packetcache-entries", "negcache-entries", "no-packet-error", "noedns-outqueries", + "noerror-answers", "noping-outqueries", "nsset-invalidations", "nsspeeds-entries", + "nxdomain-answers", "outgoing-timeouts", "outgoing4-timeouts", "outgoing6-timeouts", + "over-capacity-drops", "packetcache-entries", "packetcache-hits", "packetcache-misses", + "policy-drops", "policy-result-custom", "policy-result-drop", "policy-result-noaction", + "policy-result-nodata", "policy-result-nxdomain", "policy-result-truncate", "qa-latency", + "query-pipe-full-drops", "questions", "real-memory-usage", "resource-limits", "security-status", + "server-parse-errors", "servfail-answers", "spoof-prevents", "sys-msec", "tcp-client-overflow", + "tcp-clients", "tcp-outqueries", "tcp-questions", "throttle-entries", "throttled-out", "throttled-outqueries", + "too-old-drops", "udp-in-errors", "udp-noport-errors", "udp-recvbuf-errors", "udp-sndbuf-errors", + "unauthorized-tcp", "unauthorized-udp", "unexpected-packets", "unreachables", "uptime", "user-msec", + "x-our-latency", "x-ourtime-slow", "x-ourtime0-1", "x-ourtime1-2", "x-ourtime16-32", + "x-ourtime2-4", "x-ourtime4-8", "x-ourtime8-16"} + + for _, metric := range intMetrics { + assert.True(t, acc.HasInt64Field("powerdns_recursor", metric), metric) + } +} + +func TestPowerdnsRecursorParseMetrics(t *testing.T) { + values := parseResponse(metrics) + + tests := []struct { + key string + value int64 + }{ + {"all-outqueries", 3591637}, + {"answers-slow", 36451}, + {"answers0-1", 177297}, + {"answers1-10", 1209328}, + {"answers10-100", 1238786}, + {"answers100-1000", 402917}, + {"auth-zone-queries", 4}, + {"auth4-answers-slow", 44248}, + {"auth4-answers0-1", 59169}, + {"auth4-answers1-10", 1747403}, + {"auth4-answers10-100", 1315621}, + {"auth4-answers100-1000", 424683}, + {"auth6-answers-slow", 0}, + {"auth6-answers0-1", 0}, + {"auth6-answers1-10", 0}, + {"auth6-answers10-100", 0}, + {"auth6-answers100-1000", 0}, + {"cache-entries", 295917}, + {"cache-hits", 148630}, + {"cache-misses", 2916149}, + {"case-mismatches", 0}, + {"chain-resends", 418602}, + {"client-parse-errors", 0}, + {"concurrent-queries", 0}, + {"dlg-only-drops", 0}, + {"dnssec-queries", 151536}, + {"dnssec-result-bogus", 0}, + {"dnssec-result-indeterminate", 0}, + {"dnssec-result-insecure", 0}, + {"dnssec-result-nta", 0}, + {"dnssec-result-secure", 46}, + {"dnssec-validations", 46}, + {"dont-outqueries", 62}, + {"ecs-queries", 0}, + {"ecs-responses", 0}, + {"edns-ping-matches", 0}, + {"edns-ping-mismatches", 0}, + {"failed-host-entries", 33}, + {"fd-usage", 32}, + {"ignored-packets", 0}, + {"ipv6-outqueries", 0}, + {"ipv6-questions", 0}, + {"malloc-bytes", 0}, + {"max-cache-entries", 1000000}, + {"max-mthread-stack", 33747}, + {"max-packetcache-entries", 500000}, + {"negcache-entries", 100070}, + {"no-packet-error", 0}, + {"noedns-outqueries", 72409}, + {"noerror-answers", 25155259}, + {"noping-outqueries", 0}, + {"nsset-invalidations", 2385}, + {"nsspeeds-entries", 3571}, + {"nxdomain-answers", 3307768}, + {"outgoing-timeouts", 43876}, + {"outgoing4-timeouts", 43876}, + {"outgoing6-timeouts", 0}, + {"over-capacity-drops", 0}, + {"packetcache-entries", 80756}, + {"packetcache-hits", 25698497}, + {"packetcache-misses", 3064625}, + {"policy-drops", 0}, + {"policy-result-custom", 0}, + {"policy-result-drop", 0}, 
+ {"policy-result-noaction", 3064779}, + {"policy-result-nodata", 0}, + {"policy-result-nxdomain", 0}, + {"policy-result-truncate", 0}, + {"qa-latency", 6587}, + {"query-pipe-full-drops", 0}, + {"questions", 28763276}, + {"real-memory-usage", 280465408}, + {"resource-limits", 0}, + {"security-status", 1}, + {"server-parse-errors", 0}, + {"servfail-answers", 300249}, + {"spoof-prevents", 0}, + {"sys-msec", 1296588}, + {"tcp-client-overflow", 0}, + {"tcp-clients", 0}, + {"tcp-outqueries", 116}, + {"tcp-questions", 130}, + {"throttle-entries", 33}, + {"throttled-out", 13187}, + {"throttled-outqueries", 13187}, + {"too-old-drops", 2}, + {"udp-in-errors", 4}, + {"udp-noport-errors", 2908}, + {"udp-recvbuf-errors", 0}, + {"udp-sndbuf-errors", 0}, + {"unauthorized-tcp", 0}, + {"unauthorized-udp", 0}, + {"unexpected-packets", 0}, + {"unreachables", 1695}, + {"uptime", 165725}, + {"user-msec", 1266384}, + {"x-our-latency", 19}, + {"x-ourtime-slow", 632}, + {"x-ourtime0-1", 3060079}, + {"x-ourtime1-2", 3351}, + {"x-ourtime16-32", 197}, + {"x-ourtime2-4", 302}, + {"x-ourtime4-8", 194}, + {"x-ourtime8-16", 24}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} + +func TestPowerdnsRecursorParseCorruptMetrics(t *testing.T) { + values := parseResponse(corruptMetrics) + + tests := []struct { + key string + value int64 + }{ + {"answers-slow", 36451}, + {"answers0-1", 177297}, + {"answers1-10", 1209328}, + {"answers10-100", 1238786}, + {"answers100-1000", 402917}, + {"auth-zone-queries", 4}, + {"auth4-answers-slow", 44248}, + {"auth4-answers0-1", 59169}, + {"auth4-answers1-10", 1747403}, + {"auth4-answers10-100", 1315621}, + {"auth4-answers100-1000", 424683}, + {"auth6-answers-slow", 0}, + {"auth6-answers0-1", 0}, + {"auth6-answers1-10", 0}, + {"auth6-answers10-100", 0}, + {"auth6-answers100-1000", 0}, + {"cache-entries", 295917}, + {"cache-hits", 148630}, + {"cache-misses", 2916149}, + {"case-mismatches", 0}, + {"chain-resends", 418602}, + {"client-parse-errors", 0}, + {"concurrent-queries", 0}, + {"dlg-only-drops", 0}, + {"dnssec-queries", 151536}, + {"dnssec-result-bogus", 0}, + {"dnssec-result-indeterminate", 0}, + {"dnssec-result-insecure", 0}, + {"dnssec-result-nta", 0}, + {"dnssec-result-secure", 46}, + {"dnssec-validations", 46}, + {"dont-outqueries", 62}, + {"ecs-queries", 0}, + {"ecs-responses", 0}, + {"edns-ping-matches", 0}, + {"edns-ping-mismatches", 0}, + {"failed-host-entries", 33}, + {"fd-usage", 32}, + {"ignored-packets", 0}, + {"ipv6-outqueries", 0}, + {"ipv6-questions", 0}, + {"malloc-bytes", 0}, + {"max-cache-entries", 1000000}, + {"max-mthread-stack", 33747}, + {"max-packetcache-entries", 500000}, + {"negcache-entries", 100070}, + {"no-packet-error", 0}, + {"noedns-outqueries", 72409}, + {"noerror-answers", 25155259}, + {"noping-outqueries", 0}, + {"nsset-invalidations", 2385}, + {"nsspeeds-entries", 3571}, + {"nxdomain-answers", 3307768}, + {"outgoing-timeouts", 43876}, + {"outgoing4-timeouts", 43876}, + {"outgoing6-timeouts", 0}, + {"over-capacity-drops", 0}, + {"packetcache-entries", 80756}, + {"packetcache-hits", 25698497}, + {"packetcache-misses", 3064625}, + {"policy-drops", 0}, + {"policy-result-custom", 0}, + {"policy-result-drop", 0}, + {"policy-result-noaction", 3064779}, + {"policy-result-nodata", 0}, + {"policy-result-nxdomain", 0}, + 
{"policy-result-truncate", 0}, + {"qa-latency", 6587}, + {"query-pipe-full-drops", 0}, + {"questions", 28763276}, + {"real-memory-usage", 280465408}, + {"resource-limits", 0}, + {"security-status", 1}, + {"server-parse-errors", 0}, + {"servfail-answers", 300249}, + {"spoof-prevents", 0}, + {"sys-msec", 1296588}, + {"tcp-client-overflow", 0}, + {"tcp-clients", 0}, + {"tcp-outqueries", 116}, + {"tcp-questions", 130}, + {"throttle-entries", 33}, + {"throttled-out", 13187}, + {"throttled-outqueries", 13187}, + {"too-old-drops", 2}, + {"udp-in-errors", 4}, + {"udp-noport-errors", 2908}, + {"udp-recvbuf-errors", 0}, + {"udp-sndbuf-errors", 0}, + {"unauthorized-tcp", 0}, + {"unauthorized-udp", 0}, + {"unexpected-packets", 0}, + {"unreachables", 1695}, + {"uptime", 165725}, + {"user-msec", 1266384}, + {"x-our-latency", 19}, + {"x-ourtime-slow", 632}, + {"x-ourtime0-1", 3060079}, + {"x-ourtime1-2", 3351}, + {"x-ourtime16-32", 197}, + {"x-ourtime2-4", 302}, + {"x-ourtime4-8", 194}, + {"x-ourtime8-16", 24}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} + +func TestPowerdnsRecursorParseIntOverflowMetrics(t *testing.T) { + values := parseResponse(intOverflowMetrics) + + tests := []struct { + key string + value int64 + }{ + {"answers-slow", 36451}, + {"answers0-1", 177297}, + {"answers1-10", 1209328}, + {"answers10-100", 1238786}, + {"answers100-1000", 402917}, + {"auth-zone-queries", 4}, + {"auth4-answers-slow", 44248}, + {"auth4-answers0-1", 59169}, + {"auth4-answers1-10", 1747403}, + {"auth4-answers10-100", 1315621}, + {"auth4-answers100-1000", 424683}, + {"auth6-answers-slow", 0}, + {"auth6-answers0-1", 0}, + {"auth6-answers1-10", 0}, + {"auth6-answers10-100", 0}, + {"auth6-answers100-1000", 0}, + {"cache-entries", 295917}, + {"cache-hits", 148630}, + {"cache-misses", 2916149}, + {"case-mismatches", 0}, + {"chain-resends", 418602}, + {"client-parse-errors", 0}, + {"concurrent-queries", 0}, + {"dlg-only-drops", 0}, + {"dnssec-queries", 151536}, + {"dnssec-result-bogus", 0}, + {"dnssec-result-indeterminate", 0}, + {"dnssec-result-insecure", 0}, + {"dnssec-result-nta", 0}, + {"dnssec-result-secure", 46}, + {"dnssec-validations", 46}, + {"dont-outqueries", 62}, + {"ecs-queries", 0}, + {"ecs-responses", 0}, + {"edns-ping-matches", 0}, + {"edns-ping-mismatches", 0}, + {"failed-host-entries", 33}, + {"fd-usage", 32}, + {"ignored-packets", 0}, + {"ipv6-outqueries", 0}, + {"ipv6-questions", 0}, + {"malloc-bytes", 0}, + {"max-cache-entries", 1000000}, + {"max-mthread-stack", 33747}, + {"max-packetcache-entries", 500000}, + {"negcache-entries", 100070}, + {"no-packet-error", 0}, + {"noedns-outqueries", 72409}, + {"noerror-answers", 25155259}, + {"noping-outqueries", 0}, + {"nsset-invalidations", 2385}, + {"nsspeeds-entries", 3571}, + {"nxdomain-answers", 3307768}, + {"outgoing-timeouts", 43876}, + {"outgoing4-timeouts", 43876}, + {"outgoing6-timeouts", 0}, + {"over-capacity-drops", 0}, + {"packetcache-entries", 80756}, + {"packetcache-hits", 25698497}, + {"packetcache-misses", 3064625}, + {"policy-drops", 0}, + {"policy-result-custom", 0}, + {"policy-result-drop", 0}, + {"policy-result-noaction", 3064779}, + {"policy-result-nodata", 0}, + {"policy-result-nxdomain", 0}, + {"policy-result-truncate", 0}, + {"qa-latency", 6587}, + {"query-pipe-full-drops", 0}, + {"questions", 
28763276}, + {"real-memory-usage", 280465408}, + {"resource-limits", 0}, + {"security-status", 1}, + {"server-parse-errors", 0}, + {"servfail-answers", 300249}, + {"spoof-prevents", 0}, + {"sys-msec", 1296588}, + {"tcp-client-overflow", 0}, + {"tcp-clients", 0}, + {"tcp-outqueries", 116}, + {"tcp-questions", 130}, + {"throttle-entries", 33}, + {"throttled-out", 13187}, + {"throttled-outqueries", 13187}, + {"too-old-drops", 2}, + {"udp-in-errors", 4}, + {"udp-noport-errors", 2908}, + {"udp-recvbuf-errors", 0}, + {"udp-sndbuf-errors", 0}, + {"unauthorized-tcp", 0}, + {"unauthorized-udp", 0}, + {"unexpected-packets", 0}, + {"unreachables", 1695}, + {"uptime", 165725}, + {"user-msec", 1266384}, + {"x-our-latency", 19}, + {"x-ourtime-slow", 632}, + {"x-ourtime0-1", 3060079}, + {"x-ourtime1-2", 3351}, + {"x-ourtime16-32", 197}, + {"x-ourtime2-4", 302}, + {"x-ourtime4-8", 194}, + {"x-ourtime8-16", 24}, + } + + for _, test := range tests { + value, ok := values[test.key] + if !ok { + t.Errorf("Did not find key for metric %s in values", test.key) + continue + } + if value != test.value { + t.Errorf("Metric: %s, Expected: %d, actual: %d", + test.key, test.value, value) + } + } +} diff --git a/plugins/inputs/system/PROCESSES_README.md b/plugins/inputs/processes/README.md similarity index 83% rename from plugins/inputs/system/PROCESSES_README.md rename to plugins/inputs/processes/README.md index 3c2e27291..756326d75 100644 --- a/plugins/inputs/system/PROCESSES_README.md +++ b/plugins/inputs/processes/README.md @@ -6,7 +6,9 @@ them by status (zombie, sleeping, running, etc.) On linux this plugin requires access to procfs (/proc), on other OSes it requires access to execute `ps`. -### Configuration: +**Supported Platforms**: Linux, FreeBSD, Darwin + +### Configuration ```toml # Get the number of processes and group them by status @@ -19,9 +21,10 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info `docker run -v /proc:/rootfs/proc:ro -e HOST_PROC=/rootfs/proc` -### Measurements & Fields: +### Metrics - processes + - fields: - blocked (aka disk sleep or uninterruptible sleep) - running - sleeping @@ -32,6 +35,7 @@ Using the environment variable `HOST_PROC` the plugin will retrieve process info - wait (freebsd only) - idle (bsd and Linux 4+ only) - paging (linux only) + - parked (linux only) - total_threads (linux only) ### Process State Mappings @@ -52,14 +56,8 @@ Linux FreeBSD Darwin meaning W W none paging (linux kernel < 2.6 only), wait (freebsd) ``` -### Tags: - -None - -### Example Output: +### Example Output ``` -$ telegraf --config ~/ws/telegraf.conf --input-filter processes --test -* Plugin: processes, Collection 1 -> processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042 +processes blocked=8i,running=1i,sleeping=265i,stopped=0i,total=274i,zombie=0i,dead=0i,paging=0i,total_threads=687i 1457478636980905042 ``` diff --git a/plugins/inputs/processes/processes.go b/plugins/inputs/processes/processes.go new file mode 100644 index 000000000..9ee583dba --- /dev/null +++ b/plugins/inputs/processes/processes.go @@ -0,0 +1,7 @@ +package processes + +func (p *Processes) Description() string { + return "Get the number of processes and group them by status" +} + +func (p *Processes) SampleConfig() string { return "" } diff --git a/plugins/inputs/system/processes.go b/plugins/inputs/processes/processes_notwindows.go similarity index 90% rename from plugins/inputs/system/processes.go rename to 
plugins/inputs/processes/processes_notwindows.go index 9258bc417..445e7fb9f 100644 --- a/plugins/inputs/system/processes.go +++ b/plugins/inputs/processes/processes_notwindows.go @@ -1,12 +1,11 @@ // +build !windows -package system +package processes import ( "bytes" "fmt" "io/ioutil" - "log" "os" "os/exec" "path/filepath" @@ -16,22 +15,19 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/linux_sysctl_fs" ) type Processes struct { execPS func() ([]byte, error) readProcFile func(filename string) ([]byte, error) + Log telegraf.Logger + forcePS bool forceProc bool } -func (p *Processes) Description() string { - return "Get the number of processes and group them by status" -} - -func (p *Processes) SampleConfig() string { return "" } - func (p *Processes) Gather(acc telegraf.Accumulator) error { // Get an empty map of metric fields fields := getEmptyFields() @@ -123,8 +119,7 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error { case '?': fields["unknown"] = fields["unknown"].(int64) + int64(1) default: - log.Printf("I! processes: Unknown state [ %s ] from ps", - string(status[0])) + p.Log.Infof("Unknown state %q from ps", string(status[0])) } fields["total"] = fields["total"].(int64) + int64(1) } @@ -133,7 +128,7 @@ func (p *Processes) gatherFromPS(fields map[string]interface{}) error { // get process states from /proc/(pid)/stat files func (p *Processes) gatherFromProc(fields map[string]interface{}) error { - filenames, err := filepath.Glob(GetHostProc() + "/[0-9]*/stat") + filenames, err := filepath.Glob(linux_sysctl_fs.GetHostProc() + "/[0-9]*/stat") if err != nil { return err @@ -177,15 +172,19 @@ func (p *Processes) gatherFromProc(fields map[string]interface{}) error { fields["paging"] = fields["paging"].(int64) + int64(1) case 'I': fields["idle"] = fields["idle"].(int64) + int64(1) + case 'P': + if _, ok := fields["parked"]; ok { + fields["parked"] = fields["parked"].(int64) + int64(1) + } + fields["parked"] = int64(1) default: - log.Printf("I! processes: Unknown state [ %s ] in file %s", - string(stats[0][0]), filename) + p.Log.Infof("Unknown state %q in file %q", string(stats[0][0]), filename) } fields["total"] = fields["total"].(int64) + int64(1) threads, err := strconv.Atoi(string(stats[17])) if err != nil { - log.Printf("I! 
processes: Error parsing thread count: %s", err) + p.Log.Infof("Error parsing thread count: %s", err.Error()) continue } fields["total_threads"] = fields["total_threads"].(int64) + int64(threads) diff --git a/plugins/inputs/system/processes_test.go b/plugins/inputs/processes/processes_test.go similarity index 68% rename from plugins/inputs/system/processes_test.go rename to plugins/inputs/processes/processes_test.go index 5401e1a70..268cef913 100644 --- a/plugins/inputs/system/processes_test.go +++ b/plugins/inputs/processes/processes_test.go @@ -1,10 +1,14 @@ -package system +// +build !windows + +package processes import ( "fmt" "runtime" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -12,6 +16,7 @@ import ( func TestProcesses(t *testing.T) { processes := &Processes{ + Log: testutil.Logger{}, execPS: execPS, readProcFile: readProcFile, } @@ -31,6 +36,7 @@ func TestProcesses(t *testing.T) { func TestFromPS(t *testing.T) { processes := &Processes{ + Log: testutil.Logger{}, execPS: testExecPS, forcePS: true, } @@ -52,6 +58,7 @@ func TestFromPS(t *testing.T) { func TestFromPSError(t *testing.T) { processes := &Processes{ + Log: testutil.Logger{}, execPS: testExecPSError, forcePS: true, } @@ -67,6 +74,7 @@ func TestFromProcFiles(t *testing.T) { } tester := tester{} processes := &Processes{ + Log: testutil.Logger{}, readProcFile: tester.testProcFile, forceProc: true, } @@ -89,6 +97,7 @@ func TestFromProcFilesWithSpaceInCmd(t *testing.T) { } tester := tester{} processes := &Processes{ + Log: testutil.Logger{}, readProcFile: tester.testProcFile2, forceProc: true, } @@ -105,6 +114,60 @@ func TestFromProcFilesWithSpaceInCmd(t *testing.T) { acc.AssertContainsTaggedFields(t, "processes", fields, map[string]string{}) } +// Based on `man 5 proc`, parked processes an be found in a +// limited range of Linux versions: +// +// > P Parked (Linux 3.9 to 3.13 only) +// +// However, we have had reports of this process state on Ubuntu +// Bionic w/ Linux 4.15 (#6270) +func TestParkedProcess(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("Parked process test only relevant on linux") + } + procstat := `88 (watchdog/13) P 2 0 0 0 -1 69238848 0 0 0 0 0 0 0 0 20 0 1 0 20 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 1 0 0 17 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +` + plugin := &Processes{ + Log: testutil.Logger{}, + readProcFile: func(string) ([]byte, error) { + return []byte(procstat), nil + }, + forceProc: true, + } + + var acc testutil.Accumulator + err := plugin.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "processes", + map[string]string{}, + map[string]interface{}{ + "blocked": 0, + "dead": 0, + "idle": 0, + "paging": 0, + "parked": 1, + "running": 0, + "sleeping": 0, + "stopped": 0, + "unknown": 0, + "zombies": 0, + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + } + actual := acc.GetTelegrafMetrics() + for _, a := range actual { + a.RemoveField("total") + a.RemoveField("total_threads") + } + testutil.RequireMetricsEqual(t, expected, actual, + testutil.IgnoreTime()) +} + func testExecPS() ([]byte, error) { return []byte(testPSOut), nil } diff --git a/plugins/inputs/processes/processes_windows.go b/plugins/inputs/processes/processes_windows.go new file mode 100644 index 000000000..567373c7c --- /dev/null +++ b/plugins/inputs/processes/processes_windows.go @@ -0,0 +1,27 @@ +// +build windows + +package processes + +import ( + 
"github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Processes struct { + Log telegraf.Logger +} + +func (e *Processes) Init() error { + e.Log.Warn("Current platform is not supported") + return nil +} + +func (e *Processes) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("processes", func() telegraf.Input { + return &Processes{} + }) +} diff --git a/plugins/inputs/procstat/README.md b/plugins/inputs/procstat/README.md index efa71489b..380321569 100644 --- a/plugins/inputs/procstat/README.md +++ b/plugins/inputs/procstat/README.md @@ -1,7 +1,7 @@ # Procstat Input Plugin The procstat plugin can be used to monitor the system resource usage of one or more processes. -The procstat_lookup metric displays the query information, +The procstat_lookup metric displays the query information, specifically the number of PIDs returned on a search Processes can be selected for monitoring using one of several methods: @@ -11,6 +11,7 @@ Processes can be selected for monitoring using one of several methods: - user - systemd_unit - cgroup +- win_service ### Configuration: @@ -30,6 +31,9 @@ Processes can be selected for monitoring using one of several methods: ## CGroup name or path # cgroup = "systemd/system.slice/nginx.service" + ## Windows service name + # win_service = "" + ## override for process_name ## This is optional; default is sourced from /proc//status # process_name = "bar" @@ -37,9 +41,15 @@ Processes can be selected for monitoring using one of several methods: ## Field name prefix # prefix = "" - ## Add PID as a tag instead of a field; useful to differentiate between - ## processes whose tags are otherwise the same. Can create a large number - ## of series, use judiciously. + ## When true add the full cmdline as a tag. + # cmdline_tag = false + + ## Add the PID as a tag instead of as a field. When collecting multiple + ## processes with otherwise matching tags this setting should be enabled to + ## ensure each process has a unique identity. + ## + ## Enabling this option may result in a large number of series, especially + ## when processes have a short lifetime. # pid_tag = false ## Method to use when finding process IDs. Can be one of 'pgrep', or @@ -68,6 +78,7 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - procstat - tags: - pid (when `pid_tag` is true) + - cmdline (when 'cmdline_tag' is true) - process_name - pidfile (when defined) - exe (when defined) @@ -75,7 +86,11 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - user (when selected) - systemd_unit (when defined) - cgroup (when defined) + - win_service (when defined) - fields: + - child_major_faults (int) + - child_minor_faults (int) + - created_at (int) [epoch in nanoseconds] - cpu_time (int) - cpu_time_guest (float) - cpu_time_guest_nice (float) @@ -85,17 +100,19 @@ implemented as a WMI query. The pattern allows fuzzy matching using only - cpu_time_nice (float) - cpu_time_soft_irq (float) - cpu_time_steal (float) - - cpu_time_stolen (float) - cpu_time_system (float) - cpu_time_user (float) - cpu_usage (float) - involuntary_context_switches (int) + - major_faults (int) - memory_data (int) - memory_locked (int) - memory_rss (int) - memory_stack (int) - memory_swap (int) + - memory_usage (float) - memory_vms (int) + - minor_faults (int) - nice_priority (int) - num_fds (int, *telegraf* may need to be ran as **root**) - num_threads (int) @@ -131,21 +148,26 @@ implemented as a WMI query. 
The pattern allows fuzzy matching using only - write_count (int, *telegraf* may need to be ran as **root**) - procstat_lookup - tags: - - exe (string) - - pid_finder (string) - - pid_file (string) - - pattern (string) - - prefix (string) - - user (string) - - systemd_unit (string) - - cgroup (string) + - exe + - pid_finder + - pid_file + - pattern + - prefix + - user + - systemd_unit + - cgroup + - win_service + - result - fields: - pid_count (int) + - running (int) + - result_code (int, success = 0, lookup_error = 1) + *NOTE: Resource limit > 2147483647 will be reported as 2147483647.* ### Example Output: ``` -procstat,pidfile=/var/run/lxc/dnsmasq.pid,process_name=dnsmasq rlimit_file_locks_soft=2147483647i,rlimit_signals_pending_hard=1758i,voluntary_context_switches=478i,read_bytes=307200i,cpu_time_user=0.01,cpu_time_guest=0,memory_swap=0i,memory_locked=0i,rlimit_num_fds_hard=4096i,rlimit_nice_priority_hard=0i,num_fds=11i,involuntary_context_switches=20i,read_count=23i,memory_rss=1388544i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_rss_hard=2147483647i,nice_priority=20i,rlimit_cpu_time_hard=2147483647i,cpu_time=0i,write_bytes=0i,cpu_time_idle=0,cpu_time_nice=0,memory_data=229376i,memory_stack=135168i,rlimit_cpu_time_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_signals_pending_soft=1758i,write_count=11i,cpu_time_iowait=0,cpu_time_steal=0,cpu_time_stolen=0,rlimit_memory_stack_soft=8388608i,cpu_time_system=0.02,cpu_time_guest_nice=0,rlimit_memory_locked_soft=65536i,rlimit_memory_vms_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_realtime_priority_hard=0i,pid=828i,num_threads=1i,cpu_time_soft_irq=0,rlimit_memory_vms_hard=2147483647i,rlimit_realtime_priority_soft=0i,memory_vms=15884288i,rlimit_memory_stack_hard=2147483647i,cpu_time_irq=0,rlimit_memory_data_soft=2147483647i,rlimit_num_fds_soft=1024i,signals_pending=0i,rlimit_nice_priority_soft=0i,realtime_priority=0i -procstat,exe=influxd,process_name=influxd rlimit_num_fds_hard=16384i,rlimit_signals_pending_hard=1758i,realtime_priority=0i,rlimit_memory_vms_hard=2147483647i,rlimit_signals_pending_soft=1758i,cpu_time_stolen=0,rlimit_memory_stack_hard=2147483647i,rlimit_realtime_priority_hard=0i,cpu_time=0i,pid=500i,voluntary_context_switches=975i,cpu_time_idle=0,memory_rss=3072000i,memory_locked=0i,rlimit_nice_priority_soft=0i,signals_pending=0i,nice_priority=20i,read_bytes=823296i,cpu_time_soft_irq=0,rlimit_memory_data_hard=2147483647i,rlimit_memory_locked_soft=65536i,write_count=8i,cpu_time_irq=0,memory_vms=33501184i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,num_fds=29i,memory_data=229376i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_soft=2147483647i,num_threads=1i,write_bytes=0i,cpu_time_steal=0,rlimit_memory_rss_hard=2147483647i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_usage=0,rlimit_memory_locked_hard=65536i,rlimit_file_locks_hard=2147483647i,involuntary_context_switches=38i,read_count=16851i,memory_swap=0i,rlimit_memory_data_soft=2147483647i,cpu_time_user=0.11,rlimit_cpu_time_hard=2147483647i,rlimit_num_fds_soft=16384i,rlimit_realtime_priority_soft=0i,cpu_time_system=0.27,cpu_time_nice=0,memory_stack=135168i,rlimit_memory_rss_soft=2147483647i +procstat_lookup,host=prash-laptop,pattern=influxd,pid_finder=pgrep,result=success pid_count=1i,running=1i,result_code=0i 1582089700000000000 +procstat,host=prash-laptop,pattern=influxd,process_name=influxd,user=root 
involuntary_context_switches=151496i,child_minor_faults=1061i,child_major_faults=8i,cpu_time_user=2564.81,cpu_time_idle=0,cpu_time_irq=0,cpu_time_guest=0,pid=32025i,major_faults=8609i,created_at=1580107536000000000i,voluntary_context_switches=1058996i,cpu_time_system=616.98,cpu_time_steal=0,cpu_time_guest_nice=0,memory_swap=0i,memory_locked=0i,memory_usage=1.7797634601593018,num_threads=18i,cpu_time_nice=0,cpu_time_iowait=0,cpu_time_soft_irq=0,memory_rss=148643840i,memory_vms=1435688960i,memory_data=0i,memory_stack=0i,minor_faults=1856550i 1582089700000000000 ``` diff --git a/plugins/inputs/procstat/dev/telegraf.conf b/plugins/inputs/procstat/dev/telegraf.conf new file mode 100644 index 000000000..63b150d7c --- /dev/null +++ b/plugins/inputs/procstat/dev/telegraf.conf @@ -0,0 +1,9 @@ +[agent] + interval="1s" + flush_interval="1s" + +[[inputs.procstat]] + exe = "telegraf" + +[[outputs.file]] + files = ["stdout"] diff --git a/plugins/inputs/procstat/native_finder_notwindows.go b/plugins/inputs/procstat/native_finder_notwindows.go index 533b7333a..a1683aad3 100644 --- a/plugins/inputs/procstat/native_finder_notwindows.go +++ b/plugins/inputs/procstat/native_finder_notwindows.go @@ -33,7 +33,7 @@ func (pg *NativeFinder) Pattern(pattern string) ([]PID, error) { return pids, err } -//FullPattern matches on the command line when the proccess was executed +//FullPattern matches on the command line when the process was executed func (pg *NativeFinder) FullPattern(pattern string) ([]PID, error) { var pids []PID regxPattern, err := regexp.Compile(pattern) diff --git a/plugins/inputs/procstat/native_finder_windows_test.go b/plugins/inputs/procstat/native_finder_windows_test.go index 2f51a3f92..ef9c5ffb1 100644 --- a/plugins/inputs/procstat/native_finder_windows_test.go +++ b/plugins/inputs/procstat/native_finder_windows_test.go @@ -11,6 +11,9 @@ import ( ) func TestGather_RealPattern(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } pg, err := NewNativeFinder() require.NoError(t, err) pids, err := pg.Pattern(`procstat`) @@ -20,6 +23,9 @@ func TestGather_RealPattern(t *testing.T) { } func TestGather_RealFullPattern(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } pg, err := NewNativeFinder() require.NoError(t, err) pids, err := pg.FullPattern(`%procstat%`) @@ -29,6 +35,9 @@ func TestGather_RealFullPattern(t *testing.T) { } func TestGather_RealUser(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } user, err := user.Current() require.NoError(t, err) pg, err := NewNativeFinder() diff --git a/plugins/inputs/procstat/pgrep.go b/plugins/inputs/procstat/pgrep.go index 703febaa9..48bf76ed6 100644 --- a/plugins/inputs/procstat/pgrep.go +++ b/plugins/inputs/procstat/pgrep.go @@ -10,7 +10,7 @@ import ( "github.com/influxdata/telegraf/internal" ) -// Implemention of PIDGatherer that execs pgrep to find processes +// Implementation of PIDGatherer that execs pgrep to find processes type Pgrep struct { path string } diff --git a/plugins/inputs/procstat/process.go b/plugins/inputs/procstat/process.go index 30e8f182f..042929f08 100644 --- a/plugins/inputs/procstat/process.go +++ b/plugins/inputs/procstat/process.go @@ -12,16 +12,20 @@ type Process interface { PID() PID Tags() map[string]string + PageFaults() (*process.PageFaultsStat, error) IOCounters() (*process.IOCountersStat, error) MemoryInfo() (*process.MemoryInfoStat, error) Name() (string, error) + Cmdline() (string, error) 
NumCtxSwitches() (*process.NumCtxSwitchesStat, error) NumFDs() (int32, error) NumThreads() (int32, error) Percent(interval time.Duration) (float64, error) + MemoryPercent() (float32, error) Times() (*cpu.TimesStat, error) RlimitUsage(bool) ([]process.RlimitStat, error) Username() (string, error) + CreateTime() (int64, error) } type PIDFinder interface { diff --git a/plugins/inputs/procstat/procstat.go b/plugins/inputs/procstat/procstat.go index 4b253fd1c..61e575370 100644 --- a/plugins/inputs/procstat/procstat.go +++ b/plugins/inputs/procstat/procstat.go @@ -27,11 +27,13 @@ type Procstat struct { Exe string Pattern string Prefix string + CmdLineTag bool `toml:"cmdline_tag"` ProcessName string User string SystemdUnit string CGroup string `toml:"cgroup"` PidTag bool + WinService string `toml:"win_service"` finder PIDFinder @@ -54,6 +56,9 @@ var sampleConfig = ` ## CGroup name or path # cgroup = "systemd/system.slice/nginx.service" + ## Windows service name + # win_service = "" + ## override for process_name ## This is optional; default is sourced from /proc//status # process_name = "bar" @@ -61,9 +66,15 @@ var sampleConfig = ` ## Field name prefix # prefix = "" - ## Add PID as a tag instead of a field; useful to differentiate between - ## processes whose tags are otherwise the same. Can create a large number - ## of series, use judiciously. + ## When true add the full cmdline as a tag. + # cmdline_tag = false + + ## Add the PID as a tag instead of as a field. When collecting multiple + ## processes with otherwise matching tags this setting should be enabled to + ## ensure each process has a unique identity. + ## + ## Enabling this option may result in a large number of series, especially + ## when processes have a short lifetime. # pid_tag = false ## Method to use when finding process IDs. Can be one of 'pgrep', or @@ -89,6 +100,7 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { case "pgrep": p.createPIDFinder = NewPgrep default: + p.PidFinder = "pgrep" p.createPIDFinder = defaultPIDFinder } @@ -97,7 +109,22 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.createProcess = defaultProcess } - procs, err := p.updateProcesses(acc, p.procs) + pids, tags, err := p.findPids(acc) + if err != nil { + fields := map[string]interface{}{ + "pid_count": 0, + "running": 0, + "result_code": 1, + } + tags := map[string]string{ + "pid_finder": p.PidFinder, + "result": "lookup_error", + } + acc.AddFields("procstat_lookup", fields, tags) + return err + } + + procs, err := p.updateProcesses(pids, tags, p.procs) if err != nil { acc.AddError(fmt.Errorf("E! 
Error: procstat getting process, exe: [%s] pidfile: [%s] pattern: [%s] user: [%s] %s", p.Exe, p.PidFile, p.Pattern, p.User, err.Error())) @@ -105,14 +132,23 @@ func (p *Procstat) Gather(acc telegraf.Accumulator) error { p.procs = procs for _, proc := range p.procs { - p.addMetrics(proc, acc) + p.addMetric(proc, acc) } + fields := map[string]interface{}{ + "pid_count": len(pids), + "running": len(procs), + "result_code": 0, + } + tags["pid_finder"] = p.PidFinder + tags["result"] = "success" + acc.AddFields("procstat_lookup", fields, tags) + return nil } // Add metrics a single Process -func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) { +func (p *Procstat) addMetric(proc Process, acc telegraf.Accumulator) { var prefix string if p.Prefix != "" { prefix = p.Prefix + "_" @@ -141,6 +177,16 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) { fields["pid"] = int32(proc.PID()) } + //If cmd_line tag is true and it is not already set add cmdline as a tag + if p.CmdLineTag { + if _, ok := proc.Tags()["cmdline"]; !ok { + Cmdline, err := proc.Cmdline() + if err == nil { + proc.Tags()["cmdline"] = Cmdline + } + } + } + numThreads, err := proc.NumThreads() if err == nil { fields[prefix+"num_threads"] = numThreads @@ -157,6 +203,14 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) { fields[prefix+"involuntary_context_switches"] = ctx.Involuntary } + faults, err := proc.PageFaults() + if err == nil { + fields[prefix+"minor_faults"] = faults.MinorFaults + fields[prefix+"major_faults"] = faults.MajorFaults + fields[prefix+"child_minor_faults"] = faults.ChildMinorFaults + fields[prefix+"child_major_faults"] = faults.ChildMajorFaults + } + io, err := proc.IOCounters() if err == nil { fields[prefix+"read_count"] = io.ReadCount @@ -165,6 +219,11 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) { fields[prefix+"write_bytes"] = io.WriteBytes } + createdAt, err := proc.CreateTime() //Returns epoch in ms + if err == nil { + fields[prefix+"created_at"] = createdAt * 1000000 //Convert ms to ns + } + cpu_time, err := proc.Times() if err == nil { fields[prefix+"cpu_time_user"] = cpu_time.User @@ -175,7 +234,6 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) { fields[prefix+"cpu_time_irq"] = cpu_time.Irq fields[prefix+"cpu_time_soft_irq"] = cpu_time.Softirq fields[prefix+"cpu_time_steal"] = cpu_time.Steal - fields[prefix+"cpu_time_stolen"] = cpu_time.Stolen fields[prefix+"cpu_time_guest"] = cpu_time.Guest fields[prefix+"cpu_time_guest_nice"] = cpu_time.GuestNice } @@ -195,6 +253,11 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) { fields[prefix+"memory_locked"] = mem.Locked } + mem_perc, err := proc.MemoryPercent() + if err == nil { + fields[prefix+"memory_usage"] = mem_perc + } + rlims, err := proc.RlimitUsage(true) if err == nil { for _, rlim := range rlims { @@ -238,17 +301,16 @@ func (p *Procstat) addMetrics(proc Process, acc telegraf.Accumulator) { } // Update monitored Processes -func (p *Procstat) updateProcesses(acc telegraf.Accumulator, prevInfo map[PID]Process) (map[PID]Process, error) { - pids, tags, err := p.findPids(acc) - if err != nil { - return nil, err - } - +func (p *Procstat) updateProcesses(pids []PID, tags map[string]string, prevInfo map[PID]Process) (map[PID]Process, error) { procs := make(map[PID]Process, len(prevInfo)) for _, pid := range pids { info, ok := prevInfo[pid] if ok { + // Assumption: if a process has no name, it probably does not exist + if name, _ 
:= info.Name(); name == "" { + continue + } procs[pid] = info } else { proc, err := p.createProcess(pid) @@ -256,6 +318,10 @@ func (p *Procstat) updateProcesses(acc telegraf.Accumulator, prevInfo map[PID]Pr // No problem; process may have ended after we found it continue } + // Assumption: if a process has no name, it probably does not exist + if name, _ := proc.Name(); name == "" { + continue + } procs[pid] = proc // Add initial tags @@ -277,7 +343,6 @@ func (p *Procstat) updateProcesses(acc telegraf.Accumulator, prevInfo map[PID]Pr // Create and return PIDGatherer lazily func (p *Procstat) getPIDFinder() (PIDFinder, error) { - if p.finder == nil { f, err := p.createPIDFinder() if err != nil { @@ -317,22 +382,14 @@ func (p *Procstat) findPids(acc telegraf.Accumulator) ([]PID, map[string]string, } else if p.CGroup != "" { pids, err = p.cgroupPIDs() tags = map[string]string{"cgroup": p.CGroup} + } else if p.WinService != "" { + pids, err = p.winServicePIDs() + tags = map[string]string{"win_service": p.WinService} } else { - err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, or cgroup must be specified") + err = fmt.Errorf("Either exe, pid_file, user, pattern, systemd_unit, cgroup, or win_service must be specified") } - rTags := make(map[string]string) - for k, v := range tags { - rTags[k] = v - } - - //adds a metric with info on the pgrep query - fields := make(map[string]interface{}) - tags["pid_finder"] = p.PidFinder - fields["pid_count"] = len(pids) - acc.AddFields("procstat_lookup", fields, tags) - - return pids, rTags, err + return pids, tags, err } // execCommand is so tests can mock out exec.Command usage. @@ -353,7 +410,7 @@ func (p *Procstat) systemdUnitPIDs() ([]PID, error) { if !bytes.Equal(kv[0], []byte("MainPID")) { continue } - if len(kv[1]) == 0 { + if len(kv[1]) == 0 || bytes.Equal(kv[1], []byte("0")) { return nil, nil } pid, err := strconv.Atoi(string(kv[1])) @@ -391,6 +448,19 @@ func (p *Procstat) cgroupPIDs() ([]PID, error) { return pids, nil } +func (p *Procstat) winServicePIDs() ([]PID, error) { + var pids []PID + + pid, err := queryPidWithWinServiceName(p.WinService) + if err != nil { + return pids, err + } + + pids = append(pids, PID(pid)) + + return pids, nil +} + func init() { inputs.Add("procstat", func() telegraf.Input { return &Procstat{} diff --git a/plugins/inputs/procstat/procstat_test.go b/plugins/inputs/procstat/procstat_test.go index 7a2eaf9ee..e1ee8ab92 100644 --- a/plugins/inputs/procstat/procstat_test.go +++ b/plugins/inputs/procstat/procstat_test.go @@ -76,6 +76,10 @@ func (pg *testPgrep) PidFile(path string) ([]PID, error) { return pg.pids, pg.err } +func (p *testProc) Cmdline() (string, error) { + return "test_proc", nil +} + func (pg *testPgrep) Pattern(pattern string) ([]PID, error) { return pg.pids, pg.err } @@ -112,6 +116,10 @@ func (p *testProc) Tags() map[string]string { return p.tags } +func (p *testProc) PageFaults() (*process.PageFaultsStat, error) { + return &process.PageFaultsStat{}, nil +} + func (p *testProc) IOCounters() (*process.IOCountersStat, error) { return &process.IOCountersStat{}, nil } @@ -140,6 +148,14 @@ func (p *testProc) Percent(interval time.Duration) (float64, error) { return 0, nil } +func (p *testProc) MemoryPercent() (float32, error) { + return 0, nil +} + +func (p *testProc) CreateTime() (int64, error) { + return 0, nil +} + func (p *testProc) Times() (*cpu.TimesStat, error) { return &cpu.TimesStat{}, nil } diff --git a/plugins/inputs/procstat/win_service_notwindows.go 
b/plugins/inputs/procstat/win_service_notwindows.go new file mode 100644 index 000000000..3d539d9f9 --- /dev/null +++ b/plugins/inputs/procstat/win_service_notwindows.go @@ -0,0 +1,11 @@ +// +build !windows + +package procstat + +import ( + "fmt" +) + +func queryPidWithWinServiceName(winServiceName string) (uint32, error) { + return 0, fmt.Errorf("os not support win_service option") +} diff --git a/plugins/inputs/procstat/win_service_windows.go b/plugins/inputs/procstat/win_service_windows.go new file mode 100644 index 000000000..06dffc847 --- /dev/null +++ b/plugins/inputs/procstat/win_service_windows.go @@ -0,0 +1,48 @@ +// +build windows + +package procstat + +import ( + "unsafe" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/svc/mgr" +) + +func getService(name string) (*mgr.Service, error) { + m, err := mgr.Connect() + if err != nil { + return nil, err + } + defer m.Disconnect() + + srv, err := m.OpenService(name) + if err != nil { + return nil, err + } + + return srv, nil +} + +func queryPidWithWinServiceName(winServiceName string) (uint32, error) { + srv, err := getService(winServiceName) + if err != nil { + return 0, err + } + + var p *windows.SERVICE_STATUS_PROCESS + var bytesNeeded uint32 + var buf []byte + + if err := windows.QueryServiceStatusEx(srv.Handle, windows.SC_STATUS_PROCESS_INFO, nil, 0, &bytesNeeded); err != windows.ERROR_INSUFFICIENT_BUFFER { + return 0, err + } + + buf = make([]byte, bytesNeeded) + p = (*windows.SERVICE_STATUS_PROCESS)(unsafe.Pointer(&buf[0])) + if err := windows.QueryServiceStatusEx(srv.Handle, windows.SC_STATUS_PROCESS_INFO, &buf[0], uint32(len(buf)), &bytesNeeded); err != nil { + return 0, err + } + + return p.ProcessId, nil +} diff --git a/plugins/inputs/prometheus/README.md b/plugins/inputs/prometheus/README.md index 227f3f737..b4e587452 100644 --- a/plugins/inputs/prometheus/README.md +++ b/plugins/inputs/prometheus/README.md @@ -11,11 +11,46 @@ in Prometheus format. ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] + ## Metric version controls the mapping from Prometheus metrics into + ## Telegraf metrics. When using the prometheus_client output, use the same + ## value in both plugins to ensure metrics are round-tripped without + ## modification. + ## + ## example: metric_version = 1; deprecated in 1.13 + ## metric_version = 2; recommended version + # metric_version = 1 + ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - ## Use bearer token for authorization - # bearer_token = /path/to/bearer/token + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + ## Restricts Kubernetes monitoring to a single namespace + ## ex: monitor_kubernetes_pods_namespace = "default" + # monitor_kubernetes_pods_namespace = "" + # label selector to target pods which have the label + # kubernetes_label_selector = "env=dev,app=nginx" + # field selector to target pods + # eg. 
To scrape pods on a specific node + # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## HTTP Basic Authentication username and password. ('bearer_token' and + ## 'bearer_token_string' take priority) + # username = "" + # password = "" ## Specify timeout duration for slower prometheus clients (default is 3s) # response_timeout = "3s" @@ -28,6 +63,8 @@ in Prometheus format. # insecure_skip_verify = false ``` +`urls` can also contain unix sockets. If a path other than the default `/metrics` is required (the default applies to both http[s] and unix sockets), add it as a `path` query parameter, for example: `unix:///var/run/prometheus.sock?path=/custom/metrics` + #### Kubernetes Service Discovery URLs listed in the `kubernetes_services` parameter will be expanded @@ -37,6 +74,20 @@ by looking up all A records assigned to the hostname as described in This method can be used to locate all [Kubernetes headless services](https://kubernetes.io/docs/concepts/services-networking/service/#headless-services). +#### Kubernetes scraping + +Enabling this option allows the plugin to scrape Kubernetes pods for Prometheus annotations. The plugin can run either +inside the Kubernetes cluster or outside of it, in which case it uses a kubeconfig file to determine which cluster to +monitor. +Currently the following annotations are supported: + +* `prometheus.io/scrape` Enable scraping for this pod. +* `prometheus.io/scheme` If the metrics endpoint is secured then you will need to set this to `https` and most likely set the tls config. (default 'http') +* `prometheus.io/path` Override the path for the metrics endpoint on the service. (default '/metrics') +* `prometheus.io/port` Used to override the port. (default 9102) + +Using the `monitor_kubernetes_pods_namespace` option allows you to limit scraping to pods in a single namespace. 
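For illustration, a minimal sketch of the pod-side configuration these annotations imply is shown below; the pod name (`my-app`), namespace, image, port, and metrics path are hypothetical placeholders, and only the `prometheus.io/*` annotation keys and their defaults come from the documentation above.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: my-app                           # hypothetical pod name
  namespace: default                     # must match monitor_kubernetes_pods_namespace, if that option is set
  annotations:
    prometheus.io/scrape: "true"         # opt this pod in to scraping
    prometheus.io/scheme: "http"         # default; use "https" together with the plugin's tls settings
    prometheus.io/path: "/stats/metrics" # default is /metrics
    prometheus.io/port: "8080"           # default is 9102
spec:
  containers:
    - name: my-app
      image: example/my-app:latest       # placeholder image
      ports:
        - containerPort: 8080
```

With these annotations the plugin would scrape `http://<pod IP>:8080/stats/metrics` and, per the `registerPod` handler in `kubernetes.go`, tag the resulting metrics with the pod's name, namespace, labels, and annotations.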
+ #### Bearer Token If set, the file specified by the `bearer_token` parameter will be read on @@ -103,3 +154,18 @@ cpu_usage_user,cpu=cpu1,url=http://example.org:9273/metrics gauge=5.829145728641 cpu_usage_user,cpu=cpu2,url=http://example.org:9273/metrics gauge=2.119071644805144 1505776751000000000 cpu_usage_user,cpu=cpu3,url=http://example.org:9273/metrics gauge=1.5228426395944945 1505776751000000000 ``` + +**Output (when metric_version = 2)** +``` +prometheus,quantile=1,url=http://example.org:9273/metrics go_gc_duration_seconds=0.005574303 1556075100000000000 +prometheus,quantile=0.75,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0001046 1556075100000000000 +prometheus,quantile=0.5,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000719 1556075100000000000 +prometheus,quantile=0.25,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000579 1556075100000000000 +prometheus,quantile=0,url=http://example.org:9273/metrics go_gc_duration_seconds=0.0000349 1556075100000000000 +prometheus,url=http://example.org:9273/metrics go_gc_duration_seconds_count=324,go_gc_duration_seconds_sum=0.091340353 1556075100000000000 +prometheus,url=http://example.org:9273/metrics go_goroutines=15 1556075100000000000 +prometheus,cpu=cpu0,url=http://example.org:9273/metrics cpu_usage_user=1.513622603430151 1505776751000000000 +prometheus,cpu=cpu1,url=http://example.org:9273/metrics cpu_usage_user=5.829145728641773 1505776751000000000 +prometheus,cpu=cpu2,url=http://example.org:9273/metrics cpu_usage_user=2.119071644805144 1505776751000000000 +prometheus,cpu=cpu3,url=http://example.org:9273/metrics cpu_usage_user=1.5228426395944945 1505776751000000000 +``` diff --git a/plugins/inputs/prometheus/kubernetes.go b/plugins/inputs/prometheus/kubernetes.go new file mode 100644 index 000000000..16f69cbd1 --- /dev/null +++ b/plugins/inputs/prometheus/kubernetes.go @@ -0,0 +1,241 @@ +package prometheus + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "net" + "net/url" + "os/user" + "path/filepath" + "sync" + "time" + + "github.com/ericchiang/k8s" + corev1 "github.com/ericchiang/k8s/apis/core/v1" + "github.com/ghodss/yaml" +) + +type payload struct { + eventype string + pod *corev1.Pod +} + +// loadClient parses a kubeconfig from a file and returns a Kubernetes +// client. It does not support extensions or client auth providers. +func loadClient(kubeconfigPath string) (*k8s.Client, error) { + data, err := ioutil.ReadFile(kubeconfigPath) + if err != nil { + return nil, fmt.Errorf("failed reading '%s': %v", kubeconfigPath, err) + } + + // Unmarshal YAML into a Kubernetes config object. 
+ var config k8s.Config + if err := yaml.Unmarshal(data, &config); err != nil { + return nil, err + } + return k8s.NewClient(&config) +} + +func (p *Prometheus) start(ctx context.Context) error { + client, err := k8s.NewInClusterClient() + if err != nil { + u, err := user.Current() + if err != nil { + return fmt.Errorf("Failed to get current user - %v", err) + } + + configLocation := filepath.Join(u.HomeDir, ".kube/config") + if p.KubeConfig != "" { + configLocation = p.KubeConfig + } + client, err = loadClient(configLocation) + if err != nil { + return err + } + } + + p.wg = sync.WaitGroup{} + + p.wg.Add(1) + go func() { + defer p.wg.Done() + for { + select { + case <-ctx.Done(): + return + case <-time.After(time.Second): + err := p.watch(ctx, client) + if err != nil { + p.Log.Errorf("Unable to watch resources: %s", err.Error()) + } + } + } + }() + + return nil +} + +// An edge case exists if a pod goes offline at the same time a new pod is created +// (without the scrape annotations). K8s may re-assign the old pod ip to the non-scrape +// pod, causing errors in the logs. This is only true if the pod going offline is not +// directed to do so by K8s. +func (p *Prometheus) watch(ctx context.Context, client *k8s.Client) error { + + selectors := podSelector(p) + + pod := &corev1.Pod{} + watcher, err := client.Watch(ctx, p.PodNamespace, &corev1.Pod{}, selectors...) + if err != nil { + return err + } + defer watcher.Close() + + for { + select { + case <-ctx.Done(): + return nil + default: + pod = &corev1.Pod{} + // An error here means we need to reconnect the watcher. + eventType, err := watcher.Next(pod) + if err != nil { + return err + } + + // If the pod is not "ready", there will be no ip associated with it. + if pod.GetMetadata().GetAnnotations()["prometheus.io/scrape"] != "true" || + !podReady(pod.Status.GetContainerStatuses()) { + continue + } + + switch eventType { + case k8s.EventAdded: + registerPod(pod, p) + case k8s.EventModified: + // To avoid multiple actions for each event, unregister on the first event + // in the delete sequence, when the containers are still "ready". + if pod.Metadata.GetDeletionTimestamp() != nil { + unregisterPod(pod, p) + } else { + registerPod(pod, p) + } + } + } + } +} + +func podReady(statuss []*corev1.ContainerStatus) bool { + if len(statuss) == 0 { + return false + } + for _, cs := range statuss { + if !cs.GetReady() { + return false + } + } + return true +} + +func podSelector(p *Prometheus) []k8s.Option { + options := []k8s.Option{} + + if len(p.KubernetesLabelSelector) > 0 { + options = append(options, k8s.QueryParam("labelSelector", p.KubernetesLabelSelector)) + } + + if len(p.KubernetesFieldSelector) > 0 { + options = append(options, k8s.QueryParam("fieldSelector", p.KubernetesFieldSelector)) + } + + return options + +} + +func registerPod(pod *corev1.Pod, p *Prometheus) { + if p.kubernetesPods == nil { + p.kubernetesPods = map[string]URLAndAddress{} + } + targetURL := getScrapeURL(pod) + if targetURL == nil { + return + } + + log.Printf("D! [inputs.prometheus] will scrape metrics from %q", *targetURL) + // add annotation as metrics tags + tags := pod.GetMetadata().GetAnnotations() + if tags == nil { + tags = map[string]string{} + } + tags["pod_name"] = pod.GetMetadata().GetName() + tags["namespace"] = pod.GetMetadata().GetNamespace() + // add labels as metrics tags + for k, v := range pod.GetMetadata().GetLabels() { + tags[k] = v + } + URL, err := url.Parse(*targetURL) + if err != nil { + log.Printf("E! 
[inputs.prometheus] could not parse URL %q: %s", *targetURL, err.Error()) + return + } + podURL := p.AddressToURL(URL, URL.Hostname()) + p.lock.Lock() + p.kubernetesPods[podURL.String()] = URLAndAddress{ + URL: podURL, + Address: URL.Hostname(), + OriginalURL: URL, + Tags: tags, + } + p.lock.Unlock() +} + +func getScrapeURL(pod *corev1.Pod) *string { + ip := pod.Status.GetPodIP() + if ip == "" { + // return as if scrape was disabled, we will be notified again once the pod + // has an IP + return nil + } + + scheme := pod.GetMetadata().GetAnnotations()["prometheus.io/scheme"] + path := pod.GetMetadata().GetAnnotations()["prometheus.io/path"] + port := pod.GetMetadata().GetAnnotations()["prometheus.io/port"] + + if scheme == "" { + scheme = "http" + } + if port == "" { + port = "9102" + } + if path == "" { + path = "/metrics" + } + + u := &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(ip, port), + Path: path, + } + + x := u.String() + + return &x +} + +func unregisterPod(pod *corev1.Pod, p *Prometheus) { + url := getScrapeURL(pod) + if url == nil { + return + } + + log.Printf("D! [inputs.prometheus] registered a delete request for %q in namespace %q", + pod.GetMetadata().GetName(), pod.GetMetadata().GetNamespace()) + + p.lock.Lock() + defer p.lock.Unlock() + if _, ok := p.kubernetesPods[*url]; ok { + delete(p.kubernetesPods, *url) + log.Printf("D! [inputs.prometheus] will stop scraping for %q", *url) + } +} diff --git a/plugins/inputs/prometheus/kubernetes_test.go b/plugins/inputs/prometheus/kubernetes_test.go new file mode 100644 index 000000000..8568ac946 --- /dev/null +++ b/plugins/inputs/prometheus/kubernetes_test.go @@ -0,0 +1,155 @@ +package prometheus + +import ( + "github.com/ericchiang/k8s" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + + v1 "github.com/ericchiang/k8s/apis/core/v1" + metav1 "github.com/ericchiang/k8s/apis/meta/v1" +) + +func TestScrapeURLNoAnnotations(t *testing.T) { + p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} + p.GetMetadata().Annotations = map[string]string{} + url := getScrapeURL(p) + assert.Nil(t, url) +} + +func TestScrapeURLAnnotationsNoScrape(t *testing.T) { + p := &v1.Pod{Metadata: &metav1.ObjectMeta{}} + p.Metadata.Name = str("myPod") + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "false"} + url := getScrapeURL(p) + assert.Nil(t, url) +} + +func TestScrapeURLAnnotations(t *testing.T) { + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + url := getScrapeURL(p) + assert.Equal(t, "http://127.0.0.1:9102/metrics", *url) +} + +func TestScrapeURLAnnotationsCustomPort(t *testing.T) { + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/port": "9000"} + url := getScrapeURL(p) + assert.Equal(t, "http://127.0.0.1:9000/metrics", *url) +} + +func TestScrapeURLAnnotationsCustomPath(t *testing.T) { + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "mymetrics"} + url := getScrapeURL(p) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) +} + +func TestScrapeURLAnnotationsCustomPathWithSep(t *testing.T) { + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true", "prometheus.io/path": "/mymetrics"} + url := getScrapeURL(p) + assert.Equal(t, "http://127.0.0.1:9102/mymetrics", *url) +} + +func TestAddPod(t *testing.T) { + prom := &Prometheus{Log: testutil.Logger{}} + + p := pod() + p.Metadata.Annotations = 
map[string]string{"prometheus.io/scrape": "true"} + registerPod(p, prom) + assert.Equal(t, 1, len(prom.kubernetesPods)) +} + +func TestAddMultipleDuplicatePods(t *testing.T) { + prom := &Prometheus{Log: testutil.Logger{}} + + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + registerPod(p, prom) + p.Metadata.Name = str("Pod2") + registerPod(p, prom) + assert.Equal(t, 1, len(prom.kubernetesPods)) +} + +func TestAddMultiplePods(t *testing.T) { + prom := &Prometheus{Log: testutil.Logger{}} + + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + registerPod(p, prom) + p.Metadata.Name = str("Pod2") + p.Status.PodIP = str("127.0.0.2") + registerPod(p, prom) + assert.Equal(t, 2, len(prom.kubernetesPods)) +} + +func TestDeletePods(t *testing.T) { + prom := &Prometheus{Log: testutil.Logger{}} + + p := pod() + p.Metadata.Annotations = map[string]string{"prometheus.io/scrape": "true"} + registerPod(p, prom) + unregisterPod(p, prom) + assert.Equal(t, 0, len(prom.kubernetesPods)) +} + +func TestPodSelector(t *testing.T) { + + cases := []struct { + expected []k8s.Option + labelselector string + fieldselector string + }{ + { + expected: []k8s.Option{ + k8s.QueryParam("labelSelector", "key1=val1,key2=val2,key3"), + k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"), + }, + labelselector: "key1=val1,key2=val2,key3", + fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com", + }, + { + expected: []k8s.Option{ + k8s.QueryParam("labelSelector", "key1"), + k8s.QueryParam("fieldSelector", "spec.nodeName=ip-1-2-3-4.acme.com"), + }, + labelselector: "key1", + fieldselector: "spec.nodeName=ip-1-2-3-4.acme.com", + }, + { + expected: []k8s.Option{ + k8s.QueryParam("labelSelector", "key1"), + k8s.QueryParam("fieldSelector", "somefield"), + }, + labelselector: "key1", + fieldselector: "somefield", + }, + } + + for _, c := range cases { + prom := &Prometheus{ + Log: testutil.Logger{}, + KubernetesLabelSelector: c.labelselector, + KubernetesFieldSelector: c.fieldselector, + } + + output := podSelector(prom) + + assert.Equal(t, len(output), len(c.expected)) + } +} + +func pod() *v1.Pod { + p := &v1.Pod{Metadata: &metav1.ObjectMeta{}, Status: &v1.PodStatus{}, Spec: &v1.PodSpec{}} + p.Status.PodIP = str("127.0.0.1") + p.Metadata.Name = str("myPod") + p.Metadata.Namespace = str("default") + return p +} + +func str(x string) *string { + return &x +} diff --git a/plugins/inputs/prometheus/parser.go b/plugins/inputs/prometheus/parser.go index 6584fbc05..6427c3f8c 100644 --- a/plugins/inputs/prometheus/parser.go +++ b/plugins/inputs/prometheus/parser.go @@ -15,12 +15,151 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/matttproud/golang_protobuf_extensions/pbutil" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" ) +// Parse returns a slice of Metrics from a text representation of a +// metrics +func ParseV2(buf []byte, header http.Header) ([]telegraf.Metric, error) { + var metrics []telegraf.Metric + var parser expfmt.TextParser + // parse even if the buffer begins with a newline + buf = bytes.TrimPrefix(buf, []byte("\n")) + // Read raw data + buffer := bytes.NewBuffer(buf) + reader := bufio.NewReader(buffer) + + mediatype, params, err := mime.ParseMediaType(header.Get("Content-Type")) + // Prepare output + metricFamilies := make(map[string]*dto.MetricFamily) + + if err == nil && mediatype == "application/vnd.google.protobuf" && + params["encoding"] == 
"delimited" && + params["proto"] == "io.prometheus.client.MetricFamily" { + for { + mf := &dto.MetricFamily{} + if _, ierr := pbutil.ReadDelimited(reader, mf); ierr != nil { + if ierr == io.EOF { + break + } + return nil, fmt.Errorf("reading metric family protocol buffer failed: %s", ierr) + } + metricFamilies[mf.GetName()] = mf + } + } else { + metricFamilies, err = parser.TextToMetricFamilies(reader) + if err != nil { + return nil, fmt.Errorf("reading text format failed: %s", err) + } + } + + // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds + now := time.Now() + // read metrics + for metricName, mf := range metricFamilies { + for _, m := range mf.Metric { + // reading tags + tags := makeLabels(m) + + if mf.GetType() == dto.MetricType_SUMMARY { + // summary metric + telegrafMetrics := makeQuantilesV2(m, tags, metricName, mf.GetType(), now) + metrics = append(metrics, telegrafMetrics...) + } else if mf.GetType() == dto.MetricType_HISTOGRAM { + // histogram metric + telegrafMetrics := makeBucketsV2(m, tags, metricName, mf.GetType(), now) + metrics = append(metrics, telegrafMetrics...) + } else { + // standard metric + // reading fields + fields := make(map[string]interface{}) + fields = getNameAndValueV2(m, metricName) + // converting to telegraf metric + if len(fields) > 0 { + var t time.Time + if m.TimestampMs != nil && *m.TimestampMs > 0 { + t = time.Unix(0, *m.TimestampMs*1000000) + } else { + t = now + } + metric, err := metric.New("prometheus", tags, fields, t, valueType(mf.GetType())) + if err == nil { + metrics = append(metrics, metric) + } + } + } + } + } + + return metrics, err +} + +// Get Quantiles for summary metric & Buckets for histogram +func makeQuantilesV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { + var metrics []telegraf.Metric + fields := make(map[string]interface{}) + var t time.Time + if m.TimestampMs != nil && *m.TimestampMs > 0 { + t = time.Unix(0, *m.TimestampMs*1000000) + } else { + t = now + } + fields[metricName+"_count"] = float64(m.GetSummary().GetSampleCount()) + fields[metricName+"_sum"] = float64(m.GetSummary().GetSampleSum()) + met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) + if err == nil { + metrics = append(metrics, met) + } + + for _, q := range m.GetSummary().Quantile { + newTags := tags + fields = make(map[string]interface{}) + + newTags["quantile"] = fmt.Sprint(q.GetQuantile()) + fields[metricName] = float64(q.GetValue()) + + quantileMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) + if err == nil { + metrics = append(metrics, quantileMetric) + } + } + return metrics +} + +// Get Buckets from histogram metric +func makeBucketsV2(m *dto.Metric, tags map[string]string, metricName string, metricType dto.MetricType, now time.Time) []telegraf.Metric { + var metrics []telegraf.Metric + fields := make(map[string]interface{}) + var t time.Time + if m.TimestampMs != nil && *m.TimestampMs > 0 { + t = time.Unix(0, *m.TimestampMs*1000000) + } else { + t = now + } + fields[metricName+"_count"] = float64(m.GetHistogram().GetSampleCount()) + fields[metricName+"_sum"] = float64(m.GetHistogram().GetSampleSum()) + + met, err := metric.New("prometheus", tags, fields, t, valueType(metricType)) + if err == nil { + metrics = append(metrics, met) + } + + for _, b := range m.GetHistogram().Bucket { + newTags := tags + fields = make(map[string]interface{}) + newTags["le"] = 
fmt.Sprint(b.GetUpperBound()) + fields[metricName+"_bucket"] = float64(b.GetCumulativeCount()) + + histogramMetric, err := metric.New("prometheus", newTags, fields, t, valueType(metricType)) + if err == nil { + metrics = append(metrics, histogramMetric) + } + } + return metrics +} + // Parse returns a slice of Metrics from a text representation of a // metrics func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { @@ -56,6 +195,8 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { } } + // make sure all metrics have a consistent timestamp so that metrics don't straddle two different seconds + now := time.Now() // read metrics for metricName, mf := range metricFamilies { for _, m := range mf.Metric { @@ -84,7 +225,7 @@ func Parse(buf []byte, header http.Header) ([]telegraf.Metric, error) { if m.TimestampMs != nil && *m.TimestampMs > 0 { t = time.Unix(0, *m.TimestampMs*1000000) } else { - t = time.Now() + t = now } metric, err := metric.New(metricName, tags, fields, t, valueType(mf.GetType())) if err == nil { @@ -159,3 +300,22 @@ func getNameAndValue(m *dto.Metric) map[string]interface{} { } return fields } + +// Get name and value from metric +func getNameAndValueV2(m *dto.Metric, metricName string) map[string]interface{} { + fields := make(map[string]interface{}) + if m.Gauge != nil { + if !math.IsNaN(m.GetGauge().GetValue()) { + fields[metricName] = float64(m.GetGauge().GetValue()) + } + } else if m.Counter != nil { + if !math.IsNaN(m.GetCounter().GetValue()) { + fields[metricName] = float64(m.GetCounter().GetValue()) + } + } else if m.Untyped != nil { + if !math.IsNaN(m.GetUntyped().GetValue()) { + fields[metricName] = float64(m.GetUntyped().GetValue()) + } + } + return fields +} diff --git a/plugins/inputs/prometheus/prometheus.go b/plugins/inputs/prometheus/prometheus.go index 23709790f..ad98a1987 100644 --- a/plugins/inputs/prometheus/prometheus.go +++ b/plugins/inputs/prometheus/prometheus.go @@ -1,10 +1,10 @@ package prometheus import ( + "context" "errors" "fmt" "io/ioutil" - "log" "net" "net/http" "net/url" @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3` +const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3,*/*;q=0.1` type Prometheus struct { // An array of urls to scrape metrics from. @@ -26,25 +26,91 @@ type Prometheus struct { // An array of Kubernetes services to scrape metrics from. 
KubernetesServices []string + // Location of kubernetes config file + KubeConfig string + + // Label Selector/s for Kubernetes + KubernetesLabelSelector string `toml:"kubernetes_label_selector"` + + // Field Selector/s for Kubernetes + KubernetesFieldSelector string `toml:"kubernetes_field_selector"` + // Bearer Token authorization file path - BearerToken string `toml:"bearer_token"` + BearerToken string `toml:"bearer_token"` + BearerTokenString string `toml:"bearer_token_string"` + + // Basic authentication credentials + Username string `toml:"username"` + Password string `toml:"password"` ResponseTimeout internal.Duration `toml:"response_timeout"` + MetricVersion int `toml:"metric_version"` + + URLTag string `toml:"url_tag"` + tls.ClientConfig + Log telegraf.Logger + client *http.Client + + // Should we scrape Kubernetes services for prometheus annotations + MonitorPods bool `toml:"monitor_kubernetes_pods"` + PodNamespace string `toml:"monitor_kubernetes_pods_namespace"` + lock sync.Mutex + kubernetesPods map[string]URLAndAddress + cancel context.CancelFunc + wg sync.WaitGroup } var sampleConfig = ` ## An array of urls to scrape metrics from. urls = ["http://localhost:9100/metrics"] + ## Metric version controls the mapping from Prometheus metrics into + ## Telegraf metrics. When using the prometheus_client output, use the same + ## value in both plugins to ensure metrics are round-tripped without + ## modification. + ## + ## example: metric_version = 1; deprecated in 1.13 + ## metric_version = 2; recommended version + # metric_version = 1 + + ## Url tag name (tag containing scrapped url. optional, default is "url") + # url_tag = "scrapeUrl" + ## An array of Kubernetes services to scrape metrics from. # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - ## Use bearer token for authorization - # bearer_token = /path/to/bearer/token + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to 'https' & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + ## Restricts Kubernetes monitoring to a single namespace + ## ex: monitor_kubernetes_pods_namespace = "default" + # monitor_kubernetes_pods_namespace = "" + # label selector to target pods which have the label + # kubernetes_label_selector = "env=dev,app=nginx" + # field selector to target pods + # eg. To scrape pods on a specific node + # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + # bearer_token = "/path/to/bearer/token" + ## OR + # bearer_token_string = "abc_123" + + ## HTTP Basic Authentication username and password. 
('bearer_token' and + ## 'bearer_token_string' take priority) + # username = "" + # password = "" ## Specify timeout duration for slower prometheus clients (default is 3s) # response_timeout = "3s" @@ -65,6 +131,14 @@ func (p *Prometheus) Description() string { return "Read metrics from one or many prometheus clients" } +func (p *Prometheus) Init() error { + if p.MetricVersion != 2 { + p.Log.Warnf("Use of deprecated configuration: 'metric_version = 1'; please update to 'metric_version = 2'") + } + + return nil +} + var ErrProtocolError = errors.New("prometheus protocol error") func (p *Prometheus) AddressToURL(u *url.URL, address string) *url.URL { @@ -90,32 +164,45 @@ type URLAndAddress struct { OriginalURL *url.URL URL *url.URL Address string + Tags map[string]string } -func (p *Prometheus) GetAllURLs() ([]URLAndAddress, error) { - allURLs := make([]URLAndAddress, 0) +func (p *Prometheus) GetAllURLs() (map[string]URLAndAddress, error) { + allURLs := make(map[string]URLAndAddress, 0) for _, u := range p.URLs { URL, err := url.Parse(u) if err != nil { - log.Printf("prometheus: Could not parse %s, skipping it. Error: %s", u, err) + p.Log.Errorf("Could not parse %q, skipping it. Error: %s", u, err.Error()) continue } - - allURLs = append(allURLs, URLAndAddress{URL: URL, OriginalURL: URL}) + allURLs[URL.String()] = URLAndAddress{URL: URL, OriginalURL: URL} } + + p.lock.Lock() + defer p.lock.Unlock() + // loop through all pods scraped via the prometheus annotation on the pods + for k, v := range p.kubernetesPods { + allURLs[k] = v + } + for _, service := range p.KubernetesServices { URL, err := url.Parse(service) if err != nil { return nil, err } + resolvedAddresses, err := net.LookupHost(URL.Hostname()) if err != nil { - log.Printf("prometheus: Could not resolve %s, skipping it. Error: %s", URL.Host, err) + p.Log.Errorf("Could not resolve %q, skipping it. Error: %s", URL.Host, err.Error()) continue } for _, resolved := range resolvedAddresses { serviceURL := p.AddressToURL(URL, resolved) - allURLs = append(allURLs, URLAndAddress{URL: serviceURL, Address: resolved, OriginalURL: URL}) + allURLs[serviceURL.String()] = URLAndAddress{ + URL: serviceURL, + Address: resolved, + OriginalURL: URL, + } } } return allURLs, nil @@ -125,7 +212,7 @@ func (p *Prometheus) GetAllURLs() ([]URLAndAddress, error) { // Returns one of the errors encountered while gather stats (if any). 
func (p *Prometheus) Gather(acc telegraf.Accumulator) error { if p.client == nil { - client, err := p.createHttpClient() + client, err := p.createHTTPClient() if err != nil { return err } @@ -151,16 +238,7 @@ func (p *Prometheus) Gather(acc telegraf.Accumulator) error { return nil } -var tr = &http.Transport{ - ResponseHeaderTimeout: time.Duration(3 * time.Second), -} - -var client = &http.Client{ - Transport: tr, - Timeout: time.Duration(4 * time.Second), -} - -func (p *Prometheus) createHttpClient() (*http.Client, error) { +func (p *Prometheus) createHTTPClient() (*http.Client, error) { tlsCfg, err := p.ClientConfig.TLSConfig() if err != nil { return nil, err @@ -178,24 +256,62 @@ func (p *Prometheus) createHttpClient() (*http.Client, error) { } func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error { - var req, err = http.NewRequest("GET", u.URL.String(), nil) + var req *http.Request + var err error + var uClient *http.Client + var metrics []telegraf.Metric + if u.URL.Scheme == "unix" { + path := u.URL.Query().Get("path") + if path == "" { + path = "/metrics" + } + req, err = http.NewRequest("GET", "http://localhost"+path, nil) + + // ignore error because it's been handled before getting here + tlsCfg, _ := p.ClientConfig.TLSConfig() + uClient = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + DisableKeepAlives: true, + Dial: func(network, addr string) (net.Conn, error) { + c, err := net.Dial("unix", u.URL.Path) + return c, err + }, + }, + Timeout: p.ResponseTimeout.Duration, + } + } else { + if u.URL.Path == "" { + u.URL.Path = "/metrics" + } + req, err = http.NewRequest("GET", u.URL.String(), nil) + } + req.Header.Add("Accept", acceptHeader) - var token []byte - var resp *http.Response if p.BearerToken != "" { - token, err = ioutil.ReadFile(p.BearerToken) + token, err := ioutil.ReadFile(p.BearerToken) if err != nil { return err } req.Header.Set("Authorization", "Bearer "+string(token)) + } else if p.BearerTokenString != "" { + req.Header.Set("Authorization", "Bearer "+p.BearerTokenString) + } else if p.Username != "" || p.Password != "" { + req.SetBasicAuth(p.Username, p.Password) } - resp, err = p.client.Do(req) + var resp *http.Response + if u.URL.Scheme != "unix" { + resp, err = p.client.Do(req) + } else { + resp, err = uClient.Do(req) + } if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", u.URL, err) } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { return fmt.Errorf("%s returned HTTP status %s", u.URL, resp.Status) } @@ -205,20 +321,30 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return fmt.Errorf("error reading body: %s", err) } - metrics, err := Parse(body, resp.Header) + if p.MetricVersion == 2 { + metrics, err = ParseV2(body, resp.Header) + } else { + metrics, err = Parse(body, resp.Header) + } + if err != nil { return fmt.Errorf("error reading metrics for %s: %s", u.URL, err) } - // Add (or not) collected metrics + for _, metric := range metrics { tags := metric.Tags() // strip user and password from URL u.OriginalURL.User = nil - tags["url"] = u.OriginalURL.String() + if p.URLTag != "" { + tags[p.URLTag] = u.OriginalURL.String() + } if u.Address != "" { tags["address"] = u.Address } + for k, v := range u.Tags { + tags[k] = v + } switch metric.Type() { case telegraf.Counter: @@ -237,8 +363,29 @@ func (p *Prometheus) gatherURL(u URLAndAddress, acc telegraf.Accumulator) error return nil } +// Start will start the Kubernetes scraping if enabled in the 
configuration +func (p *Prometheus) Start(a telegraf.Accumulator) error { + if p.MonitorPods { + var ctx context.Context + ctx, p.cancel = context.WithCancel(context.Background()) + return p.start(ctx) + } + return nil +} + +func (p *Prometheus) Stop() { + if p.MonitorPods { + p.cancel() + } + p.wg.Wait() +} + func init() { inputs.Add("prometheus", func() telegraf.Input { - return &Prometheus{ResponseTimeout: internal.Duration{Duration: time.Second * 3}} + return &Prometheus{ + ResponseTimeout: internal.Duration{Duration: time.Second * 3}, + kubernetesPods: map[string]URLAndAddress{}, + URLTag: "url", + } }) } diff --git a/plugins/inputs/prometheus/prometheus_test.go b/plugins/inputs/prometheus/prometheus_test.go index 9a2982ff9..d33cba273 100644 --- a/plugins/inputs/prometheus/prometheus_test.go +++ b/plugins/inputs/prometheus/prometheus_test.go @@ -2,12 +2,14 @@ package prometheus import ( "fmt" + "math" "net/http" "net/http/httptest" "net/url" "testing" "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -29,6 +31,21 @@ go_goroutines 15 # TYPE test_metric untyped test_metric{label="value"} 1.0 1490802350000 ` +const sampleSummaryTextFormat = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. +# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} 0.00010425500000000001 +go_gc_duration_seconds{quantile="0.25"} 0.000139108 +go_gc_duration_seconds{quantile="0.5"} 0.00015749400000000002 +go_gc_duration_seconds{quantile="0.75"} 0.000331463 +go_gc_duration_seconds{quantile="1"} 0.000667154 +go_gc_duration_seconds_sum 0.0018183950000000002 +go_gc_duration_seconds_count 7 +` +const sampleGaugeTextFormat = ` +# HELP go_goroutines Number of goroutines that currently exist. 
+# TYPE go_goroutines gauge +go_goroutines 15 1490802350000 +` func TestPrometheusGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -37,7 +54,9 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { defer ts.Close() p := &Prometheus{ - URLs: []string{ts.URL}, + Log: testutil.Logger{}, + URLs: []string{ts.URL}, + URLTag: "url", } var acc testutil.Accumulator @@ -50,7 +69,7 @@ func TestPrometheusGeneratesMetrics(t *testing.T) { assert.True(t, acc.HasFloatField("test_metric", "value")) assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) assert.False(t, acc.HasTag("test_metric", "address")) - assert.True(t, acc.TagValue("test_metric", "url") == ts.URL) + assert.True(t, acc.TagValue("test_metric", "url") == ts.URL+"/metrics") } func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { @@ -60,7 +79,9 @@ func TestPrometheusGeneratesMetricsWithHostNameTag(t *testing.T) { defer ts.Close() p := &Prometheus{ + Log: testutil.Logger{}, KubernetesServices: []string{ts.URL}, + URLTag: "url", } u, _ := url.Parse(ts.URL) tsAddress := u.Hostname() @@ -89,6 +110,7 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) { defer ts.Close() p := &Prometheus{ + Log: testutil.Logger{}, URLs: []string{ts.URL}, KubernetesServices: []string{"http://random.telegraf.local:88/metrics"}, } @@ -103,3 +125,112 @@ func TestPrometheusGeneratesMetricsAlthoughFirstDNSFails(t *testing.T) { assert.True(t, acc.HasFloatField("test_metric", "value")) assert.True(t, acc.HasTimestamp("test_metric", time.Unix(1490802350, 0))) } + +func TestPrometheusGeneratesSummaryMetricsV2(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, sampleSummaryTextFormat) + })) + defer ts.Close() + + p := &Prometheus{ + URLs: []string{ts.URL}, + URLTag: "url", + MetricVersion: 2, + } + + var acc testutil.Accumulator + + err := acc.GatherError(p.Gather) + require.NoError(t, err) + + assert.True(t, acc.TagSetValue("prometheus", "quantile") == "0") + assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_sum")) + assert.True(t, acc.HasFloatField("prometheus", "go_gc_duration_seconds_count")) + assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") + +} + +func TestSummaryMayContainNaN(t *testing.T) { + const data = `# HELP go_gc_duration_seconds A summary of the GC invocation durations. 
+# TYPE go_gc_duration_seconds summary +go_gc_duration_seconds{quantile="0"} NaN +go_gc_duration_seconds{quantile="1"} NaN +go_gc_duration_seconds_sum 42.0 +go_gc_duration_seconds_count 42 +` + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, data) + })) + defer ts.Close() + + p := &Prometheus{ + URLs: []string{ts.URL}, + URLTag: "", + MetricVersion: 2, + } + + var acc testutil.Accumulator + + err := p.Gather(&acc) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "quantile": "0", + }, + map[string]interface{}{ + "go_gc_duration_seconds": math.NaN(), + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{ + "quantile": "1", + }, + map[string]interface{}{ + "go_gc_duration_seconds": math.NaN(), + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "go_gc_duration_seconds_sum": 42.0, + "go_gc_duration_seconds_count": 42.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), testutil.SortMetrics()) +} + +func TestPrometheusGeneratesGaugeMetricsV2(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, sampleGaugeTextFormat) + })) + defer ts.Close() + + p := &Prometheus{ + URLs: []string{ts.URL}, + URLTag: "url", + MetricVersion: 2, + } + + var acc testutil.Accumulator + + err := acc.GatherError(p.Gather) + require.NoError(t, err) + + assert.True(t, acc.HasFloatField("prometheus", "go_goroutines")) + assert.True(t, acc.TagValue("prometheus", "url") == ts.URL+"/metrics") + assert.True(t, acc.HasTimestamp("prometheus", time.Unix(1490802350, 0))) +} diff --git a/plugins/inputs/puppetagent/puppetagent.go b/plugins/inputs/puppetagent/puppetagent.go index c8a265bb8..1d0e30aa8 100644 --- a/plugins/inputs/puppetagent/puppetagent.go +++ b/plugins/inputs/puppetagent/puppetagent.go @@ -79,7 +79,7 @@ func (pa *PuppetAgent) SampleConfig() string { // Description returns description of PuppetAgent plugin func (pa *PuppetAgent) Description() string { - return `Reads last_run_summary.yaml file and converts to measurments` + return `Reads last_run_summary.yaml file and converts to measurements` } // Gather reads stats from all configured servers accumulates stats diff --git a/plugins/inputs/rabbitmq/README.md b/plugins/inputs/rabbitmq/README.md index ae6dac6f1..4a53ddc6c 100644 --- a/plugins/inputs/rabbitmq/README.md +++ b/plugins/inputs/rabbitmq/README.md @@ -1,10 +1,13 @@ # RabbitMQ Input Plugin -Reads metrics from RabbitMQ servers via the [Management Plugin](https://www.rabbitmq.com/management.html). +Reads metrics from RabbitMQ servers via the [Management Plugin][management]. -For additional details reference the [RabbitMQ Management HTTP Stats](https://cdn.rawgit.com/rabbitmq/rabbitmq-management/master/priv/www/doc/stats.html). +For additional details reference the [RabbitMQ Management HTTP Stats][management-reference]. 
-### Configuration: +[management]: https://www.rabbitmq.com/management.html +[management-reference]: https://raw.githack.com/rabbitmq/rabbitmq-management/rabbitmq_v3_6_9/priv/www/api/index.html + +### Configuration ```toml [[inputs.rabbitmq]] @@ -49,106 +52,175 @@ For additional details reference the [RabbitMQ Management HTTP Stats](https://cd ## Note that an empty array for both will include all queues # queue_name_include = [] # queue_name_exclude = [] + + ## Federation upstreams to include and exclude specified as an array of glob + ## pattern strings. Federation links can also be limited by the queue and + ## exchange filters. + # federation_upstream_include = [] + # federation_upstream_exclude = [] ``` -### Measurements & Fields: +### Metrics - rabbitmq_overview - - channels (int, channels) - - connections (int, connections) - - consumers (int, consumers) - - exchanges (int, exchanges) - - messages (int, messages) - - messages_acked (int, messages) - - messages_delivered (int, messages) - - messages_delivered_get (int, messages) - - messages_published (int, messages) - - messages_ready (int, messages) - - messages_unacked (int, messages) - - queues (int, queues) - - clustering_listeners (int, cluster nodes) - - amqp_listeners (int, amqp nodes up) + - tags: + - url + - name + - fields: + - channels (int, channels) + - connections (int, connections) + - consumers (int, consumers) + - exchanges (int, exchanges) + - messages (int, messages) + - messages_acked (int, messages) + - messages_delivered (int, messages) + - messages_delivered_get (int, messages) + - messages_published (int, messages) + - messages_ready (int, messages) + - messages_unacked (int, messages) + - queues (int, queues) + - clustering_listeners (int, cluster nodes) + - amqp_listeners (int, amqp nodes up) + - return_unroutable (int, number of unroutable messages) + - return_unroutable_rate (float, number of unroutable messages per second) -- rabbitmq_node - - disk_free (int, bytes) - - disk_free_limit (int, bytes) - - fd_total (int, file descriptors) - - fd_used (int, file descriptors) - - mem_limit (int, bytes) - - mem_used (int, bytes) - - proc_total (int, erlang processes) - - proc_used (int, erlang processes) - - run_queue (int, erlang processes) - - sockets_total (int, sockets) - - sockets_used (int, sockets) - - running (int, node up) ++ rabbitmq_node + - tags: + - url + - node + - url + - fields: + - disk_free (int, bytes) + - disk_free_limit (int, bytes) + - disk_free_alarm (int, disk alarm) + - fd_total (int, file descriptors) + - fd_used (int, file descriptors) + - mem_limit (int, bytes) + - mem_used (int, bytes) + - mem_alarm (int, memory a) + - proc_total (int, erlang processes) + - proc_used (int, erlang processes) + - run_queue (int, erlang processes) + - sockets_total (int, sockets) + - sockets_used (int, sockets) + - running (int, node up) + - uptime (int, milliseconds) + - health_check_status (int, 1 or 0) + - mnesia_disk_tx_count (int, number of disk transaction) + - mnesia_ram_tx_count (int, number of ram transaction) + - mnesia_disk_tx_count_rate (float, number of disk transaction per second) + - mnesia_ram_tx_count_rate (float, number of ram transaction per second) + - gc_num (int, number of garbage collection) + - gc_bytes_reclaimed (int, bytes) + - gc_num_rate (float, number of garbage collection per second) + - gc_bytes_reclaimed_rate (float, bytes per second) + - io_read_avg_time (float, number of read operations) + - io_read_avg_time_rate (int, number of read operations per second) + - 
io_read_bytes (int, bytes) + - io_read_bytes_rate (float, bytes per second) + - io_write_avg_time (int, milliseconds) + - io_write_avg_time_rate (float, milliseconds per second) + - io_write_bytes (int, bytes) + - io_write_bytes_rate (float, bytes per second) + - mem_connection_readers (int, bytes) + - mem_connection_writers (int, bytes) + - mem_connection_channels (int, bytes) + - mem_connection_other (int, bytes) + - mem_queue_procs (int, bytes) + - mem_queue_slave_procs (int, bytes) + - mem_plugins (int, bytes) + - mem_other_proc (int, bytes) + - mem_metrics (int, bytes) + - mem_mgmt_db (int, bytes) + - mem_mnesia (int, bytes) + - mem_other_ets (int, bytes) + - mem_binary (int, bytes) + - mem_msg_index (int, bytes) + - mem_code (int, bytes) + - mem_atom (int, bytes) + - mem_other_system (int, bytes) + - mem_allocated_unused (int, bytes) + - mem_reserved_unallocated (int, bytes) + - mem_total (int, bytes) - rabbitmq_queue - - consumer_utilisation (float, percent) - - consumers (int, int) - - idle_since (string, time - e.g., "2006-01-02 15:04:05") - - memory (int, bytes) - - message_bytes (int, bytes) - - message_bytes_persist (int, bytes) - - message_bytes_ram (int, bytes) - - message_bytes_ready (int, bytes) - - message_bytes_unacked (int, bytes) - - messages (int, count) - - messages_ack (int, count) - - messages_ack_rate (float, messages per second) - - messages_deliver (int, count) - - messages_deliver_rate (float, messages per second) - - messages_deliver_get (int, count) - - messages_deliver_get_rate (float, messages per second) - - messages_publish (int, count) - - messages_publish_rate (float, messages per second) - - messages_ready (int, count) - - messages_redeliver (int, count) - - messages_redeliver_rate (float, messages per second) - - messages_unack (integer, count) + - tags: + - url + - queue + - vhost + - node + - durable + - auto_delete + - fields: + - consumer_utilisation (float, percent) + - consumers (int, int) + - idle_since (string, time - e.g., "2006-01-02 15:04:05") + - memory (int, bytes) + - message_bytes (int, bytes) + - message_bytes_persist (int, bytes) + - message_bytes_ram (int, bytes) + - message_bytes_ready (int, bytes) + - message_bytes_unacked (int, bytes) + - messages (int, count) + - messages_ack (int, count) + - messages_ack_rate (float, messages per second) + - messages_deliver (int, count) + - messages_deliver_rate (float, messages per second) + - messages_deliver_get (int, count) + - messages_deliver_get_rate (float, messages per second) + - messages_publish (int, count) + - messages_publish_rate (float, messages per second) + - messages_ready (int, count) + - messages_redeliver (int, count) + - messages_redeliver_rate (float, messages per second) + - messages_unack (int, count) + - slave_nodes (int, count) + - synchronised_slave_nodes (int, count) -- rabbitmq_exchange - - messages_publish_in (int, count) - - messages_publish_out (int, count) ++ rabbitmq_exchange + - tags: + - url + - exchange + - type + - vhost + - internal + - durable + - auto_delete + - fields: + - messages_publish_in (int, count) + - messages_publish_in_rate (int, messages per second) + - messages_publish_out (int, count) + - messages_publish_out_rate (int, messages per second) -### Tags: +- rabbitmq_federation + - tags: + - url + - vhost + - type + - upstream + - exchange + - upstream_exchange + - queue + - upstream_queue + - fields: + - acks_uncommitted (int, count) + - consumers (int, count) + - messages_unacknowledged (int, count) + - messages_uncommitted (int, count) + - 
messages_unconfirmed (int, count) + - messages_confirm (int, count) + - messages_publish (int, count) + - messages_return_unroutable (int, count) -- All measurements have the following tags: - - url - -- rabbitmq_overview - - name - -- rabbitmq_node - - node - -- rabbitmq_queue - - url - - queue - - vhost - - node - - durable - - auto_delete - -- rabbitmq_exchange - - url - - exchange - - type - - vhost - - internal - - durable - - auto_delete - -### Sample Queries: +### Sample Queries Message rates for the entire node can be calculated from total message counts. For instance, to get the rate of messages published per minute, use this query: ``` -SELECT NON_NEGATIVE_DERIVATIVE(LAST("messages_published"), 1m) AS messages_published_rate -FROM rabbitmq_overview WHERE time > now() - 10m GROUP BY time(1m) +SELECT NON_NEGATIVE_DERIVATIVE(LAST("messages_published"), 1m) AS messages_published_rate FROM rabbitmq_overview WHERE time > now() - 10m GROUP BY time(1m) ``` -### Example Output: +### Example Output ``` rabbitmq_queue,url=http://amqp.example.org:15672,queue=telegraf,vhost=influxdb,node=rabbit@amqp.example.org,durable=true,auto_delete=false,host=amqp.example.org messages_deliver_get=0i,messages_publish=329i,messages_publish_rate=0.2,messages_redeliver_rate=0,message_bytes_ready=0i,message_bytes_unacked=0i,messages_deliver=329i,messages_unack=0i,consumers=1i,idle_since="",messages=0i,messages_deliver_rate=0.2,messages_deliver_get_rate=0.2,messages_redeliver=0i,memory=43032i,message_bytes_ram=0i,messages_ack=329i,messages_ready=0i,messages_ack_rate=0.2,consumer_utilisation=1,message_bytes=0i,message_bytes_persist=0i 1493684035000000000 diff --git a/plugins/inputs/rabbitmq/rabbitmq.go b/plugins/inputs/rabbitmq/rabbitmq.go index 49dabe1b5..cb8fbb1aa 100644 --- a/plugins/inputs/rabbitmq/rabbitmq.go +++ b/plugins/inputs/rabbitmq/rabbitmq.go @@ -15,15 +15,15 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -// DefaultUsername will set a default value that corrasponds to the default +// DefaultUsername will set a default value that corresponds to the default // value used by Rabbitmq const DefaultUsername = "guest" -// DefaultPassword will set a default value that corrasponds to the default +// DefaultPassword will set a default value that corresponds to the default // value used by Rabbitmq const DefaultPassword = "guest" -// DefaultURL will set a default value that corrasponds to the default value +// DefaultURL will set a default value that corresponds to the default value // used by Rabbitmq const DefaultURL = "http://localhost:15672" @@ -34,27 +34,30 @@ const DefaultClientTimeout = 4 // RabbitMQ defines the configuration necessary for gathering metrics, // see the sample config for further details type RabbitMQ struct { - URL string - Name string - Username string - Password string + URL string `toml:"url"` + Name string `toml:"name"` + Username string `toml:"username"` + Password string `toml:"password"` tls.ClientConfig ResponseHeaderTimeout internal.Duration `toml:"header_timeout"` ClientTimeout internal.Duration `toml:"client_timeout"` - Nodes []string - Queues []string - Exchanges []string + Nodes []string `toml:"nodes"` + Queues []string `toml:"queues"` + Exchanges []string `toml:"exchanges"` - QueueInclude []string `toml:"queue_name_include"` - QueueExclude []string `toml:"queue_name_exclude"` + QueueInclude []string `toml:"queue_name_include"` + QueueExclude []string `toml:"queue_name_exclude"` + FederationUpstreamInclude []string `toml:"federation_upstream_include"` + 
FederationUpstreamExclude []string `toml:"federation_upstream_exclude"` - Client *http.Client + Client *http.Client `toml:"-"` filterCreated bool excludeEveryQueue bool queueFilter filter.Filter + upstreamFilter filter.Filter } // OverviewResponse ... @@ -72,23 +75,27 @@ type Listeners struct { // Details ... type Details struct { - Rate float64 + Rate float64 `json:"rate"` } // MessageStats ... type MessageStats struct { - Ack int64 - AckDetails Details `json:"ack_details"` - Deliver int64 - DeliverDetails Details `json:"deliver_details"` - DeliverGet int64 `json:"deliver_get"` - DeliverGetDetails Details `json:"deliver_get_details"` - Publish int64 - PublishDetails Details `json:"publish_details"` - Redeliver int64 - RedeliverDetails Details `json:"redeliver_details"` - PublishIn int64 `json:"publish_in"` - PublishOut int64 `json:"publish_out"` + Ack int64 + AckDetails Details `json:"ack_details"` + Deliver int64 + DeliverDetails Details `json:"deliver_details"` + DeliverGet int64 `json:"deliver_get"` + DeliverGetDetails Details `json:"deliver_get_details"` + Publish int64 + PublishDetails Details `json:"publish_details"` + Redeliver int64 + RedeliverDetails Details `json:"redeliver_details"` + PublishIn int64 `json:"publish_in"` + PublishInDetails Details `json:"publish_in_details"` + PublishOut int64 `json:"publish_out"` + PublishOutDetails Details `json:"publish_out_details"` + ReturnUnroutable int64 `json:"return_unroutable"` + ReturnUnroutableDetails Details `json:"return_unroutable_details"` } // ObjectTotals ... @@ -114,35 +121,56 @@ type QueueTotals struct { // Queue ... type Queue struct { - QueueTotals // just to not repeat the same code - MessageStats `json:"message_stats"` - Memory int64 - Consumers int64 - ConsumerUtilisation float64 `json:"consumer_utilisation"` - Name string - Node string - Vhost string - Durable bool - AutoDelete bool `json:"auto_delete"` - IdleSince string `json:"idle_since"` + QueueTotals // just to not repeat the same code + MessageStats `json:"message_stats"` + Memory int64 + Consumers int64 + ConsumerUtilisation float64 `json:"consumer_utilisation"` + Name string + Node string + Vhost string + Durable bool + AutoDelete bool `json:"auto_delete"` + IdleSince string `json:"idle_since"` + SlaveNodes []string `json:"slave_nodes"` + SynchronisedSlaveNodes []string `json:"synchronised_slave_nodes"` } // Node ... 
type Node struct { Name string - DiskFree int64 `json:"disk_free"` - DiskFreeLimit int64 `json:"disk_free_limit"` - FdTotal int64 `json:"fd_total"` - FdUsed int64 `json:"fd_used"` - MemLimit int64 `json:"mem_limit"` - MemUsed int64 `json:"mem_used"` - ProcTotal int64 `json:"proc_total"` - ProcUsed int64 `json:"proc_used"` - RunQueue int64 `json:"run_queue"` - SocketsTotal int64 `json:"sockets_total"` - SocketsUsed int64 `json:"sockets_used"` - Running bool `json:"running"` + DiskFree int64 `json:"disk_free"` + DiskFreeLimit int64 `json:"disk_free_limit"` + DiskFreeAlarm bool `json:"disk_free_alarm"` + FdTotal int64 `json:"fd_total"` + FdUsed int64 `json:"fd_used"` + MemLimit int64 `json:"mem_limit"` + MemUsed int64 `json:"mem_used"` + MemAlarm bool `json:"mem_alarm"` + ProcTotal int64 `json:"proc_total"` + ProcUsed int64 `json:"proc_used"` + RunQueue int64 `json:"run_queue"` + SocketsTotal int64 `json:"sockets_total"` + SocketsUsed int64 `json:"sockets_used"` + Running bool `json:"running"` + Uptime int64 `json:"uptime"` + MnesiaDiskTxCount int64 `json:"mnesia_disk_tx_count"` + MnesiaDiskTxCountDetails Details `json:"mnesia_disk_tx_count_details"` + MnesiaRamTxCount int64 `json:"mnesia_ram_tx_count"` + MnesiaRamTxCountDetails Details `json:"mnesia_ram_tx_count_details"` + GcNum int64 `json:"gc_num"` + GcNumDetails Details `json:"gc_num_details"` + GcBytesReclaimed int64 `json:"gc_bytes_reclaimed"` + GcBytesReclaimedDetails Details `json:"gc_bytes_reclaimed_details"` + IoReadAvgTime int64 `json:"io_read_avg_time"` + IoReadAvgTimeDetails Details `json:"io_read_avg_time_details"` + IoReadBytes int64 `json:"io_read_bytes"` + IoReadBytesDetails Details `json:"io_read_bytes_details"` + IoWriteAvgTime int64 `json:"io_write_avg_time"` + IoWriteAvgTimeDetails Details `json:"io_write_avg_time_details"` + IoWriteBytes int64 `json:"io_write_bytes"` + IoWriteBytesDetails Details `json:"io_write_bytes_details"` } type Exchange struct { @@ -155,10 +183,75 @@ type Exchange struct { AutoDelete bool `json:"auto_delete"` } +// FederationLinkChannelMessageStats ... +type FederationLinkChannelMessageStats struct { + Confirm int64 `json:"confirm"` + ConfirmDetails Details `json:"confirm_details"` + Publish int64 `json:"publish"` + PublishDetails Details `json:"publish_details"` + ReturnUnroutable int64 `json:"return_unroutable"` + ReturnUnroutableDetails Details `json:"return_unroutable_details"` +} + +// FederationLinkChannel ... +type FederationLinkChannel struct { + AcksUncommitted int64 `json:"acks_uncommitted"` + ConsumerCount int64 `json:"consumer_count"` + MessagesUnacknowledged int64 `json:"messages_unacknowledged"` + MessagesUncommitted int64 `json:"messages_uncommitted"` + MessagesUnconfirmed int64 `json:"messages_unconfirmed"` + MessageStats FederationLinkChannelMessageStats `json:"message_stats"` +} + +// FederationLink ... +type FederationLink struct { + Type string `json:"type"` + Queue string `json:"queue"` + UpstreamQueue string `json:"upstream_queue"` + Exchange string `json:"exchange"` + UpstreamExchange string `json:"upstream_exchange"` + Vhost string `json:"vhost"` + Upstream string `json:"upstream"` + LocalChannel FederationLinkChannel `json:"local_channel"` +} + +type HealthCheck struct { + Status string `json:"status"` +} + +// MemoryResponse ... 
+type MemoryResponse struct { + Memory *Memory `json:"memory"` +} + +// Memory details +type Memory struct { + ConnectionReaders int64 `json:"connection_readers"` + ConnectionWriters int64 `json:"connection_writers"` + ConnectionChannels int64 `json:"connection_channels"` + ConnectionOther int64 `json:"connection_other"` + QueueProcs int64 `json:"queue_procs"` + QueueSlaveProcs int64 `json:"queue_slave_procs"` + Plugins int64 `json:"plugins"` + OtherProc int64 `json:"other_proc"` + Metrics int64 `json:"metrics"` + MgmtDb int64 `json:"mgmt_db"` + Mnesia int64 `json:"mnesia"` + OtherEts int64 `json:"other_ets"` + Binary int64 `json:"binary"` + MsgIndex int64 `json:"msg_index"` + Code int64 `json:"code"` + Atom int64 `json:"atom"` + OtherSystem int64 `json:"other_system"` + AllocatedUnused int64 `json:"allocated_unused"` + ReservedUnallocated int64 `json:"reserved_unallocated"` + Total int64 `json:"total"` +} + // gatherFunc ... type gatherFunc func(r *RabbitMQ, acc telegraf.Accumulator) -var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherExchanges} +var gatherFunctions = []gatherFunc{gatherOverview, gatherNodes, gatherQueues, gatherExchanges, gatherFederationLinks} var sampleConfig = ` ## Management Plugin url. (default: http://localhost:15672) @@ -202,8 +295,24 @@ var sampleConfig = ` ## Note that an empty array for both will include all queues queue_name_include = [] queue_name_exclude = [] + + ## Federation upstreams include and exclude when gathering the rabbitmq_federation measurement. + ## If neither are specified, metrics for all federation upstreams are gathered. + ## Federation link metrics will only be gathered for queues and exchanges + ## whose non-federation metrics will be collected (e.g a queue excluded + ## by the 'queue_name_exclude' option will also be excluded from federation). + ## Globs accepted. + # federation_upstream_include = ["dataCentre-*"] + # federation_upstream_exclude = [] ` +func boolToInt(b bool) int64 { + if b { + return 1 + } + return 0 +} + // SampleConfig ... 
func (r *RabbitMQ) SampleConfig() string { return sampleConfig @@ -231,12 +340,16 @@ func (r *RabbitMQ) Gather(acc telegraf.Accumulator) error { } } - // Create queue filter if not already created + // Create gather filters if not already created if !r.filterCreated { err := r.createQueueFilter() if err != nil { return err } + err = r.createUpstreamFilter() + if err != nil { + return err + } r.filterCreated = true } @@ -302,12 +415,12 @@ func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) { return } - var clustering_listeners, amqp_listeners int64 = 0, 0 + var clusteringListeners, amqpListeners int64 = 0, 0 for _, listener := range overview.Listeners { if listener.Protocol == "clustering" { - clustering_listeners++ + clusteringListeners++ } else if listener.Protocol == "amqp" { - amqp_listeners++ + amqpListeners++ } } @@ -328,51 +441,121 @@ func gatherOverview(r *RabbitMQ, acc telegraf.Accumulator) { "messages_delivered": overview.MessageStats.Deliver, "messages_delivered_get": overview.MessageStats.DeliverGet, "messages_published": overview.MessageStats.Publish, - "clustering_listeners": clustering_listeners, - "amqp_listeners": amqp_listeners, + "clustering_listeners": clusteringListeners, + "amqp_listeners": amqpListeners, + "return_unroutable": overview.MessageStats.ReturnUnroutable, + "return_unroutable_rate": overview.MessageStats.ReturnUnroutableDetails.Rate, } acc.AddFields("rabbitmq_overview", fields, tags) } func gatherNodes(r *RabbitMQ, acc telegraf.Accumulator) { - nodes := make([]Node, 0) - // Gather information about nodes - err := r.requestJSON("/api/nodes", &nodes) + allNodes := make([]*Node, 0) + + err := r.requestJSON("/api/nodes", &allNodes) if err != nil { acc.AddError(err) return } - now := time.Now() - for _, node := range nodes { - if !r.shouldGatherNode(node) { - continue + nodes := allNodes[:0] + for _, node := range allNodes { + if r.shouldGatherNode(node) { + nodes = append(nodes, node) } - - tags := map[string]string{"url": r.URL} - tags["node"] = node.Name - - var running int64 = 0 - if node.Running { - running = 1 - } - - fields := map[string]interface{}{ - "disk_free": node.DiskFree, - "disk_free_limit": node.DiskFreeLimit, - "fd_total": node.FdTotal, - "fd_used": node.FdUsed, - "mem_limit": node.MemLimit, - "mem_used": node.MemUsed, - "proc_total": node.ProcTotal, - "proc_used": node.ProcUsed, - "run_queue": node.RunQueue, - "sockets_total": node.SocketsTotal, - "sockets_used": node.SocketsUsed, - "running": running, - } - acc.AddFields("rabbitmq_node", fields, tags, now) } + + var wg sync.WaitGroup + for _, node := range nodes { + wg.Add(1) + go func(node *Node) { + defer wg.Done() + + tags := map[string]string{"url": r.URL} + tags["node"] = node.Name + + fields := map[string]interface{}{ + "disk_free": node.DiskFree, + "disk_free_limit": node.DiskFreeLimit, + "disk_free_alarm": boolToInt(node.DiskFreeAlarm), + "fd_total": node.FdTotal, + "fd_used": node.FdUsed, + "mem_limit": node.MemLimit, + "mem_used": node.MemUsed, + "mem_alarm": boolToInt(node.MemAlarm), + "proc_total": node.ProcTotal, + "proc_used": node.ProcUsed, + "run_queue": node.RunQueue, + "sockets_total": node.SocketsTotal, + "sockets_used": node.SocketsUsed, + "uptime": node.Uptime, + "mnesia_disk_tx_count": node.MnesiaDiskTxCount, + "mnesia_disk_tx_count_rate": node.MnesiaDiskTxCountDetails.Rate, + "mnesia_ram_tx_count": node.MnesiaRamTxCount, + "mnesia_ram_tx_count_rate": node.MnesiaRamTxCountDetails.Rate, + "gc_num": node.GcNum, + "gc_num_rate": node.GcNumDetails.Rate, + 
"gc_bytes_reclaimed": node.GcBytesReclaimed, + "gc_bytes_reclaimed_rate": node.GcBytesReclaimedDetails.Rate, + "io_read_avg_time": node.IoReadAvgTime, + "io_read_avg_time_rate": node.IoReadAvgTimeDetails.Rate, + "io_read_bytes": node.IoReadBytes, + "io_read_bytes_rate": node.IoReadBytesDetails.Rate, + "io_write_avg_time": node.IoWriteAvgTime, + "io_write_avg_time_rate": node.IoWriteAvgTimeDetails.Rate, + "io_write_bytes": node.IoWriteBytes, + "io_write_bytes_rate": node.IoWriteBytesDetails.Rate, + "running": boolToInt(node.Running), + } + + var health HealthCheck + err := r.requestJSON("/api/healthchecks/node/"+node.Name, &health) + if err != nil { + acc.AddError(err) + return + } + + if health.Status == "ok" { + fields["health_check_status"] = int64(1) + } else { + fields["health_check_status"] = int64(0) + } + + var memory MemoryResponse + err = r.requestJSON("/api/nodes/"+node.Name+"/memory", &memory) + if err != nil { + acc.AddError(err) + return + } + + if memory.Memory != nil { + fields["mem_connection_readers"] = memory.Memory.ConnectionReaders + fields["mem_connection_writers"] = memory.Memory.ConnectionWriters + fields["mem_connection_channels"] = memory.Memory.ConnectionChannels + fields["mem_connection_other"] = memory.Memory.ConnectionOther + fields["mem_queue_procs"] = memory.Memory.QueueProcs + fields["mem_queue_slave_procs"] = memory.Memory.QueueSlaveProcs + fields["mem_plugins"] = memory.Memory.Plugins + fields["mem_other_proc"] = memory.Memory.OtherProc + fields["mem_metrics"] = memory.Memory.Metrics + fields["mem_mgmt_db"] = memory.Memory.MgmtDb + fields["mem_mnesia"] = memory.Memory.Mnesia + fields["mem_other_ets"] = memory.Memory.OtherEts + fields["mem_binary"] = memory.Memory.Binary + fields["mem_msg_index"] = memory.Memory.MsgIndex + fields["mem_code"] = memory.Memory.Code + fields["mem_atom"] = memory.Memory.Atom + fields["mem_other_system"] = memory.Memory.OtherSystem + fields["mem_allocated_unused"] = memory.Memory.AllocatedUnused + fields["mem_reserved_unallocated"] = memory.Memory.ReservedUnallocated + fields["mem_total"] = memory.Memory.Total + } + + acc.AddFields("rabbitmq_node", fields, tags) + }(node) + } + + wg.Wait() } func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator) { @@ -404,10 +587,12 @@ func gatherQueues(r *RabbitMQ, acc telegraf.Accumulator) { "rabbitmq_queue", map[string]interface{}{ // common information - "consumers": queue.Consumers, - "consumer_utilisation": queue.ConsumerUtilisation, - "idle_since": queue.IdleSince, - "memory": queue.Memory, + "consumers": queue.Consumers, + "consumer_utilisation": queue.ConsumerUtilisation, + "idle_since": queue.IdleSince, + "slave_nodes": len(queue.SlaveNodes), + "synchronised_slave_nodes": len(queue.SynchronisedSlaveNodes), + "memory": queue.Memory, // messages information "message_bytes": queue.MessageBytes, "message_bytes_ready": queue.MessageBytesReady, @@ -443,7 +628,7 @@ func gatherExchanges(r *RabbitMQ, acc telegraf.Accumulator) { } for _, exchange := range exchanges { - if !r.shouldGatherExchange(exchange) { + if !r.shouldGatherExchange(exchange.Name) { continue } tags := map[string]string{ @@ -459,15 +644,63 @@ func gatherExchanges(r *RabbitMQ, acc telegraf.Accumulator) { acc.AddFields( "rabbitmq_exchange", map[string]interface{}{ - "messages_publish_in": exchange.MessageStats.PublishIn, - "messages_publish_out": exchange.MessageStats.PublishOut, + "messages_publish_in": exchange.MessageStats.PublishIn, + "messages_publish_in_rate": exchange.MessageStats.PublishInDetails.Rate, + 
"messages_publish_out": exchange.MessageStats.PublishOut, + "messages_publish_out_rate": exchange.MessageStats.PublishOutDetails.Rate, }, tags, ) } } -func (r *RabbitMQ) shouldGatherNode(node Node) bool { +func gatherFederationLinks(r *RabbitMQ, acc telegraf.Accumulator) { + // Gather information about federation links + federationLinks := make([]FederationLink, 0) + err := r.requestJSON("/api/federation-links", &federationLinks) + if err != nil { + acc.AddError(err) + return + } + + for _, link := range federationLinks { + if !r.shouldGatherFederationLink(link) { + continue + } + + tags := map[string]string{ + "url": r.URL, + "type": link.Type, + "vhost": link.Vhost, + "upstream": link.Upstream, + } + + if link.Type == "exchange" { + tags["exchange"] = link.Exchange + tags["upstream_exchange"] = link.UpstreamExchange + } else { + tags["queue"] = link.Queue + tags["upstream_queue"] = link.UpstreamQueue + } + + acc.AddFields( + "rabbitmq_federation", + map[string]interface{}{ + "acks_uncommitted": link.LocalChannel.AcksUncommitted, + "consumers": link.LocalChannel.ConsumerCount, + "messages_unacknowledged": link.LocalChannel.MessagesUnacknowledged, + "messages_uncommitted": link.LocalChannel.MessagesUncommitted, + "messages_unconfirmed": link.LocalChannel.MessagesUnconfirmed, + "messages_confirm": link.LocalChannel.MessageStats.Confirm, + "messages_publish": link.LocalChannel.MessageStats.Publish, + "messages_return_unroutable": link.LocalChannel.MessageStats.ReturnUnroutable, + }, + tags, + ) + } +} + +func (r *RabbitMQ) shouldGatherNode(node *Node) bool { if len(r.Nodes) == 0 { return true } @@ -487,11 +720,11 @@ func (r *RabbitMQ) createQueueFilter() error { r.QueueInclude = append(r.QueueInclude, r.Queues...) } - filter, err := filter.NewIncludeExcludeFilter(r.QueueInclude, r.QueueExclude) + queueFilter, err := filter.NewIncludeExcludeFilter(r.QueueInclude, r.QueueExclude) if err != nil { return err } - r.queueFilter = filter + r.queueFilter = queueFilter for _, q := range r.QueueExclude { if q == "*" { @@ -502,13 +735,23 @@ func (r *RabbitMQ) createQueueFilter() error { return nil } -func (r *RabbitMQ) shouldGatherExchange(exchange Exchange) bool { +func (r *RabbitMQ) createUpstreamFilter() error { + upstreamFilter, err := filter.NewIncludeExcludeFilter(r.FederationUpstreamInclude, r.FederationUpstreamExclude) + if err != nil { + return err + } + r.upstreamFilter = upstreamFilter + + return nil +} + +func (r *RabbitMQ) shouldGatherExchange(exchangeName string) bool { if len(r.Exchanges) == 0 { return true } for _, name := range r.Exchanges { - if name == exchange.Name { + if name == exchangeName { return true } } @@ -516,6 +759,21 @@ func (r *RabbitMQ) shouldGatherExchange(exchange Exchange) bool { return false } +func (r *RabbitMQ) shouldGatherFederationLink(link FederationLink) bool { + if !r.upstreamFilter.Match(link.Upstream) { + return false + } + + switch link.Type { + case "exchange": + return r.shouldGatherExchange(link.Exchange) + case "queue": + return r.queueFilter.Match(link.Queue) + default: + return false + } +} + func init() { inputs.Add("rabbitmq", func() telegraf.Input { return &RabbitMQ{ diff --git a/plugins/inputs/rabbitmq/rabbitmq_test.go b/plugins/inputs/rabbitmq/rabbitmq_test.go index 5e9829cc0..c207706c9 100644 --- a/plugins/inputs/rabbitmq/rabbitmq_test.go +++ b/plugins/inputs/rabbitmq/rabbitmq_test.go @@ -6,506 +6,43 @@ import ( "net/http/httptest" "testing" + "io/ioutil" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" ) -const sampleOverviewResponse = ` -{ - "message_stats": { - "ack": 5246, - "ack_details": { - "rate": 0.0 - }, - "deliver": 5246, - "deliver_details": { - "rate": 0.0 - }, - "deliver_get": 5246, - "deliver_get_details": { - "rate": 0.0 - }, - "publish": 5258, - "publish_details": { - "rate": 0.0 - } - }, - "object_totals": { - "channels": 44, - "connections": 44, - "consumers": 65, - "exchanges": 43, - "queues": 62 - }, - "queue_totals": { - "messages": 0, - "messages_details": { - "rate": 0.0 - }, - "messages_ready": 0, - "messages_ready_details": { - "rate": 0.0 - }, - "messages_unacknowledged": 0, - "messages_unacknowledged_details": { - "rate": 0.0 - } - }, - "listeners": [ - { - "name": "rabbit@node-a", - "protocol": "amqp" - }, - { - "name": "rabbit@node-b", - "protocol": "amqp" - }, - { - "name": "rabbit@node-a", - "protocol": "clustering" - }, - { - "name": "rabbit@node-b", - "protocol": "clustering" - } - ] -} -` - -const sampleNodesResponse = ` -[ - { - "db_dir": "/var/lib/rabbitmq/mnesia/rabbit@vagrant-ubuntu-trusty-64", - "disk_free": 37768282112, - "disk_free_alarm": false, - "disk_free_details": { - "rate": 0.0 - }, - "disk_free_limit": 50000000, - "enabled_plugins": [ - "rabbitmq_management" - ], - "fd_total": 1024, - "fd_used": 63, - "fd_used_details": { - "rate": 0.0 - }, - "io_read_avg_time": 0, - "io_read_avg_time_details": { - "rate": 0.0 - }, - "io_read_bytes": 1, - "io_read_bytes_details": { - "rate": 0.0 - }, - "io_read_count": 1, - "io_read_count_details": { - "rate": 0.0 - }, - "io_sync_avg_time": 0, - "io_sync_avg_time_details": { - "rate": 0.0 - }, - "io_write_avg_time": 0, - "io_write_avg_time_details": { - "rate": 0.0 - }, - "log_file": "/var/log/rabbitmq/rabbit@vagrant-ubuntu-trusty-64.log", - "mem_alarm": false, - "mem_limit": 2503771750, - "mem_used": 159707080, - "mem_used_details": { - "rate": 15185.6 - }, - "mnesia_disk_tx_count": 16, - "mnesia_disk_tx_count_details": { - "rate": 0.0 - }, - "mnesia_ram_tx_count": 296, - "mnesia_ram_tx_count_details": { - "rate": 0.0 - }, - "name": "rabbit@vagrant-ubuntu-trusty-64", - "net_ticktime": 60, - "os_pid": "14244", - "partitions": [], - "proc_total": 1048576, - "proc_used": 783, - "proc_used_details": { - "rate": 0.0 - }, - "processors": 1, - "rates_mode": "basic", - "run_queue": 0, - "running": true, - "sasl_log_file": "/var/log/rabbitmq/rabbit@vagrant-ubuntu-trusty-64-sasl.log", - "sockets_total": 829, - "sockets_used": 45, - "sockets_used_details": { - "rate": 0.0 - }, - "type": "disc", - "uptime": 7464827 - } -] -` -const sampleQueuesResponse = ` -[ - { - "memory": 21960, - "messages": 0, - "messages_details": { - "rate": 0 - }, - "messages_ready": 0, - "messages_ready_details": { - "rate": 0 - }, - "messages_unacknowledged": 0, - "messages_unacknowledged_details": { - "rate": 0 - }, - "idle_since": "2015-11-01 8:22:15", - "consumer_utilisation": "", - "policy": "federator", - "exclusive_consumer_tag": "", - "consumers": 0, - "recoverable_slaves": "", - "state": "running", - "messages_ram": 0, - "messages_ready_ram": 0, - "messages_unacknowledged_ram": 0, - "messages_persistent": 0, - "message_bytes": 0, - "message_bytes_ready": 0, - "message_bytes_unacknowledged": 0, - "message_bytes_ram": 0, - "message_bytes_persistent": 0, - "disk_reads": 0, - "disk_writes": 0, - "backing_queue_status": { - "q1": 0, - "q2": 0, - "delta": [ - "delta", - "undefined", - 0, - "undefined" - ], - "q3": 0, - "q4": 0, - "len": 0, - "target_ram_count": "infinity", - "next_seq_id": 0, - 
"avg_ingress_rate": 0, - "avg_egress_rate": 0, - "avg_ack_ingress_rate": 0, - "avg_ack_egress_rate": 0 - }, - "name": "collectd-queue", - "vhost": "collectd", - "durable": true, - "auto_delete": false, - "arguments": {}, - "node": "rabbit@testhost" - }, - { - "memory": 55528, - "message_stats": { - "ack": 223654927, - "ack_details": { - "rate": 0 - }, - "deliver": 224518745, - "deliver_details": { - "rate": 0 - }, - "deliver_get": 224518829, - "deliver_get_details": { - "rate": 0 - }, - "get": 19, - "get_details": { - "rate": 0 - }, - "get_no_ack": 65, - "get_no_ack_details": { - "rate": 0 - }, - "publish": 223883765, - "publish_details": { - "rate": 0 - }, - "redeliver": 863805, - "redeliver_details": { - "rate": 0 - } - }, - "messages": 24, - "messages_details": { - "rate": 0 - }, - "messages_ready": 24, - "messages_ready_details": { - "rate": 0 - }, - "messages_unacknowledged": 0, - "messages_unacknowledged_details": { - "rate": 0 - }, - "idle_since": "2015-11-01 8:22:14", - "consumer_utilisation": "", - "policy": "", - "exclusive_consumer_tag": "", - "consumers": 0, - "recoverable_slaves": "", - "state": "running", - "messages_ram": 24, - "messages_ready_ram": 24, - "messages_unacknowledged_ram": 0, - "messages_persistent": 0, - "message_bytes": 149220, - "message_bytes_ready": 149220, - "message_bytes_unacknowledged": 0, - "message_bytes_ram": 149220, - "message_bytes_persistent": 0, - "disk_reads": 0, - "disk_writes": 0, - "backing_queue_status": { - "q1": 0, - "q2": 0, - "delta": [ - "delta", - "undefined", - 0, - "undefined" - ], - "q3": 0, - "q4": 24, - "len": 24, - "target_ram_count": "infinity", - "next_seq_id": 223883765, - "avg_ingress_rate": 0, - "avg_egress_rate": 0, - "avg_ack_ingress_rate": 0, - "avg_ack_egress_rate": 0 - }, - "name": "telegraf", - "vhost": "collectd", - "durable": true, - "auto_delete": false, - "arguments": {}, - "node": "rabbit@testhost" - }, - { - "message_stats": { - "ack": 1296077, - "ack_details": { - "rate": 0 - }, - "deliver": 1513176, - "deliver_details": { - "rate": 0.4 - }, - "deliver_get": 1513239, - "deliver_get_details": { - "rate": 0.4 - }, - "disk_writes": 7976, - "disk_writes_details": { - "rate": 0 - }, - "get": 40, - "get_details": { - "rate": 0 - }, - "get_no_ack": 23, - "get_no_ack_details": { - "rate": 0 - }, - "publish": 1325628, - "publish_details": { - "rate": 0.4 - }, - "redeliver": 216034, - "redeliver_details": { - "rate": 0 - } - }, - "messages": 5, - "messages_details": { - "rate": 0.4 - }, - "messages_ready": 0, - "messages_ready_details": { - "rate": 0 - }, - "messages_unacknowledged": 5, - "messages_unacknowledged_details": { - "rate": 0.4 - }, - "policy": "federator", - "exclusive_consumer_tag": "", - "consumers": 1, - "consumer_utilisation": 1, - "memory": 122856, - "recoverable_slaves": "", - "state": "running", - "messages_ram": 5, - "messages_ready_ram": 0, - "messages_unacknowledged_ram": 5, - "messages_persistent": 0, - "message_bytes": 150096, - "message_bytes_ready": 0, - "message_bytes_unacknowledged": 150096, - "message_bytes_ram": 150096, - "message_bytes_persistent": 0, - "disk_reads": 0, - "disk_writes": 7976, - "backing_queue_status": { - "q1": 0, - "q2": 0, - "delta": [ - "delta", - "undefined", - 0, - "undefined" - ], - "q3": 0, - "q4": 0, - "len": 0, - "target_ram_count": "infinity", - "next_seq_id": 1325628, - "avg_ingress_rate": 0.19115840579934168, - "avg_egress_rate": 0.19115840579934168, - "avg_ack_ingress_rate": 0.19115840579934168, - "avg_ack_egress_rate": 0.1492766485341716 - }, - "name": 
"telegraf", - "vhost": "metrics", - "durable": true, - "auto_delete": false, - "arguments": {}, - "node": "rabbit@testhost" - } -] -` - -const sampleExchangesResponse = ` -[ - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "direct", - "vhost": "\/", - "name": "" - }, - { - "message_stats": { - "publish_in_details": { - "rate": 0 - }, - "publish_in": 2, - "publish_out_details": { - "rate": 0 - }, - "publish_out": 1 - }, - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "fanout", - "vhost": "\/", - "name": "telegraf" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "direct", - "vhost": "\/", - "name": "amq.direct" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "fanout", - "vhost": "\/", - "name": "amq.fanout" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "headers", - "vhost": "\/", - "name": "amq.headers" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "headers", - "vhost": "\/", - "name": "amq.match" - }, - { - "arguments": { }, - "internal": true, - "auto_delete": false, - "durable": true, - "type": "topic", - "vhost": "\/", - "name": "amq.rabbitmq.log" - }, - { - "arguments": { }, - "internal": true, - "auto_delete": false, - "durable": true, - "type": "topic", - "vhost": "\/", - "name": "amq.rabbitmq.trace" - }, - { - "arguments": { }, - "internal": false, - "auto_delete": false, - "durable": true, - "type": "topic", - "vhost": "\/", - "name": "amq.topic" - } -] -` - func TestRabbitMQGeneratesMetrics(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - var rsp string + var jsonFilePath string switch r.URL.Path { case "/api/overview": - rsp = sampleOverviewResponse + jsonFilePath = "testdata/overview.json" case "/api/nodes": - rsp = sampleNodesResponse + jsonFilePath = "testdata/nodes.json" case "/api/queues": - rsp = sampleQueuesResponse + jsonFilePath = "testdata/queues.json" case "/api/exchanges": - rsp = sampleExchangesResponse + jsonFilePath = "testdata/exchanges.json" + case "/api/healthchecks/node/rabbit@vagrant-ubuntu-trusty-64": + jsonFilePath = "testdata/healthchecks.json" + case "/api/federation-links": + jsonFilePath = "testdata/federation-links.json" + case "/api/nodes/rabbit@vagrant-ubuntu-trusty-64/memory": + jsonFilePath = "testdata/memory.json" default: panic("Cannot handle request") } - fmt.Fprintln(w, rsp) + data, err := ioutil.ReadFile(jsonFilePath) + + if err != nil { + panic(fmt.Sprintf("could not read from data file %s", jsonFilePath)) + } + + w.Write(data) })) defer ts.Close() @@ -513,60 +50,152 @@ func TestRabbitMQGeneratesMetrics(t *testing.T) { URL: ts.URL, } - var acc testutil.Accumulator + acc := &testutil.Accumulator{} err := acc.GatherError(r.Gather) require.NoError(t, err) - intMetrics := []string{ - "messages", - "messages_ready", - "messages_unacked", - - "messages_acked", - "messages_delivered", - "messages_published", - - "channels", - "connections", - "consumers", - "exchanges", - "queues", - "clustering_listeners", - "amqp_listeners", + overviewMetrics := map[string]interface{}{ + "messages": 5, + "messages_ready": 32, + "messages_unacked": 27, + "messages_acked": 5246, + "messages_delivered": 5234, + "messages_delivered_get": 3333, + "messages_published": 5258, + "channels": 44, + 
"connections": 44, + "consumers": 65, + "exchanges": 43, + "queues": 62, + "clustering_listeners": 2, + "amqp_listeners": 2, + "return_unroutable": 10, + "return_unroutable_rate": 3.3, } + compareMetrics(t, overviewMetrics, acc, "rabbitmq_overview") - for _, metric := range intMetrics { - assert.True(t, acc.HasInt64Field("rabbitmq_overview", metric)) + queuesMetrics := map[string]interface{}{ + "consumers": 3, + "consumer_utilisation": 1.0, + "memory": 143776, + "message_bytes": 3, + "message_bytes_ready": 4, + "message_bytes_unacked": 5, + "message_bytes_ram": 6, + "message_bytes_persist": 7, + "messages": 44, + "messages_ready": 32, + "messages_unack": 44, + "messages_ack": 3457, + "messages_ack_rate": 9.9, + "messages_deliver": 22222, + "messages_deliver_rate": 333.4, + "messages_deliver_get": 3457, + "messages_deliver_get_rate": 0.2, + "messages_publish": 3457, + "messages_publish_rate": 11.2, + "messages_redeliver": 33, + "messages_redeliver_rate": 2.5, + "idle_since": "2015-11-01 8:22:14", + "slave_nodes": 1, + "synchronised_slave_nodes": 1, } + compareMetrics(t, queuesMetrics, acc, "rabbitmq_queue") - nodeIntMetrics := []string{ - "disk_free", - "disk_free_limit", - "fd_total", - "fd_used", - "mem_limit", - "mem_used", - "proc_total", - "proc_used", - "run_queue", - "sockets_total", - "sockets_used", - "running", + nodeMetrics := map[string]interface{}{ + "disk_free": 3776, + "disk_free_limit": 50000000, + "disk_free_alarm": 0, + "fd_total": 1024, + "fd_used": 63, + "mem_limit": 2503, + "mem_used": 159707080, + "mem_alarm": 1, + "proc_total": 1048576, + "proc_used": 783, + "run_queue": 0, + "sockets_total": 829, + "sockets_used": 45, + "uptime": 7464827, + "running": 1, + "health_check_status": 1, + "mnesia_disk_tx_count": 16, + "mnesia_ram_tx_count": 296, + "mnesia_disk_tx_count_rate": 1.1, + "mnesia_ram_tx_count_rate": 2.2, + "gc_num": 57280132, + "gc_bytes_reclaimed": 2533, + "gc_num_rate": 274.2, + "gc_bytes_reclaimed_rate": 16490856.3, + "io_read_avg_time": 983, + "io_read_avg_time_rate": 88.77, + "io_read_bytes": 1111, + "io_read_bytes_rate": 99.99, + "io_write_avg_time": 134, + "io_write_avg_time_rate": 4.32, + "io_write_bytes": 823, + "io_write_bytes_rate": 32.8, + "mem_connection_readers": 1234, + "mem_connection_writers": 5678, + "mem_connection_channels": 1133, + "mem_connection_other": 2840, + "mem_queue_procs": 2840, + "mem_queue_slave_procs": 0, + "mem_plugins": 1755976, + "mem_other_proc": 23056584, + "mem_metrics": 196536, + "mem_mgmt_db": 491272, + "mem_mnesia": 115600, + "mem_other_ets": 2121872, + "mem_binary": 418848, + "mem_msg_index": 42848, + "mem_code": 25179322, + "mem_atom": 1041593, + "mem_other_system": 14741981, + "mem_allocated_unused": 38208528, + "mem_reserved_unallocated": 0, + "mem_total": 83025920, } + compareMetrics(t, nodeMetrics, acc, "rabbitmq_node") - for _, metric := range nodeIntMetrics { - assert.True(t, acc.HasInt64Field("rabbitmq_node", metric)) + exchangeMetrics := map[string]interface{}{ + "messages_publish_in": 3678, + "messages_publish_in_rate": 3.2, + "messages_publish_out": 3677, + "messages_publish_out_rate": 5.1, } + compareMetrics(t, exchangeMetrics, acc, "rabbitmq_exchange") - assert.True(t, acc.HasMeasurement("rabbitmq_queue")) - - exchangeIntMetrics := []string{ - "messages_publish_in", - "messages_publish_out", + federationLinkMetrics := map[string]interface{}{ + "acks_uncommitted": 1, + "consumers": 2, + "messages_unacknowledged": 3, + "messages_uncommitted": 4, + "messages_unconfirmed": 5, + "messages_confirm": 67, + 
"messages_publish": 890, + "messages_return_unroutable": 1, } + compareMetrics(t, federationLinkMetrics, acc, "rabbitmq_federation") +} - for _, metric := range exchangeIntMetrics { - assert.True(t, acc.HasInt64Field("rabbitmq_exchange", metric)) +func compareMetrics(t *testing.T, expectedMetrics map[string]interface{}, + accumulator *testutil.Accumulator, measurementKey string) { + measurement, exist := accumulator.Get(measurementKey) + + assert.True(t, exist, "There is measurement %s", measurementKey) + assert.Equal(t, len(expectedMetrics), len(measurement.Fields)) + + for metricName, metricValue := range expectedMetrics { + actualMetricValue := measurement.Fields[metricName] + + if accumulator.HasStringField(measurementKey, metricName) { + assert.Equal(t, metricValue, actualMetricValue, + "Metric name: %s", metricName) + } else { + assert.InDelta(t, metricValue, actualMetricValue, 0e5, + "Metric name: %s", metricName) + } } } diff --git a/plugins/inputs/rabbitmq/testdata/exchanges.json b/plugins/inputs/rabbitmq/testdata/exchanges.json new file mode 100644 index 000000000..203c29a59 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/exchanges.json @@ -0,0 +1,22 @@ +[ + { + "message_stats": { + "publish_in_details": { + "rate": 3.2 + }, + "publish_in": 3678, + "publish_out_details": { + "rate": 5.1 + }, + "publish_out": 3677 + }, + "user_who_performed_action": "mistral_testuser_1", + "arguments": {}, + "internal": false, + "auto_delete": true, + "durable": false, + "type": "direct", + "vhost": "sorandomsorandom", + "name": "reply_a716f0523cd44941ad2ea6ce4a3869c3" + } +] \ No newline at end of file diff --git a/plugins/inputs/rabbitmq/testdata/federation-links.json b/plugins/inputs/rabbitmq/testdata/federation-links.json new file mode 100644 index 000000000..4cf514870 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/federation-links.json @@ -0,0 +1,63 @@ +[ + { + "node": "rabbit@rmqlocal", + "queue": "exampleLocalQueue", + "upstream_queue": "exampleUpstreamQueue", + "type": "queue", + "vhost": "/", + "upstream": "ExampleFederationUpstream", + "id": "8ba5218f", + "status": "running", + "local_connection": "", + "uri": "amqp://appsv03", + "timestamp": "2019-08-19 15:34:15", + "local_channel": { + "acks_uncommitted": 1, + "confirm": true, + "connection_details": { + "name": "", + "peer_host": "undefined", + "peer_port": "undefined" + }, + "consumer_count": 2, + "garbage_collection": { + "fullsweep_after": 65535, + "max_heap_size": 0, + "min_bin_vheap_size": 46422, + "min_heap_size": 233, + "minor_gcs": 203 + }, + "global_prefetch_count": 0, + "message_stats": { + "confirm": 67, + "confirm_details": { + "rate": 2 + }, + "publish": 890, + "publish_details": { + "rate": 2 + }, + "return_unroutable": 1, + "return_unroutable_details": { + "rate": 0.1 + } + }, + "messages_unacknowledged": 3, + "messages_uncommitted": 4, + "messages_unconfirmed": 5, + "name": "", + "node": "rabbit@rmqlocal", + "number": 1, + "prefetch_count": 0, + "reductions": 1926653, + "reductions_details": { + "rate": 1068 + }, + "state": "running", + "transactional": false, + "user": "none", + "user_who_performed_action": "none", + "vhost": "sorandomsorandom" + } + } +] diff --git a/plugins/inputs/rabbitmq/testdata/healthchecks.json b/plugins/inputs/rabbitmq/testdata/healthchecks.json new file mode 100644 index 000000000..1a36cf5fc --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/healthchecks.json @@ -0,0 +1 @@ +{"status":"ok"} \ No newline at end of file diff --git a/plugins/inputs/rabbitmq/testdata/memory.json 
b/plugins/inputs/rabbitmq/testdata/memory.json new file mode 100644 index 000000000..da252eb61 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/memory.json @@ -0,0 +1,24 @@ +{ + "memory": { + "connection_readers": 1234, + "connection_writers": 5678, + "connection_channels": 1133, + "connection_other": 2840, + "queue_procs": 2840, + "queue_slave_procs": 0, + "plugins": 1755976, + "other_proc": 23056584, + "metrics": 196536, + "mgmt_db": 491272, + "mnesia": 115600, + "other_ets": 2121872, + "binary": 418848, + "msg_index": 42848, + "code": 25179322, + "atom": 1041593, + "other_system": 14741981, + "allocated_unused": 38208528, + "reserved_unallocated": 0, + "total": 83025920 + } +} \ No newline at end of file diff --git a/plugins/inputs/rabbitmq/testdata/nodes.json b/plugins/inputs/rabbitmq/testdata/nodes.json new file mode 100644 index 000000000..42b7a4b7a --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/nodes.json @@ -0,0 +1,87 @@ +[ + { + "db_dir": "/var/lib/rabbitmq/mnesia/rabbit@vagrant-ubuntu-trusty-64", + "disk_free": 3776, + "disk_free_alarm": false, + "disk_free_details": { + "rate": 0.0 + }, + "disk_free_limit": 50000000, + "enabled_plugins": [ + "rabbitmq_management" + ], + "gc_num": 57280132, + "gc_num_details": { + "rate": 274.2 + }, + "gc_bytes_reclaimed": 2533, + "gc_bytes_reclaimed_details": { + "rate": 16490856.3 + }, + "fd_total": 1024, + "fd_used": 63, + "fd_used_details": { + "rate": 0.0 + }, + "io_read_avg_time": 983, + "io_read_avg_time_details": { + "rate": 88.77 + }, + "io_read_bytes": 1111, + "io_read_bytes_details": { + "rate": 99.99 + }, + "io_read_count": 1, + "io_read_count_details": { + "rate": 0.0 + }, + "io_sync_avg_time": 0, + "io_sync_avg_time_details": { + "rate": 0.0 + }, + "io_write_avg_time": 134, + "io_write_avg_time_details": { + "rate": 4.32 + }, + "io_write_bytes": 823, + "io_write_bytes_details": { + "rate": 32.8 + }, + "log_file": "/var/log/rabbitmq/rabbit@vagrant-ubuntu-trusty-64.log", + "mem_alarm": true, + "mem_limit": 2503, + "mem_used": 159707080, + "mem_used_details": { + "rate": 15185.6 + }, + "mnesia_disk_tx_count": 16, + "mnesia_disk_tx_count_details": { + "rate": 1.1 + }, + "mnesia_ram_tx_count": 296, + "mnesia_ram_tx_count_details": { + "rate": 2.2 + }, + "name": "rabbit@vagrant-ubuntu-trusty-64", + "net_ticktime": 60, + "os_pid": "14244", + "partitions": [], + "proc_total": 1048576, + "proc_used": 783, + "proc_used_details": { + "rate": 0.0 + }, + "processors": 1, + "rates_mode": "basic", + "run_queue": 0, + "running": true, + "sasl_log_file": "/var/log/rabbitmq/rabbit@vagrant-ubuntu-trusty-64-sasl.log", + "sockets_total": 829, + "sockets_used": 45, + "sockets_used_details": { + "rate": 0.0 + }, + "type": "disc", + "uptime": 7464827 + } +] \ No newline at end of file diff --git a/plugins/inputs/rabbitmq/testdata/overview.json b/plugins/inputs/rabbitmq/testdata/overview.json new file mode 100644 index 000000000..a4cbb2ad6 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/overview.json @@ -0,0 +1,63 @@ +{ + "message_stats": { + "ack": 5246, + "ack_details": { + "rate": 0.0 + }, + "deliver": 5234, + "deliver_details": { + "rate": 0.0 + }, + "deliver_get": 3333, + "deliver_get_details": { + "rate": 0.0 + }, + "publish": 5258, + "publish_details": { + "rate": 0.0 + }, + "return_unroutable": 10, + "return_unroutable_details": { + "rate": 3.3 + } + }, + "object_totals": { + "channels": 44, + "connections": 44, + "consumers": 65, + "exchanges": 43, + "queues": 62 + }, + "queue_totals": { + "messages": 5, + "messages_details": { + 
"rate": 0.0 + }, + "messages_ready": 32, + "messages_ready_details": { + "rate": 0.0 + }, + "messages_unacknowledged": 27, + "messages_unacknowledged_details": { + "rate": 0.0 + } + }, + "listeners": [ + { + "name": "rabbit@node-a", + "protocol": "amqp" + }, + { + "name": "rabbit@node-b", + "protocol": "amqp" + }, + { + "name": "rabbit@node-a", + "protocol": "clustering" + }, + { + "name": "rabbit@node-b", + "protocol": "clustering" + } + ] +} \ No newline at end of file diff --git a/plugins/inputs/rabbitmq/testdata/queues.json b/plugins/inputs/rabbitmq/testdata/queues.json new file mode 100644 index 000000000..294f78872 --- /dev/null +++ b/plugins/inputs/rabbitmq/testdata/queues.json @@ -0,0 +1,120 @@ +[ + { + "messages_details": { + "rate": 0.0 + }, + "messages": 44, + "messages_unacknowledged_details": { + "rate": 0.0 + }, + "messages_unacknowledged": 44, + "messages_ready_details": { + "rate": 0.0 + }, + "messages_ready": 32, + "reductions_details": { + "rate": 223.0 + }, + "reductions": 15875433, + "message_stats": { + "deliver_get_details": { + "rate": 0.2 + }, + "deliver_get": 3457, + "ack_details": { + "rate": 9.9 + }, + "ack": 3457, + "redeliver_details": { + "rate": 2.5 + }, + "redeliver": 33, + "deliver_no_ack_details": { + "rate": 0.0 + }, + "deliver_no_ack": 0, + "deliver_details": { + "rate": 333.4 + }, + "deliver": 22222, + "get_no_ack_details": { + "rate": 0.0 + }, + "get_no_ack": 0, + "get_details": { + "rate": 0.0 + }, + "get": 0, + "publish_details": { + "rate": 11.2 + }, + "publish": 3457 + }, + "node": "rabbit@rmqlocal-0.rmqlocal.ankorabbitstatefulset3.svc.cluster.local", + "arguments": { + "x-expires": 1800000, + "x-ha-policy": "all" + }, + "exclusive": false, + "auto_delete": false, + "durable": false, + "vhost": "sorandomsorandom", + "name": "reply_a716f0523cd44941ad2ea6ce4a3869c3", + "message_bytes_paged_out": 0, + "messages_paged_out": 0, + "idle_since": "2015-11-01 8:22:14", + "backing_queue_status": { + "avg_ack_egress_rate": 0.2374460025857711, + "avg_ack_ingress_rate": 0.2374460025857711, + "avg_egress_rate": 0.2374460025857711, + "avg_ingress_rate": 0.2374460025857711, + "delta": [ + "delta", + "undefined", + 0, + 0, + "undefined" + ], + "len": 0, + "mode": "default", + "next_seq_id": 3457, + "q1": 0, + "q2": 0, + "q3": 0, + "q4": 0, + "target_ram_count": 0 + }, + "head_message_timestamp": null, + "message_bytes_persistent": 7, + "message_bytes_ram": 6, + "message_bytes_unacknowledged": 5, + "message_bytes_ready": 4, + "message_bytes": 3, + "messages_persistent": 0, + "messages_unacknowledged_ram": 0, + "messages_ready_ram": 0, + "messages_ram": 0, + "garbage_collection": { + "minor_gcs": 314, + "fullsweep_after": 65535, + "min_heap_size": 233, + "min_bin_vheap_size": 46422, + "max_heap_size": 0 + }, + "state": "running", + "recoverable_slaves": null, + "memory": 143776, + "consumer_utilisation": 1.0, + "consumers": 3, + "exclusive_consumer_tag": null, + "effective_policy_definition": [], + "operator_policy": null, + "policy": null, + "slave_nodes":[ + "rabbit@ip-10-1-2-118" + ], + "synchronised_slave_nodes":[ + "rabbit@ip-10-1-2-118" + ] + } +] \ No newline at end of file diff --git a/plugins/inputs/redis/README.md b/plugins/inputs/redis/README.md index da4e8b71a..aa10c2887 100644 --- a/plugins/inputs/redis/README.md +++ b/plugins/inputs/redis/README.md @@ -15,6 +15,9 @@ ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## specify server password + # password = "s#cr@t%" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # 
tls_cert = "/etc/telegraf/cert.pem" @@ -77,8 +80,8 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - instantaneous_ops_per_sec(int, number) - total_net_input_bytes(int, bytes) - total_net_output_bytes(int, bytes) - - instantaneous_input_kbps(float, bytes) - - instantaneous_output_kbps(float, bytes) + - instantaneous_input_kbps(float, KB/sec) + - instantaneous_output_kbps(float, KB/sec) - rejected_connections(int, number) - sync_full(int, number) - sync_partial_ok(int, number) @@ -117,6 +120,23 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - expires(int, number) - avg_ttl(int, number) +- redis_cmdstat + Every Redis used command will have 3 new fields: + - calls(int, number) + - usec(int, mircoseconds) + - usec_per_call(float, microseconds) + +- redis_replication + - tags: + - replication_role + - replica_ip + - replica_port + - state (either "online", "wait_bgsave", or "send_bulk") + + - fields: + - lag(int, number) + - offset(int, number) + ### Tags: - All measurements have the following tags: @@ -127,6 +147,9 @@ Additionally the plugin also calculates the hit/miss ratio (keyspace\_hitrate) a - The redis_keyspace measurement has an additional database tag: - database +- The redis_cmdstat measurement has an additional tag: + - command + ### Example Output: Using this configuration: @@ -158,3 +181,8 @@ redis_keyspace: ``` > redis_keyspace,database=db1,host=host,server=localhost,port=6379,replication_role=master keys=1i,expires=0i,avg_ttl=0i 1493101350000000000 ``` + +redis_command: +``` +> redis_cmdstat,command=publish,host=host,port=6379,replication_role=master,server=localhost calls=68113i,usec=325146i,usec_per_call=4.77 1559227136000000000 +``` diff --git a/plugins/inputs/redis/redis.go b/plugins/inputs/redis/redis.go index 766463cfd..598c6c4f8 100644 --- a/plugins/inputs/redis/redis.go +++ b/plugins/inputs/redis/redis.go @@ -4,8 +4,8 @@ import ( "bufio" "fmt" "io" - "log" "net/url" + "regexp" "strconv" "strings" "sync" @@ -18,9 +18,12 @@ import ( ) type Redis struct { - Servers []string + Servers []string + Password string tls.ClientConfig + Log telegraf.Logger + clients []Client initialized bool } @@ -36,7 +39,7 @@ type RedisClient struct { } func (r *RedisClient) Info() *redis.StringCmd { - return r.client.Info() + return r.client.Info("ALL") } func (r *RedisClient) BaseTags() map[string]string { @@ -47,6 +50,8 @@ func (r *RedisClient) BaseTags() map[string]string { return tags } +var replicationSlaveMetricPrefix = regexp.MustCompile(`^slave\d+`) + var sampleConfig = ` ## specify servers via a url matching: ## [protocol://][:password]@address[:port] @@ -59,6 +64,9 @@ var sampleConfig = ` ## If no port is specified, 6379 is used servers = ["tcp://localhost:6379"] + ## specify server password + # password = "s#cr@t%" + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -94,13 +102,13 @@ func (r *Redis) init(acc telegraf.Accumulator) error { for i, serv := range r.Servers { if !strings.HasPrefix(serv, "tcp://") && !strings.HasPrefix(serv, "unix://") { - log.Printf("W! 
[inputs.redis]: server URL found without scheme; please update your configuration file") + r.Log.Warn("Server URL found without scheme; please update your configuration file") serv = "tcp://" + serv } u, err := url.Parse(serv) if err != nil { - return fmt.Errorf("Unable to parse to address %q: %v", serv, err) + return fmt.Errorf("unable to parse to address %q: %s", serv, err.Error()) } password := "" @@ -110,6 +118,9 @@ func (r *Redis) init(acc telegraf.Accumulator) error { password = pw } } + if len(r.Password) > 0 { + password = r.Password + } var address string if u.Scheme == "unix" { @@ -241,11 +252,25 @@ func gatherInfoOutput( gatherKeyspaceLine(name, kline, acc, tags) continue } + if section == "Commandstats" { + kline := strings.TrimSpace(parts[1]) + gatherCommandstateLine(name, kline, acc, tags) + continue + } + if section == "Replication" && replicationSlaveMetricPrefix.MatchString(name) { + kline := strings.TrimSpace(parts[1]) + gatherReplicationLine(name, kline, acc, tags) + continue + } + metric = name } val := strings.TrimSpace(parts[1]) + // Some percentage values have a "%" suffix that we need to get rid of before int/float conversion + val = strings.TrimSuffix(val, "%") + // Try parsing as int if ival, err := strconv.ParseInt(val, 10, 64); err == nil { switch name { @@ -314,6 +339,95 @@ func gatherKeyspaceLine( } } +// Parse the special cmdstat lines. +// Example: +// cmdstat_publish:calls=33791,usec=208789,usec_per_call=6.18 +// Tag: cmdstat=publish; Fields: calls=33791i,usec=208789i,usec_per_call=6.18 +func gatherCommandstateLine( + name string, + line string, + acc telegraf.Accumulator, + global_tags map[string]string, +) { + if !strings.HasPrefix(name, "cmdstat") { + return + } + + fields := make(map[string]interface{}) + tags := make(map[string]string) + for k, v := range global_tags { + tags[k] = v + } + tags["command"] = strings.TrimPrefix(name, "cmdstat_") + parts := strings.Split(line, ",") + for _, part := range parts { + kv := strings.Split(part, "=") + if len(kv) != 2 { + continue + } + + switch kv[0] { + case "calls": + fallthrough + case "usec": + ival, err := strconv.ParseInt(kv[1], 10, 64) + if err == nil { + fields[kv[0]] = ival + } + case "usec_per_call": + fval, err := strconv.ParseFloat(kv[1], 64) + if err == nil { + fields[kv[0]] = fval + } + } + } + acc.AddFields("redis_cmdstat", fields, tags) +} + +// Parse the special Replication line +// Example: +// slave0:ip=127.0.0.1,port=7379,state=online,offset=4556468,lag=0 +// This line will only be visible when a node has a replica attached. 
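+// For the example line above this produces (in addition to the global tags) the tags
+// replica_id=0, replication_role=slave, replica_ip=127.0.0.1, replica_port=7379,
+// state=online and the fields offset=4556468i,lag=0i.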
+func gatherReplicationLine( + name string, + line string, + acc telegraf.Accumulator, + global_tags map[string]string, +) { + fields := make(map[string]interface{}) + tags := make(map[string]string) + for k, v := range global_tags { + tags[k] = v + } + + tags["replica_id"] = strings.TrimLeft(name, "slave") + tags["replication_role"] = "slave" + + parts := strings.Split(line, ",") + for _, part := range parts { + kv := strings.Split(part, "=") + if len(kv) != 2 { + continue + } + + switch kv[0] { + case "ip": + tags["replica_ip"] = kv[1] + case "port": + tags["replica_port"] = kv[1] + case "state": + tags[kv[0]] = kv[1] + default: + ival, err := strconv.ParseInt(kv[1], 10, 64) + if err == nil { + fields[kv[0]] = ival + } + } + } + + acc.AddFields("redis_replication", fields, tags) +} + func init() { inputs.Add("redis", func() telegraf.Input { return &Redis{} diff --git a/plugins/inputs/redis/redis_test.go b/plugins/inputs/redis/redis_test.go index fd16bbdd9..637b464f9 100644 --- a/plugins/inputs/redis/redis_test.go +++ b/plugins/inputs/redis/redis_test.go @@ -20,6 +20,7 @@ func TestRedisConnect(t *testing.T) { addr := fmt.Sprintf(testutil.GetLocalHost() + ":6379") r := &Redis{ + Log: testutil.Logger{}, Servers: []string{addr}, } @@ -49,6 +50,8 @@ func TestRedis_ParseMetrics(t *testing.T) { "used_memory_rss": int64(811008), "used_memory_peak": int64(1003936), "used_memory_lua": int64(33792), + "used_memory_peak_perc": float64(93.58), + "used_memory_dataset_perc": float64(20.27), "mem_fragmentation_ratio": float64(0.81), "loading": int64(0), "rdb_changes_since_last_save": int64(0), @@ -80,7 +83,7 @@ func TestRedis_ParseMetrics(t *testing.T) { "pubsub_channels": int64(0), "pubsub_patterns": int64(0), "latest_fork_usec": int64(0), - "connected_slaves": int64(0), + "connected_slaves": int64(2), "master_repl_offset": int64(0), "repl_backlog_active": int64(0), "repl_backlog_size": int64(1048576), @@ -116,6 +119,52 @@ func TestRedis_ParseMetrics(t *testing.T) { } acc.AssertContainsTaggedFields(t, "redis", fields, tags) acc.AssertContainsTaggedFields(t, "redis_keyspace", keyspaceFields, keyspaceTags) + + cmdstatSetTags := map[string]string{"host": "redis.net", "replication_role": "master", "command": "set"} + cmdstatSetFields := map[string]interface{}{ + "calls": int64(261265), + "usec": int64(1634157), + "usec_per_call": float64(6.25), + } + acc.AssertContainsTaggedFields(t, "redis_cmdstat", cmdstatSetFields, cmdstatSetTags) + + cmdstatCommandTags := map[string]string{"host": "redis.net", "replication_role": "master", "command": "command"} + cmdstatCommandFields := map[string]interface{}{ + "calls": int64(1), + "usec": int64(990), + "usec_per_call": float64(990.0), + } + acc.AssertContainsTaggedFields(t, "redis_cmdstat", cmdstatCommandFields, cmdstatCommandTags) + + replicationTags := map[string]string{ + "host": "redis.net", + "replication_role": "slave", + "replica_id": "0", + "replica_ip": "127.0.0.1", + "replica_port": "7379", + "state": "online", + } + replicationFields := map[string]interface{}{ + "lag": int64(0), + "offset": int64(4556468), + } + + acc.AssertContainsTaggedFields(t, "redis_replication", replicationFields, replicationTags) + + replicationTags = map[string]string{ + "host": "redis.net", + "replication_role": "slave", + "replica_id": "1", + "replica_ip": "127.0.0.1", + "replica_port": "8379", + "state": "send_bulk", + } + replicationFields = map[string]interface{}{ + "lag": int64(1), + "offset": int64(0), + } + + acc.AssertContainsTaggedFields(t, "redis_replication", 
replicationFields, replicationTags) } const testOutput = `# Server @@ -152,6 +201,8 @@ used_memory_peak_human:980.41K used_memory_lua:33792 mem_fragmentation_ratio:0.81 mem_allocator:libc +used_memory_peak_perc:93.58% +used_memory_dataset_perc:20.27% # Persistence loading:0 @@ -189,7 +240,9 @@ latest_fork_usec:0 # Replication role:master -connected_slaves:0 +connected_slaves:2 +slave0:ip=127.0.0.1,port=7379,state=online,offset=4556468,lag=0 +slave1:ip=127.0.0.1,port=8379,state=send_bulk,offset=0,lag=1 master_replid:8c4d7b768b26826825ceb20ff4a2c7c54616350b master_replid2:0000000000000000000000000000000000000000 master_repl_offset:0 @@ -205,6 +258,10 @@ used_cpu_user:0.05 used_cpu_sys_children:0.00 used_cpu_user_children:0.00 +# Commandstats +cmdstat_set:calls=261265,usec=1634157,usec_per_call=6.25 +cmdstat_command:calls=1,usec=990,usec_per_call=990.00 + # Keyspace db0:keys=2,expires=0,avg_ttl=0 diff --git a/plugins/inputs/rethinkdb/README.md b/plugins/inputs/rethinkdb/README.md new file mode 100644 index 000000000..d10453ace --- /dev/null +++ b/plugins/inputs/rethinkdb/README.md @@ -0,0 +1,61 @@ +# RethinkDB Input + +Collect metrics from [RethinkDB](https://www.rethinkdb.com/). + +### Configuration + +This section contains the default TOML to configure the plugin. You can +generate it using `telegraf --usage rethinkdb`. + +```toml +[[inputs.rethinkdb]] + ## An array of URI to gather stats about. Specify an ip or hostname + ## with optional port add password. ie, + ## rethinkdb://user:auth_key@10.10.3.30:28105, + ## rethinkdb://10.10.3.33:18832, + ## 10.0.0.1:10000, etc. + servers = ["127.0.0.1:28015"] + + ## If you use actual rethinkdb of > 2.3.0 with username/password authorization, + ## protocol have to be named "rethinkdb2" - it will use 1_0 H. + # servers = ["rethinkdb2://username:password@127.0.0.1:28015"] + + ## If you use older versions of rethinkdb (<2.2) with auth_key, protocol + ## have to be named "rethinkdb". 
+ # servers = ["rethinkdb://username:auth_key@127.0.0.1:28015"] +``` + +### Metrics + +- rethinkdb + - tags: + - type + - ns + - rethinkdb_host + - rethinkdb_hostname + - fields: + - cache_bytes_in_use (integer, bytes) + - disk_read_bytes_per_sec (integer, reads) + - disk_read_bytes_total (integer, bytes) + - disk_written_bytes_per_sec (integer, bytes) + - disk_written_bytes_total (integer, bytes) + - disk_usage_data_bytes (integer, bytes) + - disk_usage_garbage_bytes (integer, bytes) + - disk_usage_metadata_bytes (integer, bytes) + - disk_usage_preallocated_bytes (integer, bytes) + ++ rethinkdb_engine + - tags: + - type + - ns + - rethinkdb_host + - rethinkdb_hostname + - fields: + - active_clients (integer, clients) + - clients (integer, clients) + - queries_per_sec (integer, queries) + - total_queries (integer, queries) + - read_docs_per_sec (integer, reads) + - total_reads (integer, reads) + - written_docs_per_sec (integer, writes) + - total_writes (integer, writes) diff --git a/plugins/inputs/riak/riak.go b/plugins/inputs/riak/riak.go index 9ddbbfa65..19f622289 100644 --- a/plugins/inputs/riak/riak.go +++ b/plugins/inputs/riak/riak.go @@ -127,7 +127,7 @@ func (r *Riak) gatherServer(s string, acc telegraf.Accumulator) error { // Successful responses will always return status code 200 if resp.StatusCode != http.StatusOK { - return fmt.Errorf("riak responded with unexepcted status code %d", resp.StatusCode) + return fmt.Errorf("riak responded with unexpected status code %d", resp.StatusCode) } // Decode the response JSON into a new stats struct diff --git a/plugins/inputs/salesforce/README.md b/plugins/inputs/salesforce/README.md index 5ee0f6a3d..6883f3a90 100644 --- a/plugins/inputs/salesforce/README.md +++ b/plugins/inputs/salesforce/README.md @@ -10,7 +10,7 @@ It fetches its data from the [limits endpoint](https://developer.salesforce.com/ [[inputs.salesforce]] username = "your_username" password = "your_password" - ## (Optional) security tokjen + ## (Optional) security token security_token = "your_security_token" ## (Optional) environment type (sandbox or production) ## default is: production @@ -21,7 +21,7 @@ It fetches its data from the [limits endpoint](https://developer.salesforce.com/ ### Measurements & Fields: -Salesforce provide one measurment named "salesforce". +Salesforce provide one measurement named "salesforce". Each entry is converted to snake\_case and 2 fields are created. - \_max represents the limit threshold diff --git a/plugins/inputs/salesforce/salesforce.go b/plugins/inputs/salesforce/salesforce.go index 096550db5..b66266d3f 100644 --- a/plugins/inputs/salesforce/salesforce.go +++ b/plugins/inputs/salesforce/salesforce.go @@ -5,6 +5,7 @@ import ( "encoding/xml" "errors" "fmt" + "io" "io/ioutil" "net/http" "net/url" @@ -165,7 +166,7 @@ func (s *Salesforce) getLoginEndpoint() (string, error) { } } -// Authenticate with Salesfroce +// Authenticate with Salesforce func (s *Salesforce) login() error { if s.Username == "" || s.Password == "" { return errors.New("missing username or password") @@ -200,6 +201,11 @@ func (s *Salesforce) login() error { return err } defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + // ignore the err here; LimitReader returns io.EOF and we're not interested in read errors. 
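+		// Quote at most the first 200 bytes of the body so the error message stays short.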
+ body, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 200)) + return fmt.Errorf("%s returned HTTP status %s: %q", loginEndpoint, resp.Status, body) + } respBody, err := ioutil.ReadAll(resp.Body) if err != nil { diff --git a/plugins/inputs/sensors/README.md b/plugins/inputs/sensors/README.md index 9075bda72..19952fd82 100644 --- a/plugins/inputs/sensors/README.md +++ b/plugins/inputs/sensors/README.md @@ -18,7 +18,7 @@ This plugin collects sensor metrics with the `sensors` executable from the lm-se ``` ### Measurements & Fields: -Fields are created dynamicaly depending on the sensors. All fields are float. +Fields are created dynamically depending on the sensors. All fields are float. ### Tags: diff --git a/plugins/inputs/sflow/README.md b/plugins/inputs/sflow/README.md new file mode 100644 index 000000000..66d556e17 --- /dev/null +++ b/plugins/inputs/sflow/README.md @@ -0,0 +1,121 @@ +# SFlow Input Plugin + +The SFlow Input Plugin provides support for acting as an SFlow V5 collector in +accordance with the specification from [sflow.org](https://sflow.org/). + +Currently only Flow Samples of Ethernet / IPv4 & IPv4 TCP & UDP headers are +turned into metrics. Counters and other header samples are ignored. + +#### Series Cardinality Warning + +This plugin may produce a high number of series which, when not controlled +for, will cause high load on your database. Use the following techniques to +avoid cardinality issues: + +- Use [metric filtering][] options to exclude unneeded measurements and tags. +- Write to a database with an appropriate [retention policy][]. +- Limit series cardinality in your database using the + [max-series-per-database][] and [max-values-per-tag][] settings. +- Consider using the [Time Series Index][tsi]. +- Monitor your databases [series cardinality][]. +- Consult the [InfluxDB documentation][influx-docs] for the most up-to-date techniques. + +### Configuration + +```toml +[[inputs.sflow]] + ## Address to listen for sFlow packets. + ## example: service_address = "udp://:6343" + ## service_address = "udp4://:6343" + ## service_address = "udp6://:6343" + service_address = "udp://:6343" + + ## Set the size of the operating system's receive buffer. 
+ ## example: read_buffer_size = "64KiB" + # read_buffer_size = "" +``` + +### Metrics + +- sflow + - tags: + - agent_address (IP address of the agent that obtained the sflow sample and sent it to this collector) + - source_id_type(source_id_type field of flow_sample or flow_sample_expanded structures) + - source_id_index(source_id_index field of flow_sample or flow_sample_expanded structures) + - input_ifindex (value (input) field of flow_sample or flow_sample_expanded structures) + - output_ifindex (value (output) field of flow_sample or flow_sample_expanded structures) + - sample_direction (source_id_index, netif_index_in and netif_index_out) + - header_protocol (header_protocol field of sampled_header structures) + - ether_type (eth_type field of an ETHERNET-ISO88023 header) + - src_ip (source_ipaddr field of IPv4 or IPv6 structures) + - src_port (src_port field of TCP or UDP structures) + - src_port_name (src_port) + - src_mac (source_mac_addr field of an ETHERNET-ISO88023 header) + - src_vlan (src_vlan field of extended_switch structure) + - src_priority (src_priority field of extended_switch structure) + - src_mask_len (src_mask_len field of extended_router structure) + - dst_ip (destination_ipaddr field of IPv4 or IPv6 structures) + - dst_port (dst_port field of TCP or UDP structures) + - dst_port_name (dst_port) + - dst_mac (destination_mac_addr field of an ETHERNET-ISO88023 header) + - dst_vlan (dst_vlan field of extended_switch structure) + - dst_priority (dst_priority field of extended_switch structure) + - dst_mask_len (dst_mask_len field of extended_router structure) + - next_hop (next_hop field of extended_router structure) + - ip_version (ip_ver field of IPv4 or IPv6 structures) + - ip_protocol (ip_protocol field of IPv4 or IPv6 structures) + - ip_dscp (ip_dscp field of IPv4 or IPv6 structures) + - ip_ecn (ecn field of IPv4 or IPv6 structures) + - tcp_urgent_pointer (urgent_pointer field of TCP structure) + - fields: + - bytes (integer, the product of frame_length and packets) + - drops (integer, drops field of flow_sample or flow_sample_expanded structures) + - packets (integer, sampling_rate field of flow_sample or flow_sample_expanded structures) + - frame_length (integer, frame_length field of sampled_header structures) + - header_size (integer, header_size field of sampled_header structures) + - ip_fragment_offset (integer, ip_ver field of IPv4 structures) + - ip_header_length (integer, ip_ver field of IPv4 structures) + - ip_total_length (integer, ip_total_len field of IPv4 structures) + - ip_ttl (integer, ip_ttl field of IPv4 structures or ip_hop_limit field IPv6 structures) + - tcp_header_length (integer, size field of TCP structure. This value is specified in 32-bit words. It must be multiplied by 4 to produce a value in bytes.) + - tcp_window_size (integer, window_size field of TCP structure) + - udp_length (integer, length field of UDP structures) + - ip_flags (integer, ip_ver field of IPv4 structures) + - tcp_flags (integer, TCP flags of TCP IP header (IPv4 or IPv6)) + +### Troubleshooting + +The [sflowtool][] utility can be used to print sFlow packets, and compared +against the metrics produced by Telegraf. +``` +sflowtool -p 6343 +``` + +If opening an issue, in addition to the output of sflowtool it will also be +helpful to collect a packet capture. 
Adjust the interface, host and port as
+needed:
+```
+$ sudo tcpdump -s 0 -i eth0 -w telegraf-sflow.pcap host 127.0.0.1 and port 6343
+```
+
+[sflowtool]: https://github.com/sflow/sflowtool
+
+### Example Output
+```
+sflow,agent_address=0.0.0.0,dst_ip=10.0.0.2,dst_mac=ff:ff:ff:ff:ff:ff,dst_port=40042,ether_type=IPv4,header_protocol=ETHERNET-ISO88023,input_ifindex=6,ip_dscp=27,ip_ecn=0,output_ifindex=1073741823,source_id_index=3,source_id_type=0,src_ip=10.0.0.1,src_mac=ff:ff:ff:ff:ff:ff,src_port=443 bytes=1570i,drops=0i,frame_length=157i,header_length=128i,ip_flags=2i,ip_fragment_offset=0i,ip_total_length=139i,ip_ttl=42i,sampling_rate=10i,tcp_header_length=0i,tcp_urgent_pointer=0i,tcp_window_size=14i 1584473704793580447
+```
+
+### Reference Documentation
+
+This sFlow implementation was built from the reference document
+[sflow.org/sflow_version_5.txt][sflow_version_5].
+
+
+[metric filtering]: https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#metric-filtering
+[retention policy]: https://docs.influxdata.com/influxdb/latest/guides/downsampling_and_retention/
+[max-series-per-database]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-series-per-database-1000000
+[max-values-per-tag]: https://docs.influxdata.com/influxdb/latest/administration/config/#max-values-per-tag-100000
+[tsi]: https://docs.influxdata.com/influxdb/latest/concepts/time-series-index/
+[series cardinality]: https://docs.influxdata.com/influxdb/latest/query_language/spec/#show-cardinality
+[influx-docs]: https://docs.influxdata.com/influxdb/latest/
+[sflow_version_5]: https://sflow.org/sflow_version_5.txt
diff --git a/plugins/inputs/sflow/binaryio/minreader.go b/plugins/inputs/sflow/binaryio/minreader.go
new file mode 100644
index 000000000..35ccdbcf2
--- /dev/null
+++ b/plugins/inputs/sflow/binaryio/minreader.go
@@ -0,0 +1,37 @@
+package binaryio
+
+import "io"
+
+// MinimumReader is the implementation for MinReader.
+type MinimumReader struct {
+	R                      io.Reader
+	MinNumberOfBytesToRead int64 // Min number of bytes we need to read from the reader
+}
+
+// MinReader reads from R but ensures that at least N bytes are read from the reader.
+// Callers should call Close() when they are done reading.
+// Closing the MinReader will read and discard any unread bytes up to MinNumberOfBytesToRead.
+// Closing the MinReader does NOT close the underlying reader.
+// The underlying implementation is a MinimumReader, which implements io.ReadCloser.
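+// For example, wrapping a reader as MinReader(r, recordLength) lets a decoder read only
+// the fields it needs and then call Close() to skip over the rest of that record.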
+func MinReader(r io.Reader, minNumberOfBytesToRead int64) *MinimumReader { + return &MinimumReader{ + R: r, + MinNumberOfBytesToRead: minNumberOfBytesToRead, + } +} + +func (r *MinimumReader) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.MinNumberOfBytesToRead -= int64(n) + return n, err +} + +// Close does not close the underlying reader, only the MinimumReader +func (r *MinimumReader) Close() error { + if r.MinNumberOfBytesToRead > 0 { + b := make([]byte, r.MinNumberOfBytesToRead) + _, err := r.R.Read(b) + return err + } + return nil +} diff --git a/plugins/inputs/sflow/binaryio/minreader_test.go b/plugins/inputs/sflow/binaryio/minreader_test.go new file mode 100644 index 000000000..081564b3e --- /dev/null +++ b/plugins/inputs/sflow/binaryio/minreader_test.go @@ -0,0 +1,39 @@ +package binaryio + +import ( + "bytes" + "testing" +) + +func TestMinReader(t *testing.T) { + b := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + r := bytes.NewBuffer(b) + + mr := MinReader(r, 10) + + toRead := make([]byte, 5) + n, err := mr.Read(toRead) + if err != nil { + t.Error(err) + } + if n != 5 { + t.Error("Expected n to be 5, but was ", n) + } + if string(toRead) != string([]byte{1, 2, 3, 4, 5}) { + t.Error("expected 5 specific bytes to be read") + } + err = mr.Close() + if err != nil { + t.Error(err) + } + n, err = r.Read(toRead) // read from the outer stream + if err != nil { + t.Error(err) + } + if n != 5 { + t.Error("Expected n to be 5, but was ", n) + } + if string(toRead) != string([]byte{11, 12, 13, 14, 15}) { + t.Error("expected the last 5 bytes to be read") + } +} diff --git a/plugins/inputs/sflow/decoder_test.go b/plugins/inputs/sflow/decoder_test.go new file mode 100644 index 000000000..c6e3916b8 --- /dev/null +++ b/plugins/inputs/sflow/decoder_test.go @@ -0,0 +1,758 @@ +package sflow + +import ( + "bytes" + "encoding/hex" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestIPv4SW(t *testing.T) { + str := `00000005` + // version + `00000001` + //address type + `c0a80102` + // ip address + `00000010` + // sub agent id + `0000f3d4` + // sequence number + `0bfa047f` + // uptime + `00000002` + // sample count + `00000001` + // sample type + `000000d0` + // sample data length + `0001210a` + // sequence number + `000001fe` + // source id 00 = source id type, 0001fe = source id index + `00000400` + // sampling rate.. apparently this should be input if index???? 
+ `04842400` + // sample pool + `00000000` + // drops + `000001fe` + // input if index + `00000200` + // output if index + `00000002` + // flow records count + `00000001` + // FlowFormat + `00000090` + // flow length + `00000001` + // header protocol + `0000010b` + // Frame length + `00000004` + // stripped octets + `00000080` + // header length + `000c2936d3d6` + // dest mac + `94c691aa9760` + // source mac + `0800` + // etype code: ipv4 + `4500` + // dscp + ecn + `00f9` + // total length + `f190` + // identification + `4000` + // fragment offset + flags + `40` + // ttl + `11` + // protocol + `b4f5` + // header checksum + `c0a80913` + // source ip + `c0a8090a` + // dest ip + `00a1` + // source port + `ba05` + // dest port + `00e5` + // udp length + // rest of header/flowSample we ignore + `641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101` + + // next flow record - ignored + `000003e90000001000000009000000000000000900000000` + + // next sample + `00000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000` + packet, err := hex.DecodeString(str) + require.NoError(t, err) + + actual := []telegraf.Metric{} + dc := NewDecoder() + dc.OnPacket(func(p *V5Format) { + metrics, err := makeMetrics(p) + require.NoError(t, err) + actual = append(actual, metrics...) + }) + buf := bytes.NewReader(packet) + err = dc.Decode(buf) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "192.168.1.2", + "dst_ip": "192.168.9.10", + "dst_mac": "00:0c:29:36:d3:d6", + "dst_port": "47621", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "510", + "output_ifindex": "512", + "sample_direction": "ingress", + "source_id_index": "510", + "source_id_type": "0", + "src_ip": "192.168.9.19", + "src_mac": "94:c6:91:aa:97:60", + "src_port": "161", + }, + map[string]interface{}{ + "bytes": uint64(0x042c00), + "drops": uint64(0x00), + "frame_length": uint64(0x010b), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0xf9), + "ip_ttl": uint64(0x40), + "sampling_rate": uint64(0x0400), + "udp_length": uint64(0xe5), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "192.168.1.2", + "dst_ip": "192.168.9.10", + "dst_mac": "00:0c:29:36:d3:d6", + "dst_port": "514", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "528", + "output_ifindex": "512", + "sample_direction": "ingress", + "source_id_index": "528", + "source_id_type": "0", + "src_ip": "192.168.8.21", + "src_mac": "fc:ec:da:44:00:8f", + "src_port": "39529", + }, + map[string]interface{}{ + "bytes": uint64(0x25c000), + "drops": uint64(0x00), + "frame_length": uint64(0x97), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x81), + "ip_ttl": uint64(0x3f), + "sampling_rate": uint64(0x4000), + 
"udp_length": uint64(0x6d), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func BenchmarkDecodeIPv4SW(b *testing.B) { + packet, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") + require.NoError(b, err) + + dc := NewDecoder() + require.NoError(b, err) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, err = dc.DecodeOnePacket(bytes.NewBuffer(packet)) + if err != nil { + panic(err) + } + } +} + +func TestExpandFlow(t *testing.T) { + packet, err := hex.DecodeString("00000005000000010a00015000000000000f58998ae119780000000300000003000000c4000b62a90000000000100c840000040024fb7e1e0000000000000000001017840000000000100c8400000001000000010000009000000001000005bc0000000400000080001b17000130001201f58d44810023710800450205a6305440007e06ee92ac100016d94d52f505997e701fa1e17aff62574a50100200355f000000ffff00000b004175746f72697a7a6174610400008040ffff000400008040050031303030320500313030302004000000000868a200000000000000000860a200000000000000000003000000c40003cecf000000000010170400004000a168ac1c000000000000000000101784000000000010170400000001000000010000009000000001000005f200000004000000800024e8324338d4ae52aa0b54810020060800450005dc5420400080061397c0a8060cc0a806080050efcfbb25bad9a21c839a501000fff54000008a55f70975a0ff88b05735597ae274bd81fcba17e6e9206b8ea0fb07d05fc27dad06cfe3fdba5d2fc4d057b0add711e596cbe5e9b4bbe8be59cd77537b7a89f7414a628b736d00000003000000c0000c547a0000000000100c04000004005bc3c3b50000000000000000001017840000000000100c0400000001000000010000008c000000010000007e000000040000007a001b17000130001201f58d448100237108004500006824ea4000ff32c326d94d5105501018f02e88d003000001dd39b1d025d1c68689583b2ab21522d5b5a959642243804f6d51e63323091cc04544285433eb3f6b29e1046a6a2fa7806319d62041d8fa4bd25b7cd85b8db54202054a077ac11de84acbe37a550004") + require.NoError(t, err) + + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "217.77.82.245", + "dst_mac": "00:1b:17:00:01:30", + "dst_port": "32368", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "output_ifindex": "1051780", + "sample_direction": "egress", + "source_id_index": "1051780", + "source_id_type": "0", + "src_ip": "172.16.0.22", + "src_mac": "00:12:01:f5:8d:44", + "src_port": "1433", + }, + map[string]interface{}{ + "bytes": uint64(0x16f000), + "drops": uint64(0x00), + "frame_length": 
uint64(0x05bc), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05a6), + "ip_ttl": uint64(0x7e), + "sampling_rate": uint64(0x0400), + "tcp_header_length": uint64(0x14), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x0200), + "ip_dscp": "0", + "ip_ecn": "2", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "192.168.6.8", + "dst_mac": "00:24:e8:32:43:38", + "dst_port": "61391", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "output_ifindex": "1054468", + "sample_direction": "egress", + "source_id_index": "1054468", + "source_id_type": "0", + "src_ip": "192.168.6.12", + "src_mac": "d4:ae:52:aa:0b:54", + "src_port": "80", + }, + map[string]interface{}{ + "bytes": uint64(0x017c8000), + "drops": uint64(0x00), + "frame_length": uint64(0x05f2), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05dc), + "ip_ttl": uint64(0x80), + "sampling_rate": uint64(0x4000), + "tcp_header_length": uint64(0x14), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0xff), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "80.16.24.240", + "dst_mac": "00:1b:17:00:01:30", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "output_ifindex": "1051652", + "sample_direction": "egress", + "source_id_index": "1051652", + "source_id_type": "0", + "src_ip": "217.77.81.5", + "src_mac": "00:12:01:f5:8d:44", + }, + map[string]interface{}{ + "bytes": uint64(0x01f800), + "drops": uint64(0x00), + "frame_length": uint64(0x7e), + "header_length": uint64(0x7a), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x68), + "ip_ttl": uint64(0xff), + "sampling_rate": uint64(0x0400), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestIPv4SWRT(t *testing.T) { + packet, err := 
hex.DecodeString("000000050000000189dd4f010000000000003d4f21151ad40000000600000001000000bc354b97090000020c000013b175792bea000000000000028f0000020c0000000300000001000000640000000100000058000000040000005408b2587a57624c16fc0b61a5080045000046c3e440003a1118a0052aada7569e5ab367a6e35b0032d7bbf1f2fb2eb2490a97f87abc31e135834be367000002590000ffffffffffffffff02add830d51e0aec14cf000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e32a000000160000000b00000001000000a88b8ffb57000002a2000013b12e344fd800000000000002a20000028f0000000300000001000000500000000100000042000000040000003e4c16fc0b6202c03e0fdecafe080045000030108000007d11fe45575185a718693996f0570e8c001c20614ad602003fd6d4afa6a6d18207324000271169b00000000003e90000001000000000000000000000000000000000000003ea000000100000000189dd4f210000000f0000001800000001000000e8354b970a0000020c000013b175793f9b000000000000028f0000020c00000003000000010000009000000001000001a500000004000000800231466d0b2c4c16fc0b61a5080045000193198f40003a114b75052aae1f5f94c778678ef24d017f50ea7622287c30799e1f7d45932d01ca92c46d930000927c0000ffffffffffffffff02ad0eea6498953d1c7ebb6dbdf0525c80e1a9a62bacfea92f69b7336c2f2f60eba0593509e14eef167eb37449f05ad70b8241c1a46d000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000160000001000000001000000e8354b970b0000020c000013b17579534c000000000000028f0000020c00000003000000010000009000000001000000b500000004000000800231466d0b2c4c16fc0b61a50800450000a327c240003606fd67b93c706a021ff365045fe8a0976d624df8207083501800edb31b0000485454502f312e3120323030204f4b0d0a5365727665723a2050726f746f636f6c20485454500d0a436f6e74656e742d4c656e6774683a20313430340d0a436f6e6e656374696f6e3a20000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000170000001000000001000000e8354b970c0000020c000013b1757966fd000000000000028f0000020c000000030000000100000090000000010000018e00000004000000800231466d0b2c4c16fc0b61a508004500017c7d2c40003a116963052abd8d021c940e67e7e0d501682342dbe7936bd47ef487dee5591ec1b24d83622e000072250000ffffffffffffffff02ad0039d8ba86a90017071d76b177de4d8c4e23bcaaaf4d795f77b032f959e0fb70234d4c28922d4e08dd3330c66e34bff51cc8ade5000003e90000001000000000000000000000000000000000000003ea0000001000000001c342e1fd000000160000001000000001000000e80d6146ac000002a1000013b17880b49d00000000000002a10000028f00000003000000010000009000000001000005ee00000004000000804c16fc0b6201d8b122766a2c0800450005dc04574000770623a11fcd80a218691d4cf2fe01bbd4f47482065fd63a5010fabd7987000052a20002c8c43ea91ca1eaa115663f5218a37fbb409dfbbedff54731ef41199b35535905ac2366a05a803146ced544abf45597f3714327d59f99e30c899c39fc5a4b67d12087bf8db2bc000003e90000001000000000000000000000000000000000000003ea000000100000000189dd4f210000001000000018") + require.NoError(t, err) + + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "86.158.90.179", + "dst_mac": "08:b2:58:7a:57:62", + "dst_port": "58203", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "655", + "output_ifindex": "524", + "sample_direction": "egress", + "source_id_index": "524", + "source_id_type": "0", + "src_ip": "5.42.173.167", + "src_mac": "4c:16:fc:0b:61:a5", + "src_port": "26534", + }, + map[string]interface{}{ + "bytes": uint64(0x06c4d8), + "drops": uint64(0x00), + "frame_length": uint64(0x58), + 
"header_length": uint64(0x54), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x46), + "ip_ttl": uint64(0x3a), + "sampling_rate": uint64(0x13b1), + "udp_length": uint64(0x32), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "24.105.57.150", + "dst_mac": "4c:16:fc:0b:62:02", + "dst_port": "3724", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "674", + "output_ifindex": "655", + "sample_direction": "ingress", + "source_id_index": "674", + "source_id_type": "0", + "src_ip": "87.81.133.167", + "src_mac": "c0:3e:0f:de:ca:fe", + "src_port": "61527", + }, + map[string]interface{}{ + "bytes": uint64(0x0513a2), + "drops": uint64(0x00), + "frame_length": uint64(0x42), + "header_length": uint64(0x3e), + "ip_flags": uint64(0x00), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x30), + "ip_ttl": uint64(0x7d), + "sampling_rate": uint64(0x13b1), + "udp_length": uint64(0x1c), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "95.148.199.120", + "dst_mac": "02:31:46:6d:0b:2c", + "dst_port": "62029", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "655", + "output_ifindex": "524", + "sample_direction": "egress", + "source_id_index": "524", + "source_id_type": "0", + "src_ip": "5.42.174.31", + "src_mac": "4c:16:fc:0b:61:a5", + "src_port": "26510", + }, + map[string]interface{}{ + "bytes": uint64(0x206215), + "drops": uint64(0x00), + "frame_length": uint64(0x01a5), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x0193), + "ip_ttl": uint64(0x3a), + "sampling_rate": uint64(0x13b1), + "udp_length": uint64(0x017f), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "2.31.243.101", + "dst_mac": "02:31:46:6d:0b:2c", + "dst_port": "59552", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "655", + "output_ifindex": "524", + "sample_direction": "egress", + "source_id_index": "524", + "source_id_type": "0", + "src_ip": "185.60.112.106", + "src_mac": "4c:16:fc:0b:61:a5", + "src_port": "1119", + }, + map[string]interface{}{ + "bytes": uint64(0x0dec25), + "drops": uint64(0x00), + "frame_length": uint64(0xb5), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0xa3), + "ip_ttl": uint64(0x36), + "sampling_rate": uint64(0x13b1), + "tcp_header_length": uint64(0x14), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0xed), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "2.28.148.14", + "dst_mac": "02:31:46:6d:0b:2c", + "dst_port": "57557", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "655", + "output_ifindex": "524", + "sample_direction": "egress", + "source_id_index": "524", + "source_id_type": "0", + "src_ip": "5.42.189.141", + "src_mac": "4c:16:fc:0b:61:a5", + "src_port": "26599", + }, + map[string]interface{}{ + "bytes": uint64(0x1e9d2e), + "drops": uint64(0x00), + "frame_length": 
uint64(0x018e), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x017c), + "ip_ttl": uint64(0x3a), + "sampling_rate": uint64(0x13b1), + "udp_length": uint64(0x0168), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "137.221.79.1", + "dst_ip": "24.105.29.76", + "dst_mac": "4c:16:fc:0b:62:01", + "dst_port": "443", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "673", + "output_ifindex": "655", + "sample_direction": "ingress", + "source_id_index": "673", + "source_id_type": "0", + "src_ip": "31.205.128.162", + "src_mac": "d8:b1:22:76:6a:2c", + "src_port": "62206", + }, + map[string]interface{}{ + "bytes": uint64(0x74c38e), + "drops": uint64(0x00), + "frame_length": uint64(0x05ee), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05dc), + "ip_ttl": uint64(0x77), + "sampling_rate": uint64(0x13b1), + "tcp_header_length": uint64(0x14), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0xfabd), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestIPv6SW(t *testing.T) { + packet, err := hex.DecodeString("00000005000000010ae0648100000002000093d824ac82340000000100000001000000d000019f94000001010000100019f94000000000000000010100000000000000020000000100000090000000010000058c00000008000000800008e3fffc10d4f4be04612486dd60000000054e113a2607f8b0400200140000000000000008262000edc000e804a25e30c581af36fa01bbfa6f054e249810b584bcbf12926c2e29a779c26c72db483e8191524fe2288bfdaceaf9d2e724d04305706efcfdef70db86873bbacf29698affe4e7d6faa21d302f9b4b023291a05a000003e90000001000000001000000000000000100000000") + require.NoError(t, err) + + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) + require.NoError(t, err) + + expected := []telegraf.Metric{ + + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.224.100.129", + "dst_ip": "2620:ed:c000:e804:a25e:30c5:81af:36fa", + "dst_mac": "00:08:e3:ff:fc:10", + "dst_port": "64111", + "ether_type": "IPv6", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "257", + "output_ifindex": "0", + "sample_direction": "ingress", + "source_id_index": "257", + "source_id_type": "0", + "src_ip": "2607:f8b0:4002:14::8", + "src_mac": "d4:f4:be:04:61:24", + "src_port": "443", + }, + map[string]interface{}{ + "bytes": uint64(0x58c000), + "drops": uint64(0x00), + "frame_length": uint64(0x058c), + "header_length": uint64(0x80), + "sampling_rate": uint64(0x1000), + "payload_length": uint64(0x054e), + "udp_length": uint64(0x054e), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestExpandFlowCounter(t *testing.T) { + packet, err := 
hex.DecodeString("00000005000000010a00015000000000000f58898ae0fa380000000700000004000000ec00006ece0000000000101784000000030000000200000034000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000058001017840000000600000002540be400000000010000000300007b8ebd37b97e61ff94860803e8e908ffb2b500000000000000000000000000018e7c31ee7ba4195f041874579ff021ba936300000000000000000000000100000007000000380011223344550003f8b15645e7e7d6960000002fe2fc02fc01edbf580000000000000000000000000000000001dcb9cf000000000000000000000004000000ec00006ece0000000000100184000000030000000200000034000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000058001001840000000600000002540be400000000010000000300000841131d1fd9f850bfb103617cb401e6598900000000000000000000000000000bec1902e5da9212e3e96d7996e922513250000000000000000000000001000000070000003800112233445500005c260acbddb3000100000003e2fc02fc01ee414f0000000000000000000000000000000001dccdd30000000000000000000000030000008400004606000000000010030400004000ad9dc19b0000000000000000001017840000000000100304000000010000000100000050000000010000004400000004000000400012815116c4001517cf426d8100200608004500002895da40008006d74bc0a8060ac0a8064f04ef04aab1797122cf7eaf4f5010ffff7727000000000000000000000003000000b0001bd698000000000010148400000400700b180f000000000000000000101504000000000010148400000001000000010000007c000000010000006f000000040000006b001b17000131f0f755b9afc081000439080045000059045340005206920c1f0d4703d94d52e201bbf14977d1e9f15498af36801800417f1100000101080afdf3c70400e043871503010020ff268cfe2e2fd5fffe1d3d704a91d57b895f174c4b4428c66679d80a307294303f00000003000000c40003ceca000000000010170400004000a166aa7a000000000000000000101784000000000010170400000001000000010000009000000001000005f200000004000000800024e8369e2bd4ae52aa0b54810020060800450005dc4c71400080061b45c0a8060cc0a806090050f855692a7a94a1154ae1801001046b6a00000101080a6869a48d151016d046a84a7aa1c6743fa05179f7ecbd4e567150cb6f2077ff89480ae730637d26d2237c08548806f672c7476eb1b5a447b42cb9ce405994d152fa3e000000030000008c001bd699000000000010148400000400700b180f0000000000000000001015040000000000101484000000010000000100000058000000010000004a0000000400000046001b17000131f0f755b9afc0810004390800450000340ce040003a06bea5c1ce8793d94d528f00504c3b08b18f275b83d5df8010054586ad00000101050a5b83d5de5b83d5df11d800000003000000c400004e07000000000010028400004000c7ec97f2000000000000000000100784000000000010028400000001000000010000009000000001000005f2000000040000008000005e0001ff005056800dd18100000a0800450005dc5a42400040066ef70a000ac8c0a8967201bbe17c81597908caf8a05f5010010328610000f172263da0ba5d6223c079b8238bc841256bf17c4ffb08ad11c4fbff6f87ae1624a6b057b8baa9342114e5f5b46179083020cb560c4e9eadcec6dfd83e102ddbc27024803eb5") + require.NoError(t, err) + + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) + require.NoError(t, err) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "192.168.6.79", + "dst_mac": "00:12:81:51:16:c4", + "dst_port": "1194", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "output_ifindex": "1049348", + "sample_direction": "egress", + "source_id_index": "1049348", + "source_id_type": "0", + "src_ip": "192.168.6.10", + "src_mac": "00:15:17:cf:42:6d", + "src_port": "1263", + }, + map[string]interface{}{ + "bytes": 
uint64(0x110000), + "drops": uint64(0x00), + "frame_length": uint64(0x44), + "header_length": uint64(0x40), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x28), + "ip_ttl": uint64(0x80), + "sampling_rate": uint64(0x4000), + "tcp_header_length": uint64(0x14), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0xffff), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "217.77.82.226", + "dst_mac": "00:1b:17:00:01:31", + "dst_port": "61769", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1053956", + "output_ifindex": "1053828", + "sample_direction": "egress", + "source_id_index": "1053828", + "source_id_type": "0", + "src_ip": "31.13.71.3", + "src_mac": "f0:f7:55:b9:af:c0", + "src_port": "443", + }, + map[string]interface{}{ + "bytes": uint64(0x01bc00), + "drops": uint64(0x00), + "frame_length": uint64(0x6f), + "header_length": uint64(0x6b), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x59), + "ip_ttl": uint64(0x52), + "sampling_rate": uint64(0x0400), + "tcp_header_length": uint64(0x20), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x41), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "192.168.6.9", + "dst_mac": "00:24:e8:36:9e:2b", + "dst_port": "63573", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1054596", + "output_ifindex": "1054468", + "sample_direction": "egress", + "source_id_index": "1054468", + "source_id_type": "0", + "src_ip": "192.168.6.12", + "src_mac": "d4:ae:52:aa:0b:54", + "src_port": "80", + }, + map[string]interface{}{ + "bytes": uint64(0x017c8000), + "drops": uint64(0x00), + "frame_length": uint64(0x05f2), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05dc), + "ip_ttl": uint64(0x80), + "sampling_rate": uint64(0x4000), + "tcp_header_length": uint64(0x20), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x0104), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "217.77.82.143", + "dst_mac": "00:1b:17:00:01:31", + "dst_port": "19515", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "1053956", + "output_ifindex": "1053828", + "sample_direction": "egress", + "source_id_index": "1053828", + "source_id_type": "0", + "src_ip": "193.206.135.147", + "src_mac": "f0:f7:55:b9:af:c0", + "src_port": "80", + }, + map[string]interface{}{ + "bytes": uint64(0x012800), + "drops": uint64(0x00), + "frame_length": uint64(0x4a), + "header_length": uint64(0x46), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x34), + "ip_ttl": uint64(0x3a), + "sampling_rate": uint64(0x0400), + "tcp_header_length": uint64(0x20), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x0545), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "10.0.1.80", + "dst_ip": "192.168.150.114", + "dst_mac": "00:00:5e:00:01:ff", + "dst_port": "57724", + "ether_type": "IPv4", + "header_protocol": 
"ETHERNET-ISO88023", + "input_ifindex": "1050500", + "output_ifindex": "1049220", + "sample_direction": "egress", + "source_id_index": "1049220", + "source_id_type": "0", + "src_ip": "10.0.10.200", + "src_mac": "00:50:56:80:0d:d1", + "src_port": "443", + }, + map[string]interface{}{ + "bytes": uint64(0x017c8000), + "drops": uint64(0x00), + "frame_length": uint64(0x05f2), + "header_length": uint64(0x80), + "ip_flags": uint64(0x02), + "ip_fragment_offset": uint64(0x00), + "ip_total_length": uint64(0x05dc), + "ip_ttl": uint64(0x40), + "sampling_rate": uint64(0x4000), + "tcp_header_length": uint64(0x14), + "tcp_urgent_pointer": uint64(0x00), + "tcp_window_size": uint64(0x0103), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} + +func TestFlowExpandCounter(t *testing.T) { + packet, err := hex.DecodeString("00000005000000010a000150000000000006d14d8ae0fe200000000200000004000000ac00006d15000000004b00ca000000000200000002000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000584b00ca0000000001000000000000000000000001000000010000308ae33bb950eb92a8a3004d0bb406899571000000000000000000000000000012f7ed9c9db8c24ed90604eaf0bd04636edb00000000000000000000000100000004000000ac00006d15000000004b0054000000000200000002000000340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000584b00540000000001000000003b9aca000000000100000003000067ba8e64fd23fa65f26d0215ec4a0021086600000000000000000000000000002002c3b21045c2378ad3001fb2f300061872000000000000000000000001") + require.NoError(t, err) + + dc := NewDecoder() + p, err := dc.DecodeOnePacket(bytes.NewBuffer(packet)) + require.NoError(t, err) + actual, err := makeMetrics(p) + require.NoError(t, err) + + // we don't do anything with samples yet + expected := []telegraf.Metric{} + testutil.RequireMetricsEqual(t, expected, actual, testutil.IgnoreTime()) +} diff --git a/plugins/inputs/sflow/metricencoder.go b/plugins/inputs/sflow/metricencoder.go new file mode 100644 index 000000000..ffc9d8e02 --- /dev/null +++ b/plugins/inputs/sflow/metricencoder.go @@ -0,0 +1,46 @@ +package sflow + +import ( + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +func makeMetrics(p *V5Format) ([]telegraf.Metric, error) { + now := time.Now() + metrics := []telegraf.Metric{} + tags := map[string]string{ + "agent_address": p.AgentAddress.String(), + } + fields := map[string]interface{}{} + for _, sample := range p.Samples { + tags["input_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.InputIfIndex), 10) + tags["output_ifindex"] = strconv.FormatUint(uint64(sample.SampleData.OutputIfIndex), 10) + tags["sample_direction"] = sample.SampleData.SampleDirection + tags["source_id_index"] = strconv.FormatUint(uint64(sample.SampleData.SourceIDIndex), 10) + tags["source_id_type"] = strconv.FormatUint(uint64(sample.SampleData.SourceIDType), 10) + fields["drops"] = sample.SampleData.Drops + fields["sampling_rate"] = sample.SampleData.SamplingRate + + for _, flowRecord := range sample.SampleData.FlowRecords { + if flowRecord.FlowData != nil { + tags2 := flowRecord.FlowData.GetTags() + fields2 := flowRecord.FlowData.GetFields() + for k, v := range tags { + tags2[k] = v + } + for k, v := range fields { + fields2[k] = v + } + m, err := metric.New("sflow", tags2, fields2, now) + if err != nil { + return nil, err + } + metrics 
= append(metrics, m) + } + } + } + return metrics, nil +} diff --git a/plugins/inputs/sflow/packetdecoder.go b/plugins/inputs/sflow/packetdecoder.go new file mode 100644 index 000000000..9e6b2a4fe --- /dev/null +++ b/plugins/inputs/sflow/packetdecoder.go @@ -0,0 +1,483 @@ +package sflow + +import ( + "encoding/binary" + "fmt" + "io" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs/sflow/binaryio" + "github.com/pkg/errors" +) + +type PacketDecoder struct { + onPacket func(p *V5Format) + Log telegraf.Logger +} + +func NewDecoder() *PacketDecoder { + return &PacketDecoder{} +} + +func (d *PacketDecoder) debug(args ...interface{}) { + if d.Log != nil { + d.Log.Debug(args...) + } +} + +func (d *PacketDecoder) OnPacket(f func(p *V5Format)) { + d.onPacket = f +} + +func (d *PacketDecoder) Decode(r io.Reader) error { + var err error + var packet *V5Format + for err == nil { + packet, err = d.DecodeOnePacket(r) + if err != nil { + break + } + d.onPacket(packet) + } + if err != nil && errors.Cause(err) == io.EOF { + return nil + } + return err +} + +type AddressType uint32 // must be uint32 + +const ( + AddressTypeUnknown AddressType = 0 + AddressTypeIPV4 AddressType = 1 + AddressTypeIPV6 AddressType = 2 +) + +func (d *PacketDecoder) DecodeOnePacket(r io.Reader) (*V5Format, error) { + p := &V5Format{} + err := read(r, &p.Version, "version") + if err != nil { + return nil, err + } + if p.Version != 5 { + return nil, fmt.Errorf("Version %d not supported, only version 5", p.Version) + } + var addressIPType AddressType + if err = read(r, &addressIPType, "address ip type"); err != nil { + return nil, err + } + switch addressIPType { + case AddressTypeUnknown: + p.AgentAddress.IP = make([]byte, 0) + case AddressTypeIPV4: + p.AgentAddress.IP = make([]byte, 4) + case AddressTypeIPV6: + p.AgentAddress.IP = make([]byte, 16) + default: + return nil, fmt.Errorf("Unknown address IP type %d", addressIPType) + } + if err = read(r, &p.AgentAddress.IP, "Agent Address IP"); err != nil { + return nil, err + } + if err = read(r, &p.SubAgentID, "SubAgentID"); err != nil { + return nil, err + } + if err = read(r, &p.SequenceNumber, "SequenceNumber"); err != nil { + return nil, err + } + if err = read(r, &p.Uptime, "Uptime"); err != nil { + return nil, err + } + + p.Samples, err = d.decodeSamples(r) + return p, err +} + +func (d *PacketDecoder) decodeSamples(r io.Reader) ([]Sample, error) { + result := []Sample{} + // # of samples + var numOfSamples uint32 + if err := read(r, &numOfSamples, "sample count"); err != nil { + return nil, err + } + + for i := 0; i < int(numOfSamples); i++ { + sam, err := d.decodeSample(r) + if err != nil { + return result, err + } + result = append(result, sam) + } + + return result, nil +} + +func (d *PacketDecoder) decodeSample(r io.Reader) (Sample, error) { + var err error + sam := Sample{} + if err := read(r, &sam.SampleType, "SampleType"); err != nil { + return sam, err + } + sampleDataLen := uint32(0) + if err := read(r, &sampleDataLen, "Sample data length"); err != nil { + return sam, err + } + mr := binaryio.MinReader(r, int64(sampleDataLen)) + defer mr.Close() + + switch sam.SampleType { + case SampleTypeFlowSample: + sam.SampleData, err = d.decodeFlowSample(mr) + case SampleTypeFlowSampleExpanded: + sam.SampleData, err = d.decodeFlowSampleExpanded(mr) + default: + d.debug("Unknown sample type: ", sam.SampleType) + } + return sam, err +} + +type InterfaceFormatType uint8 // sflow_version_5.txt line 1497 +const ( + InterfaceFormatTypeSingleInterface 
InterfaceFormatType = 0 + InterfaceFormatTypePacketDiscarded InterfaceFormatType = 1 +) + +func (d *PacketDecoder) decodeFlowSample(r io.Reader) (t SampleDataFlowSampleExpanded, err error) { + if err := read(r, &t.SequenceNumber, "SequenceNumber"); err != nil { + return t, err + } + var sourceID uint32 + if err := read(r, &sourceID, "SourceID"); err != nil { // source_id sflow_version_5.txt line: 1622 + return t, err + } + // split source id to source id type and source id index + t.SourceIDIndex = sourceID & 0x00ffffff // sflow_version_5.txt line: 1468 + t.SourceIDType = sourceID >> 24 // source_id_type sflow_version_5.txt Line 1465 + if err := read(r, &t.SamplingRate, "SamplingRate"); err != nil { + return t, err + } + if err := read(r, &t.SamplePool, "SamplePool"); err != nil { + return t, err + } + if err := read(r, &t.Drops, "Drops"); err != nil { // sflow_version_5.txt line 1636 + return t, err + } + if err := read(r, &t.InputIfIndex, "InputIfIndex"); err != nil { + return t, err + } + t.InputIfFormat = t.InputIfIndex >> 30 + t.InputIfIndex = t.InputIfIndex & 0x3FFFFFFF + + if err := read(r, &t.OutputIfIndex, "OutputIfIndex"); err != nil { + return t, err + } + t.OutputIfFormat = t.OutputIfIndex >> 30 + t.OutputIfIndex = t.OutputIfIndex & 0x3FFFFFFF + + switch t.SourceIDIndex { + case t.OutputIfIndex: + t.SampleDirection = "egress" + case t.InputIfIndex: + t.SampleDirection = "ingress" + } + + t.FlowRecords, err = d.decodeFlowRecords(r, t.SamplingRate) + return t, err +} + +func (d *PacketDecoder) decodeFlowSampleExpanded(r io.Reader) (t SampleDataFlowSampleExpanded, err error) { + if err := read(r, &t.SequenceNumber, "SequenceNumber"); err != nil { // sflow_version_5.txt line 1701 + return t, err + } + if err := read(r, &t.SourceIDType, "SourceIDType"); err != nil { // sflow_version_5.txt line: 1706 + 16878 + return t, err + } + if err := read(r, &t.SourceIDIndex, "SourceIDIndex"); err != nil { // sflow_version_5.txt line: 1689 + return t, err + } + if err := read(r, &t.SamplingRate, "SamplingRate"); err != nil { // sflow_version_5.txt line: 1707 + return t, err + } + if err := read(r, &t.SamplePool, "SamplePool"); err != nil { // sflow_version_5.txt line: 1708 + return t, err + } + if err := read(r, &t.Drops, "Drops"); err != nil { // sflow_version_5.txt line: 1712 + return t, err + } + if err := read(r, &t.InputIfFormat, "InputIfFormat"); err != nil { // sflow_version_5.txt line: 1727 + return t, err + } + if err := read(r, &t.InputIfIndex, "InputIfIndex"); err != nil { + return t, err + } + if err := read(r, &t.OutputIfFormat, "OutputIfFormat"); err != nil { // sflow_version_5.txt line: 1728 + return t, err + } + if err := read(r, &t.OutputIfIndex, "OutputIfIndex"); err != nil { + return t, err + } + + switch t.SourceIDIndex { + case t.OutputIfIndex: + t.SampleDirection = "egress" + case t.InputIfIndex: + t.SampleDirection = "ingress" + } + + t.FlowRecords, err = d.decodeFlowRecords(r, t.SamplingRate) + return t, err +} + +func (d *PacketDecoder) decodeFlowRecords(r io.Reader, samplingRate uint32) (recs []FlowRecord, err error) { + var flowDataLen uint32 + var count uint32 + if err := read(r, &count, "FlowRecord count"); err != nil { + return recs, err + } + for i := uint32(0); i < count; i++ { + fr := FlowRecord{} + if err := read(r, &fr.FlowFormat, "FlowFormat"); err != nil { // sflow_version_5.txt line 1597 + return recs, err + } + if err := read(r, &flowDataLen, "Flow data length"); err != nil { + return recs, err + } + + mr := binaryio.MinReader(r, int64(flowDataLen)) + + 
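+		// Wrapping r in a MinReader ensures the full flowDataLen octets of this record are
+		// consumed even if the decoder below reads only part of them (the remainder is
+		// discarded when mr is closed), keeping the stream aligned on the next flow record.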
switch fr.FlowFormat { + case FlowFormatTypeRawPacketHeader: // sflow_version_5.txt line 1938 + fr.FlowData, err = d.decodeRawPacketHeaderFlowData(mr, samplingRate) + default: + d.debug("Unknown flow format: ", fr.FlowFormat) + } + if err != nil { + mr.Close() + return recs, err + } + + recs = append(recs, fr) + mr.Close() + } + + return recs, err +} + +func (d *PacketDecoder) decodeRawPacketHeaderFlowData(r io.Reader, samplingRate uint32) (h RawPacketHeaderFlowData, err error) { + if err := read(r, &h.HeaderProtocol, "HeaderProtocol"); err != nil { // sflow_version_5.txt line 1940 + return h, err + } + if err := read(r, &h.FrameLength, "FrameLength"); err != nil { // sflow_version_5.txt line 1942 + return h, err + } + h.Bytes = h.FrameLength * samplingRate + + if err := read(r, &h.StrippedOctets, "StrippedOctets"); err != nil { // sflow_version_5.txt line 1967 + return h, err + } + if err := read(r, &h.HeaderLength, "HeaderLength"); err != nil { + return h, err + } + + mr := binaryio.MinReader(r, int64(h.HeaderLength)) + defer mr.Close() + + switch h.HeaderProtocol { + case HeaderProtocolTypeEthernetISO88023: + h.Header, err = d.decodeEthHeader(mr) + default: + d.debug("Unknown header protocol type: ", h.HeaderProtocol) + } + + return h, err +} + +// ethHeader answers a decode Directive that will decode an ethernet frame header +// according to https://en.wikipedia.org/wiki/Ethernet_frame +func (d *PacketDecoder) decodeEthHeader(r io.Reader) (h EthHeader, err error) { + // we may have to read out StrippedOctets bytes and throw them away first? + if err := read(r, &h.DestinationMAC, "DestinationMAC"); err != nil { + return h, err + } + if err := read(r, &h.SourceMAC, "SourceMAC"); err != nil { + return h, err + } + var tagOrEType uint16 + if err := read(r, &tagOrEType, "tagOrEtype"); err != nil { + return h, err + } + switch tagOrEType { + case 0x8100: // could be? 
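+		// 0x8100 is the IEEE 802.1Q VLAN tag protocol identifier (TPID): the 16-bit tag
+		// control information is read and discarded below, and the real EtherType follows.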
+ var discard uint16 + if err := read(r, &discard, "unknown"); err != nil { + return h, err + } + if err := read(r, &h.EtherTypeCode, "EtherTypeCode"); err != nil { + return h, err + } + default: + h.EtherTypeCode = tagOrEType + } + h.EtherType = ETypeMap[h.EtherTypeCode] + switch h.EtherType { + case "IPv4": + h.IPHeader, err = d.decodeIPv4Header(r) + case "IPv6": + h.IPHeader, err = d.decodeIPv6Header(r) + default: + } + if err != nil { + return h, err + } + return h, err +} + +// https://en.wikipedia.org/wiki/IPv4#Header +func (d *PacketDecoder) decodeIPv4Header(r io.Reader) (h IPV4Header, err error) { + if err := read(r, &h.Version, "Version"); err != nil { + return h, err + } + h.InternetHeaderLength = h.Version & 0x0F + h.Version = h.Version & 0xF0 + if err := read(r, &h.DSCP, "DSCP"); err != nil { + return h, err + } + h.ECN = h.DSCP & 0x03 + h.DSCP = h.DSCP >> 2 + if err := read(r, &h.TotalLength, "TotalLength"); err != nil { + return h, err + } + if err := read(r, &h.Identification, "Identification"); err != nil { + return h, err + } + if err := read(r, &h.FragmentOffset, "FragmentOffset"); err != nil { + return h, err + } + h.Flags = uint8(h.FragmentOffset >> 13) + h.FragmentOffset = h.FragmentOffset & 0x1FFF + if err := read(r, &h.TTL, "TTL"); err != nil { + return h, err + } + if err := read(r, &h.Protocol, "Protocol"); err != nil { + return h, err + } + if err := read(r, &h.HeaderChecksum, "HeaderChecksum"); err != nil { + return h, err + } + if err := read(r, &h.SourceIP, "SourceIP"); err != nil { + return h, err + } + if err := read(r, &h.DestIP, "DestIP"); err != nil { + return h, err + } + switch h.Protocol { + case IPProtocolTCP: + h.ProtocolHeader, err = d.decodeTCPHeader(r) + case IPProtocolUDP: + h.ProtocolHeader, err = d.decodeUDPHeader(r) + default: + d.debug("Unknown IP protocol: ", h.Protocol) + } + return h, err +} + +// https://en.wikipedia.org/wiki/IPv6_packet +func (d *PacketDecoder) decodeIPv6Header(r io.Reader) (h IPV6Header, err error) { + var fourByteBlock uint32 + if err := read(r, &fourByteBlock, "IPv6 header octet 0"); err != nil { + return h, err + } + version := fourByteBlock >> 28 + if version != 0x6 { + return h, fmt.Errorf("Unexpected IPv6 header version 0x%x", version) + } + h.DSCP = uint8((fourByteBlock & 0xFC00000) >> 22) + h.ECN = uint8((fourByteBlock & 0x300000) >> 20) + + // flowLabel := fourByteBlock & 0xFFFFF // not currently being used. 
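+	// Layout of that first word (RFC 8200): bits 31-28 version, bits 27-20 traffic class
+	// (DSCP in its upper six bits, ECN in its lower two), bits 19-0 flow label.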
+ if err := read(r, &h.PayloadLength, "PayloadLength"); err != nil { + return h, err + } + if err := read(r, &h.NextHeaderProto, "NextHeaderProto"); err != nil { + return h, err + } + if err := read(r, &h.HopLimit, "HopLimit"); err != nil { + return h, err + } + if err := read(r, &h.SourceIP, "SourceIP"); err != nil { + return h, err + } + if err := read(r, &h.DestIP, "DestIP"); err != nil { + return h, err + } + switch h.NextHeaderProto { + case IPProtocolTCP: + h.ProtocolHeader, err = d.decodeTCPHeader(r) + case IPProtocolUDP: + h.ProtocolHeader, err = d.decodeUDPHeader(r) + default: + // not handled + d.debug("Unknown IP protocol: ", h.NextHeaderProto) + } + return h, err +} + +// https://en.wikipedia.org/wiki/Transmission_Control_Protocol#TCP_segment_structure +func (d *PacketDecoder) decodeTCPHeader(r io.Reader) (h TCPHeader, err error) { + if err := read(r, &h.SourcePort, "SourcePort"); err != nil { + return h, err + } + if err := read(r, &h.DestinationPort, "DestinationPort"); err != nil { + return h, err + } + if err := read(r, &h.Sequence, "Sequence"); err != nil { + return h, err + } + if err := read(r, &h.AckNumber, "AckNumber"); err != nil { + return h, err + } + // Next up: bit reading! + // data offset 4 bits + // reserved 3 bits + // flags 9 bits + var dataOffsetAndReservedAndFlags uint16 + if err := read(r, &dataOffsetAndReservedAndFlags, "TCP Header Octet offset 12"); err != nil { + return h, err + } + h.TCPHeaderLength = uint8((dataOffsetAndReservedAndFlags >> 12) * 4) + h.Flags = dataOffsetAndReservedAndFlags & 0x1FF + // done bit reading + + if err := read(r, &h.TCPWindowSize, "TCPWindowSize"); err != nil { + return h, err + } + if err := read(r, &h.Checksum, "Checksum"); err != nil { + return h, err + } + if err := read(r, &h.TCPUrgentPointer, "TCPUrgentPointer"); err != nil { + return h, err + } + + return h, err +} + +func (d *PacketDecoder) decodeUDPHeader(r io.Reader) (h UDPHeader, err error) { + if err := read(r, &h.SourcePort, "SourcePort"); err != nil { + return h, err + } + if err := read(r, &h.DestinationPort, "DestinationPort"); err != nil { + return h, err + } + if err := read(r, &h.UDPLength, "UDPLength"); err != nil { + return h, err + } + if err := read(r, &h.Checksum, "Checksum"); err != nil { + return h, err + } + return h, err +} + +func read(r io.Reader, data interface{}, name string) error { + err := binary.Read(r, binary.BigEndian, data) + return errors.Wrapf(err, "failed to read %s", name) +} diff --git a/plugins/inputs/sflow/packetdecoder_test.go b/plugins/inputs/sflow/packetdecoder_test.go new file mode 100644 index 000000000..f078eaf31 --- /dev/null +++ b/plugins/inputs/sflow/packetdecoder_test.go @@ -0,0 +1,207 @@ +package sflow + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUDPHeader(t *testing.T) { + octets := bytes.NewBuffer([]byte{ + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + 0x00, 0x00, // checksum + }) + + dc := NewDecoder() + actual, err := dc.decodeUDPHeader(octets) + require.NoError(t, err) + + expected := UDPHeader{ + SourcePort: 1, + DestinationPort: 2, + UDPLength: 3, + } + + require.Equal(t, expected, actual) +} + +func BenchmarkUDPHeader(b *testing.B) { + octets := bytes.NewBuffer([]byte{ + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + 0x00, 0x00, // checksum + }) + + dc := NewDecoder() + + b.ResetTimer() + for n := 0; n < b.N; n++ { + dc.decodeUDPHeader(octets) + } +} + +func TestIPv4Header(t *testing.T) { + octets 
:= bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x11, // protocol; 0x11 = udp + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + 0x00, 0x00, // checksum + }, + ) + dc := NewDecoder() + actual, err := dc.decodeIPv4Header(octets) + require.NoError(t, err) + + expected := IPV4Header{ + Version: 0x40, + InternetHeaderLength: 0x05, + DSCP: 0, + ECN: 0, + TotalLength: 0, + Identification: 0, + Flags: 0, + FragmentOffset: 0, + TTL: 0, + Protocol: 0x11, + HeaderChecksum: 0, + SourceIP: [4]byte{127, 0, 0, 1}, + DestIP: [4]byte{127, 0, 0, 2}, + ProtocolHeader: UDPHeader{ + SourcePort: 1, + DestinationPort: 2, + UDPLength: 3, + Checksum: 0, + }, + } + + require.Equal(t, expected, actual) +} + +// Using the same Directive instance, prior paths through the parse tree should +// not affect the latest parse. +func TestIPv4HeaderSwitch(t *testing.T) { + octets := bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x11, // protocol; 0x11 = udp + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x03, // udp_length + 0x00, 0x00, // checksum + }, + ) + dc := NewDecoder() + _, err := dc.decodeIPv4Header(octets) + require.NoError(t, err) + + octets = bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x06, // protocol; 0x06 = tcp + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, 0x01, // src_port + 0x00, 0x02, // dst_port + 0x00, 0x00, 0x00, 0x00, // sequence + 0x00, 0x00, 0x00, 0x00, // ack_number + 0x00, 0x00, // tcp_header_length + 0x00, 0x00, // tcp_window_size + 0x00, 0x00, // checksum + 0x00, 0x00, // tcp_urgent_pointer + }, + ) + dc = NewDecoder() + actual, err := dc.decodeIPv4Header(octets) + require.NoError(t, err) + + expected := IPV4Header{ + Version: 64, + InternetHeaderLength: 5, + Protocol: 6, + SourceIP: [4]byte{127, 0, 0, 1}, + DestIP: [4]byte{127, 0, 0, 2}, + ProtocolHeader: TCPHeader{ + SourcePort: 1, + DestinationPort: 2, + }, + } + + require.Equal(t, expected, actual) +} + +func TestUnknownProtocol(t *testing.T) { + octets := bytes.NewBuffer( + []byte{ + 0x45, // version + IHL + 0x00, // ip_dscp + ip_ecn + 0x00, 0x00, // total length + 0x00, 0x00, // identification + 0x00, 0x00, // flags + frag offset + 0x00, // ttl + 0x99, // protocol + 0x00, 0x00, // header checksum + 0x7f, 0x00, 0x00, 0x01, // src ip + 0x7f, 0x00, 0x00, 0x02, // dst ip + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + 0x00, + }, + ) + dc := NewDecoder() + actual, err := dc.decodeIPv4Header(octets) + require.NoError(t, err) + + expected := IPV4Header{ + Version: 64, + InternetHeaderLength: 5, + Protocol: 153, + SourceIP: [4]byte{127, 0, 0, 1}, + DestIP: [4]byte{127, 0, 0, 2}, + } + + require.Equal(t, expected, actual) +} diff --git a/plugins/inputs/sflow/sflow.go b/plugins/inputs/sflow/sflow.go 
new file mode 100644 index 000000000..2e3fbc0cf --- /dev/null +++ b/plugins/inputs/sflow/sflow.go @@ -0,0 +1,158 @@ +package sflow + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "net/url" + "strings" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + ## Address to listen for sFlow packets. + ## example: service_address = "udp://:6343" + ## service_address = "udp4://:6343" + ## service_address = "udp6://:6343" + service_address = "udp://:6343" + + ## Set the size of the operating system's receive buffer. + ## example: read_buffer_size = "64KiB" + # read_buffer_size = "" +` + +const ( + maxPacketSize = 64 * 1024 +) + +type SFlow struct { + ServiceAddress string `toml:"service_address"` + ReadBufferSize internal.Size `toml:"read_buffer_size"` + + Log telegraf.Logger `toml:"-"` + + addr net.Addr + decoder *PacketDecoder + closer io.Closer + cancel context.CancelFunc + wg sync.WaitGroup +} + +// Description answers a description of this input plugin +func (s *SFlow) Description() string { + return "SFlow V5 Protocol Listener" +} + +// SampleConfig answers a sample configuration +func (s *SFlow) SampleConfig() string { + return sampleConfig +} + +func (s *SFlow) Init() error { + s.decoder = NewDecoder() + s.decoder.Log = s.Log + return nil +} + +// Start starts this sFlow listener listening on the configured network for sFlow packets +func (s *SFlow) Start(acc telegraf.Accumulator) error { + s.decoder.OnPacket(func(p *V5Format) { + metrics, err := makeMetrics(p) + if err != nil { + s.Log.Errorf("Failed to make metric from packet: %s", err) + return + } + for _, m := range metrics { + acc.AddMetric(m) + } + }) + + u, err := url.Parse(s.ServiceAddress) + if err != nil { + return err + } + + conn, err := listenUDP(u.Scheme, u.Host) + if err != nil { + return err + } + s.closer = conn + s.addr = conn.LocalAddr() + + if s.ReadBufferSize.Size > 0 { + conn.SetReadBuffer(int(s.ReadBufferSize.Size)) + } + + s.Log.Infof("Listening on %s://%s", s.addr.Network(), s.addr.String()) + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.read(acc, conn) + }() + + return nil +} + +// Gather is a NOOP for sFlow as it receives, asynchronously, sFlow network packets +func (s *SFlow) Gather(_ telegraf.Accumulator) error { + return nil +} + +func (s *SFlow) Stop() { + if s.closer != nil { + s.closer.Close() + } + s.wg.Wait() +} + +func (s *SFlow) Address() net.Addr { + return s.addr +} + +func (s *SFlow) read(acc telegraf.Accumulator, conn net.PacketConn) { + buf := make([]byte, maxPacketSize) + for { + n, _, err := conn.ReadFrom(buf) + if err != nil { + if !strings.HasSuffix(err.Error(), ": use of closed network connection") { + acc.AddError(err) + } + break + } + s.process(acc, buf[:n]) + } +} + +func (s *SFlow) process(acc telegraf.Accumulator, buf []byte) { + + if err := s.decoder.Decode(bytes.NewBuffer(buf)); err != nil { + acc.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) + } +} + +func listenUDP(network string, address string) (*net.UDPConn, error) { + switch network { + case "udp", "udp4", "udp6": + addr, err := net.ResolveUDPAddr(network, address) + if err != nil { + return nil, err + } + return net.ListenUDP(network, addr) + default: + return nil, fmt.Errorf("unsupported network type: %s", network) + } +} + +// init registers this SFlow input plug in with the Telegraf framework +func init() { + inputs.Add("sflow", func() telegraf.Input { + return &SFlow{} + }) 
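+	// Registering the constructor under the name "sflow" is what lets a [[inputs.sflow]]
+	// section in the Telegraf configuration instantiate this plugin.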
+} diff --git a/plugins/inputs/sflow/sflow_test.go b/plugins/inputs/sflow/sflow_test.go new file mode 100644 index 000000000..2df56c2ae --- /dev/null +++ b/plugins/inputs/sflow/sflow_test.go @@ -0,0 +1,135 @@ +package sflow + +import ( + "encoding/hex" + "net" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestSFlow(t *testing.T) { + sflow := &SFlow{ + ServiceAddress: "udp://127.0.0.1:0", + Log: testutil.Logger{}, + } + err := sflow.Init() + require.NoError(t, err) + + var acc testutil.Accumulator + err = sflow.Start(&acc) + require.NoError(t, err) + defer sflow.Stop() + + client, err := net.Dial(sflow.Address().Network(), sflow.Address().String()) + require.NoError(t, err) + + packetBytes, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") + require.NoError(t, err) + client.Write(packetBytes) + + acc.Wait(2) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "192.168.1.2", + "dst_ip": "192.168.9.10", + "dst_mac": "00:0c:29:36:d3:d6", + "dst_port": "47621", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "510", + "output_ifindex": "512", + "sample_direction": "ingress", + "source_id_index": "510", + "source_id_type": "0", + "src_ip": "192.168.9.19", + "src_mac": "94:c6:91:aa:97:60", + "src_port": "161", + }, + map[string]interface{}{ + "bytes": uint64(273408), + "drops": uint64(0), + "frame_length": uint64(267), + "header_length": uint64(128), + "ip_flags": uint64(2), + "ip_fragment_offset": uint64(0), + "ip_total_length": uint64(249), + "ip_ttl": uint64(64), + "sampling_rate": uint64(1024), + "udp_length": uint64(229), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "sflow", + map[string]string{ + "agent_address": "192.168.1.2", + "dst_ip": "192.168.9.10", + "dst_mac": "00:0c:29:36:d3:d6", + "dst_port": "514", + "ether_type": "IPv4", + "header_protocol": "ETHERNET-ISO88023", + "input_ifindex": "528", + "output_ifindex": "512", + "sample_direction": "ingress", + "source_id_index": "528", + "source_id_type": "0", + "src_ip": "192.168.8.21", + "src_mac": "fc:ec:da:44:00:8f", + "src_port": "39529", + }, + map[string]interface{}{ + "bytes": uint64(2473984), + "drops": uint64(0), + "frame_length": uint64(151), + "header_length": uint64(128), + "ip_flags": uint64(2), + "ip_fragment_offset": uint64(0), + "ip_total_length": uint64(129), + "ip_ttl": uint64(63), + "sampling_rate": uint64(16384), + "udp_length": uint64(109), + "ip_dscp": "0", + "ip_ecn": "0", + }, + time.Unix(0, 0), 
+ ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) +} + +func BenchmarkSFlow(b *testing.B) { + sflow := &SFlow{ + ServiceAddress: "udp://127.0.0.1:0", + Log: testutil.Logger{}, + } + err := sflow.Init() + require.NoError(b, err) + + var acc testutil.Accumulator + err = sflow.Start(&acc) + require.NoError(b, err) + defer sflow.Stop() + + client, err := net.Dial(sflow.Address().Network(), sflow.Address().String()) + require.NoError(b, err) + + packetBytes, err := hex.DecodeString("0000000500000001c0a80102000000100000f3d40bfa047f0000000200000001000000d00001210a000001fe000004000484240000000000000001fe00000200000000020000000100000090000000010000010b0000000400000080000c2936d3d694c691aa97600800450000f9f19040004011b4f5c0a80913c0a8090a00a1ba0500e5641f3081da02010104066d6f746f6770a281cc02047b46462e0201000201003081bd3012060d2b06010201190501010281dc710201003013060d2b06010201190501010281e66802025acc3012060d2b0601020119050101000003e9000000100000000900000000000000090000000000000001000000d00000e3cc000002100000400048eb740000000000000002100000020000000002000000010000009000000001000000970000000400000080000c2936d3d6fcecda44008f81000009080045000081186440003f119098c0a80815c0a8090a9a690202006d23083c33303e4170722031312030393a33333a3031206b6e6f64653120736e6d70645b313039385d3a20436f6e6e656374696f6e2066726f6d205544503a205b3139322e3136382e392e31305d3a34393233362d000003e90000001000000009000000000000000900000000") + require.NoError(b, err) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + client.Write(packetBytes) + acc.Wait(2) + } +} diff --git a/plugins/inputs/sflow/types.go b/plugins/inputs/sflow/types.go new file mode 100644 index 000000000..a48857803 --- /dev/null +++ b/plugins/inputs/sflow/types.go @@ -0,0 +1,285 @@ +package sflow + +import ( + "net" + "strconv" +) + +const ( + AddressTypeIPv6 uint32 = 2 // sflow_version_5.txt line: 1384 + AddressTypeIPv4 uint32 = 1 // sflow_version_5.txt line: 1383 + + IPProtocolTCP uint8 = 6 + IPProtocolUDP uint8 = 17 + + metricName = "sflow" +) + +var ETypeMap = map[uint16]string{ + 0x0800: "IPv4", + 0x86DD: "IPv6", +} + +var IPvMap = map[uint32]string{ + 1: "IPV4", // sflow_version_5.txt line: 1383 + 2: "IPV6", // sflow_version_5.txt line: 1384 +} + +type ContainsMetricData interface { + GetTags() map[string]string + GetFields() map[string]interface{} +} + +// V5Format answers and decoder.Directive capable of decoding sFlow v5 packets in accordance +// with SFlow v5 specification at https://sflow.org/sflow_version_5.txt +type V5Format struct { + Version uint32 + AgentAddress net.IPAddr + SubAgentID uint32 + SequenceNumber uint32 + Uptime uint32 + Samples []Sample +} + +type SampleType uint32 + +const ( + SampleTypeFlowSample SampleType = 1 // sflow_version_5.txt line: 1614 + SampleTypeFlowSampleExpanded SampleType = 3 // sflow_version_5.txt line: 1698 +) + +type SampleData interface{} + +type Sample struct { + SampleType SampleType + SampleData SampleDataFlowSampleExpanded +} + +type SampleDataFlowSampleExpanded struct { + SequenceNumber uint32 + SourceIDType uint32 + SourceIDIndex uint32 + SamplingRate uint32 + SamplePool uint32 + Drops uint32 + SampleDirection string // ingress/egress + InputIfFormat uint32 + InputIfIndex uint32 + OutputIfFormat uint32 + OutputIfIndex uint32 + FlowRecords []FlowRecord +} + +type FlowFormatType uint32 + +const ( + FlowFormatTypeRawPacketHeader FlowFormatType = 1 // sflow_version_5.txt line: 1938 +) + +type FlowData ContainsMetricData + +type FlowRecord struct { + FlowFormat FlowFormatType 
+ FlowData FlowData +} + +type HeaderProtocolType uint32 + +const ( + HeaderProtocolTypeEthernetISO88023 HeaderProtocolType = 1 + HeaderProtocolTypeISO88024TokenBus HeaderProtocolType = 2 + HeaderProtocolTypeISO88025TokenRing HeaderProtocolType = 3 + HeaderProtocolTypeFDDI HeaderProtocolType = 4 + HeaderProtocolTypeFrameRelay HeaderProtocolType = 5 + HeaderProtocolTypeX25 HeaderProtocolType = 6 + HeaderProtocolTypePPP HeaderProtocolType = 7 + HeaderProtocolTypeSMDS HeaderProtocolType = 8 + HeaderProtocolTypeAAL5 HeaderProtocolType = 9 + HeaderProtocolTypeAAL5IP HeaderProtocolType = 10 /* e.g. Cisco AAL5 mux */ + HeaderProtocolTypeIPv4 HeaderProtocolType = 11 + HeaderProtocolTypeIPv6 HeaderProtocolType = 12 + HeaderProtocolTypeMPLS HeaderProtocolType = 13 + HeaderProtocolTypePOS HeaderProtocolType = 14 /* RFC 1662, 2615 */ +) + +var HeaderProtocolMap = map[HeaderProtocolType]string{ + HeaderProtocolTypeEthernetISO88023: "ETHERNET-ISO88023", // sflow_version_5.txt line: 1920 +} + +type Header ContainsMetricData + +type RawPacketHeaderFlowData struct { + HeaderProtocol HeaderProtocolType + FrameLength uint32 + Bytes uint32 + StrippedOctets uint32 + HeaderLength uint32 + Header Header +} + +func (h RawPacketHeaderFlowData) GetTags() map[string]string { + t := h.Header.GetTags() + t["header_protocol"] = HeaderProtocolMap[h.HeaderProtocol] + return t +} +func (h RawPacketHeaderFlowData) GetFields() map[string]interface{} { + f := h.Header.GetFields() + f["bytes"] = h.Bytes + f["frame_length"] = h.FrameLength + f["header_length"] = h.HeaderLength + return f +} + +type IPHeader ContainsMetricData + +type EthHeader struct { + DestinationMAC [6]byte + SourceMAC [6]byte + TagProtocolIdentifier uint16 + TagControlInformation uint16 + EtherTypeCode uint16 + EtherType string + IPHeader IPHeader +} + +func (h EthHeader) GetTags() map[string]string { + t := h.IPHeader.GetTags() + t["src_mac"] = net.HardwareAddr(h.SourceMAC[:]).String() + t["dst_mac"] = net.HardwareAddr(h.DestinationMAC[:]).String() + t["ether_type"] = h.EtherType + return t +} +func (h EthHeader) GetFields() map[string]interface{} { + return h.IPHeader.GetFields() +} + +type ProtocolHeader ContainsMetricData + +// https://en.wikipedia.org/wiki/IPv4#Header +type IPV4Header struct { + Version uint8 // 4 bit + InternetHeaderLength uint8 // 4 bit + DSCP uint8 + ECN uint8 + TotalLength uint16 + Identification uint16 + Flags uint8 + FragmentOffset uint16 + TTL uint8 + Protocol uint8 // https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers + HeaderChecksum uint16 + SourceIP [4]byte + DestIP [4]byte + ProtocolHeader ProtocolHeader +} + +func (h IPV4Header) GetTags() map[string]string { + var t map[string]string + if h.ProtocolHeader != nil { + t = h.ProtocolHeader.GetTags() + } else { + t = map[string]string{} + } + t["src_ip"] = net.IP(h.SourceIP[:]).String() + t["dst_ip"] = net.IP(h.DestIP[:]).String() + return t +} +func (h IPV4Header) GetFields() map[string]interface{} { + var f map[string]interface{} + if h.ProtocolHeader != nil { + f = h.ProtocolHeader.GetFields() + } else { + f = map[string]interface{}{} + } + f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10) + f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10) + f["ip_flags"] = h.Flags + f["ip_fragment_offset"] = h.FragmentOffset + f["ip_total_length"] = h.TotalLength + f["ip_ttl"] = h.TTL + return f +} + +// https://en.wikipedia.org/wiki/IPv6_packet +type IPV6Header struct { + DSCP uint8 + ECN uint8 + PayloadLength uint16 + NextHeaderProto uint8 // tcp/udp? 
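+	// NextHeaderProto carries the IPv6 Next Header value (e.g. IPProtocolTCP or IPProtocolUDP).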
+ HopLimit uint8 + SourceIP [16]byte + DestIP [16]byte + ProtocolHeader ProtocolHeader +} + +func (h IPV6Header) GetTags() map[string]string { + var t map[string]string + if h.ProtocolHeader != nil { + t = h.ProtocolHeader.GetTags() + } else { + t = map[string]string{} + } + t["src_ip"] = net.IP(h.SourceIP[:]).String() + t["dst_ip"] = net.IP(h.DestIP[:]).String() + return t +} +func (h IPV6Header) GetFields() map[string]interface{} { + var f map[string]interface{} + if h.ProtocolHeader != nil { + f = h.ProtocolHeader.GetFields() + } else { + f = map[string]interface{}{} + } + f["ip_dscp"] = strconv.FormatUint(uint64(h.DSCP), 10) + f["ip_ecn"] = strconv.FormatUint(uint64(h.ECN), 10) + f["payload_length"] = h.PayloadLength + return f +} + +// https://en.wikipedia.org/wiki/Transmission_Control_Protocol +type TCPHeader struct { + SourcePort uint16 + DestinationPort uint16 + Sequence uint32 + AckNumber uint32 + TCPHeaderLength uint8 + Flags uint16 + TCPWindowSize uint16 + Checksum uint16 + TCPUrgentPointer uint16 +} + +func (h TCPHeader) GetTags() map[string]string { + t := map[string]string{ + "dst_port": strconv.FormatUint(uint64(h.DestinationPort), 10), + "src_port": strconv.FormatUint(uint64(h.SourcePort), 10), + } + return t +} +func (h TCPHeader) GetFields() map[string]interface{} { + return map[string]interface{}{ + "tcp_header_length": h.TCPHeaderLength, + "tcp_urgent_pointer": h.TCPUrgentPointer, + "tcp_window_size": h.TCPWindowSize, + } +} + +type UDPHeader struct { + SourcePort uint16 + DestinationPort uint16 + UDPLength uint16 + Checksum uint16 +} + +func (h UDPHeader) GetTags() map[string]string { + t := map[string]string{ + "dst_port": strconv.FormatUint(uint64(h.DestinationPort), 10), + "src_port": strconv.FormatUint(uint64(h.SourcePort), 10), + } + return t +} +func (h UDPHeader) GetFields() map[string]interface{} { + return map[string]interface{}{ + "udp_length": h.UDPLength, + } +} diff --git a/plugins/inputs/smart/README.md b/plugins/inputs/smart/README.md index 4826edbc6..47320aeac 100644 --- a/plugins/inputs/smart/README.md +++ b/plugins/inputs/smart/README.md @@ -3,6 +3,8 @@ Get metrics using the command line utility `smartctl` for S.M.A.R.T. (Self-Monitoring, Analysis and Reporting Technology) storage devices. SMART is a monitoring system included in computer hard disk drives (HDDs) and solid-state drives (SSDs)[1] that detects and reports on various indicators of drive reliability, with the intent of enabling the anticipation of hardware failures. See smartmontools (https://www.smartmontools.org/). +SMART information is separated between different measurements: `smart_device` is used for general information, while `smart_attribute` stores the detailed attribute information if `attributes = true` is enabled in the plugin configuration. + If no devices are specified, the plugin will scan for SMART devices via the following command: ``` @@ -24,52 +26,74 @@ To enable SMART on a storage device run: smartctl -s on ``` -### Configuration: +### Configuration ```toml # Read metrics from storage devices supporting S.M.A.R.T. [[inputs.smart]] ## Optionally specify the path to the smartctl executable # path = "/usr/bin/smartctl" - # + ## On most platforms smartctl requires root access. ## Setting 'use_sudo' to true will make use of sudo to run smartctl. ## Sudo must be configured to to allow the telegraf user to run smartctl - ## with out password. + ## without a password. # use_sudo = false - # + ## Skip checking disks in this power mode. 
Defaults to ## "standby" to not wake up disks that have stoped rotating. - ## See --nockeck in the man pages for smartctl. + ## See --nocheck in the man pages for smartctl. ## smartctl version 5.41 and 5.42 have faulty detection of ## power mode and might require changing this value to - ## "never" depending on your storage device. + ## "never" depending on your disks. # nocheck = "standby" - # - ## Gather detailed metrics for each SMART Attribute. - ## Defaults to "false" - ## + + ## Gather all returned S.M.A.R.T. attribute metrics and the detailed + ## information from each drive into the `smart_attribute` measurement. # attributes = false - # + ## Optionally specify devices to exclude from reporting. # excludes = [ "/dev/pass6" ] - # + ## Optionally specify devices and device type, if unset ## a scan (smartctl --scan) for S.M.A.R.T. devices will ## done and all found will be included except for the ## excluded in excludes. # devices = [ "/dev/ada0 -d atacam" ] + + ## Timeout for the smartctl command to complete. + # timeout = "30s" ``` -### Metrics: +### Permissions + +It's important to note that this plugin references smartctl, which may require additional permissions to execute successfully. +Depending on the user/group permissions of the telegraf user executing this plugin, you may need to use sudo. + + +You will need the following in your telegraf config: +```toml +[[inputs.smart]] + use_sudo = true +``` + +You will also need to update your sudoers file: +```bash +$ visudo +# Add the following line: +Cmnd_Alias SMARTCTL = /usr/bin/smartctl +telegraf ALL=(ALL) NOPASSWD: SMARTCTL +Defaults!SMARTCTL !logfile, !syslog, !pam_session +``` + +### Metrics - smart_device: - tags: - capacity - device - - device_model - enabled - - health + - model - serial_no - wwn - fields: @@ -82,10 +106,13 @@ smartctl -s on - smart_attribute: - tags: + - capacity - device + - enabled - fail - flags - id + - model - name - serial_no - wwn @@ -123,10 +150,24 @@ devices can be referenced by the WWN in the following location: To run `smartctl` with `sudo` create a wrapper script and use `path` in the configuration to execute that. 
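+
+For example, a minimal wrapper sketch (the script location and smartctl path are
+assumptions; adapt them to your system):
+```bash
+#!/bin/sh
+# Wrapper so the telegraf user can run smartctl via passwordless sudo.
+exec sudo -n /usr/bin/smartctl "$@"
+```
+Point the plugin's `path` option at this wrapper and leave `use_sudo` disabled,
+since the wrapper already invokes sudo itself.
+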
-### Output +### Troubleshooting + +If this plugin is not working as expected for your SMART enabled device, +please run these commands and include the output in a bug report: +``` +smartctl --scan +``` + +Run the following command replacing your configuration setting for NOCHECK and +the DEVICE from the previous command: +``` +smartctl --info --health --attributes --tolerance=verypermissive --nocheck NOCHECK --format=brief -d DEVICE +``` + +### Example Output ``` smart_device,enabled=Enabled,host=mbpro.local,device=rdisk0,model=APPLE\ SSD\ SM0512F,serial_no=S1K5NYCD964433,wwn=5002538655584d30,capacity=500277790720 udma_crc_errors=0i,exit_status=0i,health_ok=true,read_error_rate=0i,temp_c=40i 1502536854000000000 -smart_attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=199,name=UDMA_CRC_Error_Count,flags=-O-RC-,fail=-,host=mbpro.local,device=rdisk0 threshold=0i,raw_value=0i,exit_status=0i,value=200i,worst=200i 1502536854000000000 -smart_attribute,device=rdisk0,serial_no=S1K5NYCD964433,wwn=5002538655584d30,id=240,name=Unknown_SSD_Attribute,flags=-O---K,fail=-,host=mbpro.local exit_status=0i,value=100i,worst=100i,threshold=0i,raw_value=0i 1502536854000000000 +smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O-RC-,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=UDMA_CRC_Error_Count,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=200i,worst=200i 1502536854000000000 +smart_attribute,capacity=500277790720,device=rdisk0,enabled=Enabled,fail=-,flags=-O---K,host=mbpro.local,id=199,model=APPLE\ SSD\ SM0512F,name=Unknown_SSD_Attribute,serial_no=S1K5NYCD964433,wwn=5002538655584d30 exit_status=0i,raw_value=0i,threshold=0i,value=100i,worst=100i 1502536854000000000 ``` diff --git a/plugins/inputs/smart/smart.go b/plugins/inputs/smart/smart.go index 46912d487..b34174a33 100644 --- a/plugins/inputs/smart/smart.go +++ b/plugins/inputs/smart/smart.go @@ -18,34 +18,97 @@ import ( ) var ( - execCommand = exec.Command // execCommand is used to mock commands in tests. 
- // Device Model: APPLE SSD SM256E - modelInInfo = regexp.MustCompile("^Device Model:\\s+(.*)$") + // Product: HUH721212AL5204 + // Model Number: TS128GMTE850 + modelInfo = regexp.MustCompile("^(Device Model|Product|Model Number):\\s+(.*)$") // Serial Number: S0X5NZBC422720 - serialInInfo = regexp.MustCompile("^Serial Number:\\s+(.*)$") + serialInfo = regexp.MustCompile("(?i)^Serial Number:\\s+(.*)$") // LU WWN Device Id: 5 002538 655584d30 - wwnInInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$") + wwnInfo = regexp.MustCompile("^LU WWN Device Id:\\s+(.*)$") // User Capacity: 251,000,193,024 bytes [251 GB] - usercapacityInInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$") + usercapacityInfo = regexp.MustCompile("^User Capacity:\\s+([0-9,]+)\\s+bytes.*$") // SMART support is: Enabled - smartEnabledInInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$") + smartEnabledInfo = regexp.MustCompile("^SMART support is:\\s+(\\w+)$") // SMART overall-health self-assessment test result: PASSED + // SMART Health Status: OK // PASSED, FAILED, UNKNOWN - smartOverallHealth = regexp.MustCompile("^SMART overall-health self-assessment test result:\\s+(\\w+).*$") + smartOverallHealth = regexp.MustCompile("^(SMART overall-health self-assessment test result|SMART Health Status):\\s+(\\w+).*$") + + // sasNvmeAttr is a SAS or NVME SMART attribute + sasNvmeAttr = regexp.MustCompile(`^([^:]+):\s+(.+)$`) // ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE // 1 Raw_Read_Error_Rate -O-RC- 200 200 000 - 0 // 5 Reallocated_Sector_Ct PO--CK 100 100 000 - 0 // 192 Power-Off_Retract_Count -O--C- 097 097 000 - 14716 - attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$") + attribute = regexp.MustCompile("^\\s*([0-9]+)\\s(\\S+)\\s+([-P][-O][-S][-R][-C][-K])\\s+([0-9]+)\\s+([0-9]+)\\s+([0-9-]+)\\s+([-\\w]+)\\s+([\\w\\+\\.]+).*$") deviceFieldIds = map[string]string{ "1": "read_error_rate", "7": "seek_error_rate", + "190": "temp_c", "194": "temp_c", "199": "udma_crc_errors", } + + sasNvmeAttributes = map[string]struct { + ID string + Name string + Parse func(fields, deviceFields map[string]interface{}, str string) error + }{ + "Accumulated start-stop cycles": { + ID: "4", + Name: "Start_Stop_Count", + }, + "Accumulated load-unload cycles": { + ID: "193", + Name: "Load_Cycle_Count", + }, + "Current Drive Temperature": { + ID: "194", + Name: "Temperature_Celsius", + Parse: parseTemperature, + }, + "Temperature": { + ID: "194", + Name: "Temperature_Celsius", + Parse: parseTemperature, + }, + "Power Cycles": { + ID: "12", + Name: "Power_Cycle_Count", + }, + "Power On Hours": { + ID: "9", + Name: "Power_On_Hours", + }, + "Media and Data Integrity Errors": { + Name: "Media_and_Data_Integrity_Errors", + }, + "Error Information Log Entries": { + Name: "Error_Information_Log_Entries", + }, + "Critical Warning": { + Name: "Critical_Warning", + Parse: func(fields, _ map[string]interface{}, str string) error { + var value int64 + if _, err := fmt.Sscanf(str, "0x%x", &value); err != nil { + return err + } + + fields["raw_value"] = value + + return nil + }, + }, + "Available Spare": { + Name: "Available_Spare", + Parse: func(fields, deviceFields map[string]interface{}, str string) error { + return parseCommaSeparatedInt(fields, deviceFields, strings.TrimSuffix(str, "%")) + }, + }, + } ) type Smart struct { @@ -55,18 +118,19 @@ type Smart struct { Excludes []string Devices []string UseSudo bool + 
Timeout internal.Duration } var sampleConfig = ` ## Optionally specify the path to the smartctl executable # path = "/usr/bin/smartctl" - # + ## On most platforms smartctl requires root access. ## Setting 'use_sudo' to true will make use of sudo to run smartctl. ## Sudo must be configured to to allow the telegraf user to run smartctl - ## with out password. + ## without a password. # use_sudo = false - # + ## Skip checking disks in this power mode. Defaults to ## "standby" to not wake up disks that have stoped rotating. ## See --nocheck in the man pages for smartctl. @@ -74,22 +138,30 @@ var sampleConfig = ` ## power mode and might require changing this value to ## "never" depending on your disks. # nocheck = "standby" - # - ## Gather detailed metrics for each SMART Attribute. - ## Defaults to "false" - ## + + ## Gather all returned S.M.A.R.T. attribute metrics and the detailed + ## information from each drive into the 'smart_attribute' measurement. # attributes = false - # + ## Optionally specify devices to exclude from reporting. # excludes = [ "/dev/pass6" ] - # + ## Optionally specify devices and device type, if unset ## a scan (smartctl --scan) for S.M.A.R.T. devices will ## done and all found will be included except for the ## excluded in excludes. # devices = [ "/dev/ada0 -d atacam" ] + + ## Timeout for the smartctl command to complete. + # timeout = "30s" ` +func NewSmart() *Smart { + return &Smart{ + Timeout: internal.Duration{Duration: time.Second * 30}, + } +} + func (m *Smart) SampleConfig() string { return sampleConfig } @@ -117,21 +189,19 @@ func (m *Smart) Gather(acc telegraf.Accumulator) error { } // Wrap with sudo -func sudo(sudo bool, command string, args ...string) *exec.Cmd { +var runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + cmd := exec.Command(command, args...) if sudo { - return execCommand("sudo", append([]string{"-n", command}, args...)...) + cmd = exec.Command("sudo", append([]string{"-n", command}, args...)...) } - - return execCommand(command, args...) + return internal.CombinedOutputTimeout(cmd, timeout.Duration) } // Scan for S.M.A.R.T. devices func (m *Smart) scan() ([]string, error) { - - cmd := sudo(m.UseSudo, m.Path, "--scan") - out, err := internal.CombinedOutputTimeout(cmd, time.Second*5) + out, err := runCmd(m.Timeout, m.UseSudo, m.Path, "--scan") if err != nil { - return []string{}, fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) + return []string{}, fmt.Errorf("failed to run command '%s --scan': %s - %s", m.Path, err, string(out)) } devices := []string{} @@ -158,12 +228,11 @@ func excludedDev(excludes []string, deviceLine string) bool { // Get info and attributes for each S.M.A.R.T. 
device func (m *Smart) getAttributes(acc telegraf.Accumulator, devices []string) { - var wg sync.WaitGroup wg.Add(len(devices)) for _, device := range devices { - go gatherDisk(acc, m.UseSudo, m.Attributes, m.Path, m.Nocheck, device, &wg) + go gatherDisk(acc, m.Timeout, m.UseSudo, m.Attributes, m.Path, m.Nocheck, device, &wg) } wg.Wait() @@ -180,81 +249,78 @@ func exitStatus(err error) (int, error) { return 0, err } -func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, smartctl, nockeck, device string, wg *sync.WaitGroup) { - +func gatherDisk(acc telegraf.Accumulator, timeout internal.Duration, usesudo, collectAttributes bool, smartctl, nocheck, device string, wg *sync.WaitGroup) { defer wg.Done() // smartctl 5.41 & 5.42 have are broken regarding handling of --nocheck/-n - args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nockeck, "--format=brief"} + args := []string{"--info", "--health", "--attributes", "--tolerance=verypermissive", "-n", nocheck, "--format=brief"} args = append(args, strings.Split(device, " ")...) - cmd := sudo(usesudo, smartctl, args...) - out, e := internal.CombinedOutputTimeout(cmd, time.Second*5) + out, e := runCmd(timeout, usesudo, smartctl, args...) outStr := string(out) // Ignore all exit statuses except if it is a command line parse error exitStatus, er := exitStatus(e) if er != nil { - acc.AddError(fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), e, outStr)) + acc.AddError(fmt.Errorf("failed to run command '%s %s': %s - %s", smartctl, strings.Join(args, " "), e, outStr)) return } - device_tags := map[string]string{} - device_node := strings.Split(device, " ")[0] - device_tags["device"] = path.Base(device_node) - device_fields := make(map[string]interface{}) - device_fields["exit_status"] = exitStatus + deviceTags := map[string]string{} + deviceNode := strings.Split(device, " ")[0] + deviceTags["device"] = path.Base(deviceNode) + deviceFields := make(map[string]interface{}) + deviceFields["exit_status"] = exitStatus scanner := bufio.NewScanner(strings.NewReader(outStr)) for scanner.Scan() { line := scanner.Text() - model := modelInInfo.FindStringSubmatch(line) - if len(model) > 1 { - device_tags["model"] = model[1] + model := modelInfo.FindStringSubmatch(line) + if len(model) > 2 { + deviceTags["model"] = model[2] } - serial := serialInInfo.FindStringSubmatch(line) + serial := serialInfo.FindStringSubmatch(line) if len(serial) > 1 { - device_tags["serial_no"] = serial[1] + deviceTags["serial_no"] = serial[1] } - wwn := wwnInInfo.FindStringSubmatch(line) + wwn := wwnInfo.FindStringSubmatch(line) if len(wwn) > 1 { - device_tags["wwn"] = strings.Replace(wwn[1], " ", "", -1) + deviceTags["wwn"] = strings.Replace(wwn[1], " ", "", -1) } - capacity := usercapacityInInfo.FindStringSubmatch(line) + capacity := usercapacityInfo.FindStringSubmatch(line) if len(capacity) > 1 { - device_tags["capacity"] = strings.Replace(capacity[1], ",", "", -1) + deviceTags["capacity"] = strings.Replace(capacity[1], ",", "", -1) } - enabled := smartEnabledInInfo.FindStringSubmatch(line) + enabled := smartEnabledInfo.FindStringSubmatch(line) if len(enabled) > 1 { - device_tags["enabled"] = enabled[1] + deviceTags["enabled"] = enabled[1] } health := smartOverallHealth.FindStringSubmatch(line) - if len(health) > 1 { - device_fields["health_ok"] = (health[1] == "PASSED") + if len(health) > 2 { + deviceFields["health_ok"] = (health[2] == "PASSED" || health[2] == "OK") + } + + tags := map[string]string{} + fields 
:= make(map[string]interface{}) + + if collectAttributes { + keys := [...]string{"device", "model", "serial_no", "wwn", "capacity", "enabled"} + for _, key := range keys { + if value, ok := deviceTags[key]; ok { + tags[key] = value + } + } } attr := attribute.FindStringSubmatch(line) - if len(attr) > 1 { - - if attributes { - tags := map[string]string{} - fields := make(map[string]interface{}) - - device_node := strings.Split(device, " ")[0] - tags["device"] = path.Base(device_node) - - if serial, ok := device_tags["serial_no"]; ok { - tags["serial_no"] = serial - } - if wwn, ok := device_tags["wwn"]; ok { - tags["wwn"] = wwn - } + // attribute has been found, add it only if collectAttributes is true + if collectAttributes { tags["id"] = attr[1] tags["name"] = attr[2] tags["flags"] = attr[3] @@ -282,16 +348,39 @@ func gatherDisk(acc telegraf.Accumulator, usesudo, attributes bool, smartctl, no // save the raw value to a field. if field, ok := deviceFieldIds[attr[1]]; ok { if val, err := parseRawValue(attr[8]); err == nil { - device_fields[field] = val + deviceFields[field] = val + } + } + } else { + // what was found is not a vendor attribute + if matches := sasNvmeAttr.FindStringSubmatch(line); len(matches) > 2 { + if attr, ok := sasNvmeAttributes[matches[1]]; ok { + tags["name"] = attr.Name + if attr.ID != "" { + tags["id"] = attr.ID + } + + parse := parseCommaSeparatedInt + if attr.Parse != nil { + parse = attr.Parse + } + + if err := parse(fields, deviceFields, matches[2]); err != nil { + continue + } + // if the field is classified as an attribute, only add it + // if collectAttributes is true + if collectAttributes { + acc.AddFields("smart_attribute", fields, tags) + } } } } } - acc.AddFields("smart_device", device_fields, device_tags) + acc.AddFields("smart_device", deviceFields, deviceTags) } func parseRawValue(rawVal string) (int64, error) { - // Integer if i, err := strconv.ParseInt(rawVal, 10, 64); err == nil { return i, nil @@ -332,15 +421,37 @@ func parseInt(str string) int64 { return 0 } -func init() { - m := Smart{} - path, _ := exec.LookPath("smartctl") - if len(path) > 0 { - m.Path = path +func parseCommaSeparatedInt(fields, _ map[string]interface{}, str string) error { + i, err := strconv.ParseInt(strings.Replace(str, ",", "", -1), 10, 64) + if err != nil { + return err } - m.Nocheck = "standby" + fields["raw_value"] = i + + return nil +} + +func parseTemperature(fields, deviceFields map[string]interface{}, str string) error { + var temp int64 + if _, err := fmt.Sscanf(str, "%d C", &temp); err != nil { + return err + } + + fields["raw_value"] = temp + deviceFields["temp_c"] = temp + + return nil +} + +func init() { inputs.Add("smart", func() telegraf.Input { - return &m + m := NewSmart() + path, _ := exec.LookPath("smartctl") + if len(path) > 0 { + m.Path = path + } + m.Nocheck = "standby" + return m }) } diff --git a/plugins/inputs/smart/smart_test.go b/plugins/inputs/smart/smart_test.go index da658f5f9..3ea6e309f 100644 --- a/plugins/inputs/smart/smart_test.go +++ b/plugins/inputs/smart/smart_test.go @@ -1,19 +1,653 @@ package smart import ( - "fmt" - "os" - "os/exec" + "errors" + "sync" "testing" + "time" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func TestGatherAttributes(t *testing.T) { + s := NewSmart() + s.Path = "smartctl" + s.Attributes = true + + assert.Equal(t, time.Second*30, s.Timeout.Duration) + + var acc 
testutil.Accumulator + + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + if len(args) > 0 { + if args[0] == "--scan" { + return []byte(mockScanData), nil + } else if args[0] == "--info" { + return []byte(mockInfoAttributeData), nil + } + } + return nil, errors.New("command not found") + } + + err := s.Gather(&acc) + + require.NoError(t, err) + assert.Equal(t, 65, acc.NFields(), "Wrong number of fields gathered") + + var testsAda0Attributes = []struct { + fields map[string]interface{} + tags map[string]string + }{ + { + map[string]interface{}{ + "value": int64(200), + "worst": int64(200), + "threshold": int64(0), + "raw_value": int64(0), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "1", + "name": "Raw_Read_Error_Rate", + "flags": "-O-RC-", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(100), + "worst": int64(100), + "threshold": int64(0), + "raw_value": int64(0), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "5", + "name": "Reallocated_Sector_Ct", + "flags": "PO--CK", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(99), + "worst": int64(99), + "threshold": int64(0), + "raw_value": int64(2988), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "9", + "name": "Power_On_Hours", + "flags": "-O--CK", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(85), + "worst": int64(85), + "threshold": int64(0), + "raw_value": int64(14879), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "12", + "name": "Power_Cycle_Count", + "flags": "-O--CK", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(253), + "worst": int64(253), + "threshold": int64(10), + "raw_value": int64(2044932921600), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "169", + "name": "Unknown_Attribute", + "flags": "PO--C-", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(185), + "worst": int64(185), + "threshold": int64(100), + "raw_value": int64(957808640337), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "173", + "name": "Wear_Leveling_Count", + "flags": "-O--CK", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(55), + "worst": int64(40), + "threshold": int64(45), + "raw_value": int64(45), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": 
"251000193024", + "id": "190", + "name": "Airflow_Temperature_Cel", + "flags": "-O---K", + "fail": "Past", + }, + }, + { + map[string]interface{}{ + "value": int64(97), + "worst": int64(97), + "threshold": int64(0), + "raw_value": int64(14716), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "192", + "name": "Power-Off_Retract_Count", + "flags": "-O--C-", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(66), + "worst": int64(21), + "threshold": int64(0), + "raw_value": int64(34), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "194", + "name": "Temperature_Celsius", + "flags": "-O---K", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(100), + "worst": int64(100), + "threshold": int64(0), + "raw_value": int64(0), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "197", + "name": "Current_Pending_Sector", + "flags": "-O---K", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(200), + "worst": int64(200), + "threshold": int64(0), + "raw_value": int64(0), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "199", + "name": "UDMA_CRC_Error_Count", + "flags": "-O-RC-", + "fail": "-", + }, + }, + { + map[string]interface{}{ + "value": int64(100), + "worst": int64(253), + "threshold": int64(0), + "raw_value": int64(23709323), + "exit_status": int(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + "id": "240", + "name": "Head_Flying_Hours", + "flags": "------", + "fail": "-", + }, + }, + } + + for _, test := range testsAda0Attributes { + acc.AssertContainsTaggedFields(t, "smart_attribute", test.fields, test.tags) + } + + var testsAda0Device = []struct { + fields map[string]interface{} + tags map[string]string + }{ + { + map[string]interface{}{ + "exit_status": int(0), + "health_ok": bool(true), + "read_error_rate": int64(0), + "temp_c": int64(34), + "udma_crc_errors": int64(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + }, + }, + } + + for _, test := range testsAda0Device { + acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) + } +} + +func TestGatherNoAttributes(t *testing.T) { + s := NewSmart() + s.Path = "smartctl" + s.Attributes = false + + assert.Equal(t, time.Second*30, s.Timeout.Duration) + + // overwriting exec commands with mock commands + var acc testutil.Accumulator + + err := s.Gather(&acc) + + require.NoError(t, err) + assert.Equal(t, 5, acc.NFields(), "Wrong number of fields gathered") + acc.AssertDoesNotContainMeasurement(t, "smart_attribute") + + var testsAda0Device = []struct { + fields 
map[string]interface{} + tags map[string]string + }{ + { + map[string]interface{}{ + "exit_status": int(0), + "health_ok": bool(true), + "read_error_rate": int64(0), + "temp_c": int64(34), + "udma_crc_errors": int64(0), + }, + map[string]string{ + "device": "ada0", + "model": "APPLE SSD SM256E", + "serial_no": "S0X5NZBC422720", + "wwn": "5002538043584d30", + "enabled": "Enabled", + "capacity": "251000193024", + }, + }, + } + + for _, test := range testsAda0Device { + acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) + } +} + +func TestExcludedDev(t *testing.T) { + assert.Equal(t, true, excludedDev([]string{"/dev/pass6"}, "/dev/pass6 -d atacam"), "Should be excluded.") + assert.Equal(t, false, excludedDev([]string{}, "/dev/pass6 -d atacam"), "Shouldn't be excluded.") + assert.Equal(t, false, excludedDev([]string{"/dev/pass6"}, "/dev/pass1 -d atacam"), "Shouldn't be excluded.") +} + +func TestGatherSATAInfo(t *testing.T) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(hgstSATAInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + assert.Equal(t, 101, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(20), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherSATAInfo65(t *testing.T) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(hgstSATAInfoData65), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + assert.Equal(t, 91, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(18), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherHgstSAS(t *testing.T) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(hgstSASInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + assert.Equal(t, 6, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(4), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherHtSAS(t *testing.T) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(htSASInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "smart_attribute", + map[string]string{ + "device": ".", + "serial_no": "PDWAR9GE", + "enabled": "Enabled", + "id": "194", + "model": "HUC103030CSS600", + "name": "Temperature_Celsius", + }, + map[string]interface{}{ + "raw_value": 36, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "smart_attribute", + map[string]string{ + "device": ".", + "serial_no": "PDWAR9GE", + "enabled": "Enabled", + "id": "4", + "model": "HUC103030CSS600", + "name": "Start_Stop_Count", + }, + map[string]interface{}{ + "raw_value": 47, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "smart_device", + map[string]string{ + "device": ".", + 
"serial_no": "PDWAR9GE", + "enabled": "Enabled", + "model": "HUC103030CSS600", + }, + map[string]interface{}{ + "exit_status": 0, + "health_ok": true, + "temp_c": 36, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.SortMetrics(), testutil.IgnoreTime()) +} + +func TestGatherSSD(t *testing.T) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(ssdInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + assert.Equal(t, 105, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(26), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherSSDRaid(t *testing.T) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(ssdRaidInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + assert.Equal(t, 74, acc.NFields(), "Wrong number of fields gathered") + assert.Equal(t, uint64(15), acc.NMetrics(), "Wrong number of metrics gathered") +} + +func TestGatherNvme(t *testing.T) { + runCmd = func(timeout internal.Duration, sudo bool, command string, args ...string) ([]byte, error) { + return []byte(nvmeInfoData), nil + } + + var ( + acc = &testutil.Accumulator{} + wg = &sync.WaitGroup{} + ) + + wg.Add(1) + gatherDisk(acc, internal.Duration{Duration: time.Second * 30}, true, true, "", "", "", wg) + + expected := []telegraf.Metric{ + testutil.MustMetric("smart_device", + map[string]string{ + "device": ".", + "model": "TS128GMTE850", + "serial_no": "D704940282?", + }, + map[string]interface{}{ + "exit_status": 0, + "health_ok": true, + "temp_c": 38, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "id": "9", + "name": "Power_On_Hours", + "serial_no": "D704940282?", + "model": "TS128GMTE850", + }, + map[string]interface{}{ + "raw_value": 6038, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "id": "12", + "name": "Power_Cycle_Count", + "serial_no": "D704940282?", + "model": "TS128GMTE850", + }, + map[string]interface{}{ + "raw_value": 472, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "name": "Media_and_Data_Integrity_Errors", + "serial_no": "D704940282?", + "model": "TS128GMTE850", + }, + map[string]interface{}{ + "raw_value": 0, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "name": "Error_Information_Log_Entries", + "serial_no": "D704940282?", + "model": "TS128GMTE850", + }, + map[string]interface{}{ + "raw_value": 119699, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "name": "Available_Spare", + "serial_no": "D704940282?", + "model": "TS128GMTE850", + }, + map[string]interface{}{ + "raw_value": 100, + }, + time.Now(), + ), + testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "id": "194", + "name": "Temperature_Celsius", + "serial_no": "D704940282?", + "model": "TS128GMTE850", + }, + map[string]interface{}{ + "raw_value": 38, + }, + time.Now(), + ), + 
testutil.MustMetric("smart_attribute", + map[string]string{ + "device": ".", + "name": "Critical_Warning", + "serial_no": "D704940282?", + "model": "TS128GMTE850", + }, + map[string]interface{}{ + "raw_value": int64(9), + }, + time.Now(), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics(), testutil.IgnoreTime()) +} + +// smartctl output var ( + // smartctl --scan mockScanData = `/dev/ada0 -d atacam # /dev/ada0, ATA device ` + // smartctl --info --health --attributes --tolerance=verypermissive -n standby --format=brief [DEVICE] mockInfoAttributeData = `smartctl 6.5 2016-05-07 r4318 [Darwin 16.4.0 x86_64] (local build) Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org @@ -59,368 +693,403 @@ ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE |||____ S speed/performance ||_____ O updated online |______ P prefailure warning +` + + htSASInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.18-12-pve] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smar$montools.org + +=== START OF INFORMATION SECTION === +Vendor: HITACHI +Product: HUC103030CSS600 +Revision: J350 +Compliance: SPC-4 +User Capacity: 300,$00,000,000 bytes [300 GB] +Logical block size: 512 bytes +Rotation Rate: 10020 rpm +Form Factor: 2.5 inches +Logical Unit id: 0x5000cca00a4bdbc8 +Serial number: PDWAR9GE +Devicetype: disk +Transport protocol: SAS (SPL-3) +Local Time is: Wed Apr 17 15:01:28 2019 PDT +SMART support is: Available - device has SMART capability. +SMART support is: Enabled +Temperature Warning: Disabled or Not Supported + +=== START OF READ SMART DATA SECTION === +SMART Health Status: OK + +Current Drive Temperature: 36 C +Drive Trip Temperature: 85 C + +Manufactured in $eek 52 of year 2009 +Specified cycle count over device lifetime: 50000 +Accumulated start-stop cycles: 47 +Elements in grown defect list: 0 + +Vendor (Seagate) cache information + Blocks sent to initiator= 7270983270400000 +` + + hgstSASInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.0-46-generic] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Vendor: HGST +Product: HUH721212AL5204 +Revision: C3Q1 +Compliance: SPC-4 +User Capacity: 12,000,138,625,024 bytes [12.0 TB] +Logical block size: 512 bytes +Physical block size: 4096 bytes +LU is fully provisioned +Rotation Rate: 7200 rpm +Form Factor: 3.5 inches +Logical Unit id: 0x5000cca27076bfe8 +Serial number: 8HJ39K3H +Device type: disk +Transport protocol: SAS (SPL-3) +Local Time is: Thu Apr 18 13:25:03 2019 MSK +SMART support is: Available - device has SMART capability. 
+SMART support is: Enabled +Temperature Warning: Enabled + +=== START OF READ SMART DATA SECTION === +SMART Health Status: OK + +Current Drive Temperature: 34 C +Drive Trip Temperature: 85 C + +Manufactured in week 35 of year 2018 +Specified cycle count over device lifetime: 50000 +Accumulated start-stop cycles: 7 +Specified load-unload count over device lifetime: 600000 +Accumulated load-unload cycles: 39 +Elements in grown defect list: 0 + +Vendor (Seagate) cache information + Blocks sent to initiator = 544135446528 +` + + hgstSATAInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.0-46-generic] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Model Family: Hitachi/HGST Travelstar Z7K500 +Device Model: HGST HTE725050A7E630 +Serial Number: RCE50G20G81S9S +LU WWN Device Id: 5 000cca 90bc3a98b +Firmware Version: GS2OA3E0 +User Capacity: 500,107,862,016 bytes [500 GB] +Sector Sizes: 512 bytes logical, 4096 bytes physical +Rotation Rate: 7200 rpm +Form Factor: 2.5 inches +Device is: In smartctl database [for details use: -P show] +ATA Version is: ATA8-ACS T13/1699-D revision 6 +SATA Version is: SATA 2.6, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Thu Apr 18 13:27:51 2019 MSK +SMART support is: Available - device has SMART capability. +SMART support is: Enabled +Power mode is: ACTIVE or IDLE + +=== START OF READ SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +SMART Attributes Data Structure revision number: 16 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 1 Raw_Read_Error_Rate PO-R-- 100 100 062 - 0 + 2 Throughput_Performance P-S--- 100 100 040 - 0 + 3 Spin_Up_Time POS--- 100 100 033 - 1 + 4 Start_Stop_Count -O--C- 100 100 000 - 4 + 5 Reallocated_Sector_Ct PO--CK 100 100 005 - 0 + 7 Seek_Error_Rate PO-R-- 100 100 067 - 0 + 8 Seek_Time_Performance P-S--- 100 100 040 - 0 + 9 Power_On_Hours -O--C- 099 099 000 - 743 + 10 Spin_Retry_Count PO--C- 100 100 060 - 0 + 12 Power_Cycle_Count -O--CK 100 100 000 - 4 +191 G-Sense_Error_Rate -O-R-- 100 100 000 - 0 +192 Power-Off_Retract_Count -O--CK 100 100 000 - 2 +193 Load_Cycle_Count -O--C- 100 100 000 - 13 +194 Temperature_Celsius -O---- 250 250 000 - 24 (Min/Max 15/29) +196 Reallocated_Event_Count -O--CK 100 100 000 - 0 +197 Current_Pending_Sector -O---K 100 100 000 - 0 +198 Offline_Uncorrectable ---R-- 100 100 000 - 0 +199 UDMA_CRC_Error_Count -O-R-- 200 200 000 - 0 +223 Load_Retry_Count -O-R-- 100 100 000 - 0 + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning +` + + hgstSATAInfoData65 = `smartctl 6.5 2016-01-24 r4214 [x86_64-linux-4.4.0-145-generic] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Model Family: HGST Deskstar NAS +Device Model: HGST HDN724040ALE640 +Serial Number: PK1334PEK49SBS +LU WWN Device Id: 5 000cca 250ec3c9c +Firmware Version: MJAOA5E0 +User Capacity: 4,000,787,030,016 bytes [4.00 TB] +Sector Sizes: 512 bytes logical, 4096 bytes physical +Rotation Rate: 7200 rpm +Form Factor: 3.5 inches +Device is: In smartctl database [for details use: -P show] +ATA Version is: ATA8-ACS T13/1699-D revision 4 +SATA Version is: SATA 3.0, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Wed Apr 17 15:14:27 2019 PDT +SMART support is: Available - device has SMART capability. 
+SMART support is: Enabled +Power mode is: ACTIVE or IDLE + +=== START OF READ SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +SMART Attributes Data Structure revision number: 16 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 1 Raw_Read_Error_Rate PO-R-- 100 100 016 - 0 + 2 Throughput_Performance P-S--- 135 135 054 - 84 + 3 Spin_Up_Time POS--- 125 125 024 - 621 (Average 619) + 4 Start_Stop_Count -O--C- 100 100 000 - 33 + 5 Reallocated_Sector_Ct PO--CK 100 100 005 - 0 + 7 Seek_Error_Rate PO-R-- 100 100 067 - 0 + 8 Seek_Time_Performance P-S--- 119 119 020 - 35 + 9 Power_On_Hours -O--C- 098 098 000 - 19371 + 10 Spin_Retry_Count PO--C- 100 100 060 - 0 + 12 Power_Cycle_Count -O--CK 100 100 000 - 33 +192 Power-Off_Retract_Count -O--CK 100 100 000 - 764 +193 Load_Cycle_Count -O--C- 100 100 000 - 764 +194 Temperature_Celsius -O---- 176 176 000 - 34 (Min/Max 21/53) +196 Reallocated_Event_Count -O--CK 100 100 000 - 0 +197 Current_Pending_Sector -O---K 100 100 000 - 0 +198 Offline_Uncorrectable ---R-- 100 100 000 - 0 +199 UDMA_CRC_Error_Count -O-R-- 200 200 000 - 0 + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning +` + + ssdInfoData = `smartctl 6.6 2016-05-31 r4324 [x86_64-linux-4.15.0-33-generic] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Device Model: SanDisk Ultra II 240GB +Serial Number: XXXXXXXX +LU WWN Device Id: XXXXXXXX +Firmware Version: XXXXXXX +User Capacity: 240.057.409.536 bytes [240 GB] +Sector Size: 512 bytes logical/physical +Rotation Rate: Solid State Device +Form Factor: 2.5 inches +Device is: Not in smartctl database [for details use: -P showall] +ATA Version is: ACS-2 T13/2015-D revision 3 +SATA Version is: SATA 3.2, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Mon Sep 17 13:22:19 2018 CEST +SMART support is: Available - device has SMART capability. 
+SMART support is: Enabled +Power mode is: ACTIVE or IDLE + +=== START OF READ SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +SMART Attributes Data Structure revision number: 4 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 5 Reallocated_Sector_Ct -O--CK 100 100 --- - 0 + 9 Power_On_Hours -O--CK 100 100 --- - 6383 + 12 Power_Cycle_Count -O--CK 100 100 --- - 19 +165 Unknown_Attribute -O--CK 100 100 --- - 59310806 +166 Unknown_Attribute -O--CK 100 100 --- - 1 +167 Unknown_Attribute -O--CK 100 100 --- - 57 +168 Unknown_Attribute -O--CK 100 100 --- - 43 +169 Unknown_Attribute -O--CK 100 100 --- - 221 +170 Unknown_Attribute -O--CK 100 100 --- - 0 +171 Unknown_Attribute -O--CK 100 100 --- - 0 +172 Unknown_Attribute -O--CK 100 100 --- - 0 +173 Unknown_Attribute -O--CK 100 100 --- - 13 +174 Unknown_Attribute -O--CK 100 100 --- - 4 +184 End-to-End_Error -O--CK 100 100 --- - 0 +187 Reported_Uncorrect -O--CK 100 100 --- - 0 +188 Command_Timeout -O--CK 100 100 --- - 0 +194 Temperature_Celsius -O---K 066 065 --- - 34 (Min/Max 19/65) +199 UDMA_CRC_Error_Count -O--CK 100 100 --- - 0 +230 Unknown_SSD_Attribute -O--CK 100 100 --- - 2229110374919 +232 Available_Reservd_Space PO--CK 100 100 004 - 100 +233 Media_Wearout_Indicator -O--CK 100 100 --- - 3129 +234 Unknown_Attribute -O--CK 100 100 --- - 7444 +241 Total_LBAs_Written ----CK 253 253 --- - 4812 +242 Total_LBAs_Read ----CK 253 253 --- - 671 +244 Unknown_Attribute -O--CK 000 100 --- - 0 + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning +` + ssdRaidInfoData = `smartctl 6.6 2017-11-05 r4594 [FreeBSD 11.1-RELEASE-p13 amd64] (local build) +Copyright (C) 2002-17, Bruce Allen, Christian Franke, www.smartmontools.org + +CHECK POWER MODE: incomplete response, ATA output registers missing +CHECK POWER MODE not implemented, ignoring -n option +=== START OF INFORMATION SECTION === +Model Family: Samsung based SSDs +Device Model: Samsung SSD 850 PRO 256GB +Serial Number: S251NX0H869353L +LU WWN Device Id: 5 002538 84027f72f +Firmware Version: EXM02B6Q +User Capacity: 256 060 514 304 bytes [256 GB] +Sector Size: 512 bytes logical/physical +Rotation Rate: Solid State Device +Device is: In smartctl database [for details use: -P show] +ATA Version is: ACS-2, ATA8-ACS T13/1699-D revision 4c +SATA Version is: SATA 3.1, 6.0 Gb/s (current: 6.0 Gb/s) +Local Time is: Fri Sep 21 17:49:16 2018 CEST +SMART support is: Available - device has SMART capability. +SMART support is: Enabled + +=== START OF READ SMART DATA SECTION === +SMART Status not supported: Incomplete response, ATA output registers missing +SMART overall-health self-assessment test result: PASSED +Warning: This result is based on an Attribute check. + +General SMART Values: +Offline data collection status: (0x00) Offline data collection activity + was never started. + Auto Offline Data Collection: Disabled. +Self-test execution status: ( 0) The previous self-test routine completed + without error or no self-test has ever + been run. +Total time to complete Offline +data collection: ( 0) seconds. +Offline data collection +capabilities: (0x53) SMART execute Offline immediate. + Auto Offline data collection on/off support. + Suspend Offline collection upon new + command. + No Offline surface scan supported. + Self-test supported. + No Conveyance Self-test supported. + Selective Self-test supported. 
+SMART capabilities: (0x0003) Saves SMART data before entering + power-saving mode. + Supports SMART auto save timer. +Error logging capability: (0x01) Error logging supported. + General Purpose Logging supported. +Short self-test routine +recommended polling time: ( 2) minutes. +Extended self-test routine +recommended polling time: ( 136) minutes. +SCT capabilities: (0x003d) SCT Status supported. + SCT Error Recovery Control supported. + SCT Feature Control supported. + SCT Data Table supported. + +SMART Attributes Data Structure revision number: 1 +Vendor Specific SMART Attributes with Thresholds: +ID# ATTRIBUTE_NAME FLAGS VALUE WORST THRESH FAIL RAW_VALUE + 5 Reallocated_Sector_Ct PO--CK 099 099 010 - 1 + 9 Power_On_Hours -O--CK 094 094 000 - 26732 + 12 Power_Cycle_Count -O--CK 099 099 000 - 51 +177 Wear_Leveling_Count PO--C- 001 001 000 - 7282 +179 Used_Rsvd_Blk_Cnt_Tot PO--C- 099 099 010 - 1 +181 Program_Fail_Cnt_Total -O--CK 100 100 010 - 0 +182 Erase_Fail_Count_Total -O--CK 099 099 010 - 1 +183 Runtime_Bad_Block PO--C- 099 099 010 - 1 +187 Uncorrectable_Error_Cnt -O--CK 100 100 000 - 0 +190 Airflow_Temperature_Cel -O--CK 081 069 000 - 19 +195 ECC_Error_Rate -O-RC- 200 200 000 - 0 +199 CRC_Error_Count -OSRCK 100 100 000 - 0 +235 POR_Recovery_Count -O--C- 099 099 000 - 50 +241 Total_LBAs_Written -O--CK 099 099 000 - 61956393677 + ||||||_ K auto-keep + |||||__ C event count + ||||___ R error rate + |||____ S speed/performance + ||_____ O updated online + |______ P prefailure warning + +SMART Error Log Version: 1 +No Errors Logged + +SMART Self-test log structure revision number 1 +Num Test_Description Status Remaining LifeTime(hours) LBA_of_first_error +# 1 Short offline Completed without error 00% 26717 - +# 2 Short offline Completed without error 00% 26693 - +# 3 Short offline Completed without error 00% 26669 - +# 4 Short offline Completed without error 00% 26645 - +# 5 Short offline Completed without error 00% 26621 - +# 6 Short offline Completed without error 00% 26596 - +# 7 Extended offline Completed without error 00% 26574 - +# 8 Short offline Completed without error 00% 26572 - +# 9 Short offline Completed without error 00% 26548 - +#10 Short offline Completed without error 00% 26524 - +#11 Short offline Completed without error 00% 26500 - +#12 Short offline Completed without error 00% 26476 - +#13 Short offline Completed without error 00% 26452 - +#14 Short offline Completed without error 00% 26428 - +#15 Extended offline Completed without error 00% 26406 - +#16 Short offline Completed without error 00% 26404 - +#17 Short offline Completed without error 00% 26380 - +#18 Short offline Completed without error 00% 26356 - +#19 Short offline Completed without error 00% 26332 - +#20 Short offline Completed without error 00% 26308 - + +SMART Selective self-test log data structure revision number 1 + SPAN MIN_LBA MAX_LBA CURRENT_TEST_STATUS + 1 0 0 Not_testing + 2 0 0 Not_testing + 3 0 0 Not_testing + 4 0 0 Not_testing + 5 0 0 Not_testing +Selective self-test flags (0x0): + After scanning selected spans, do NOT read-scan remainder of disk. +If Selective self-test is pending on power-up, resume after 0 minute delay. +` + + nvmeInfoData = `smartctl 6.5 2016-05-07 r4318 [x86_64-linux-4.1.27-gvt-yocto-standard] (local build) +Copyright (C) 2002-16, Bruce Allen, Christian Franke, www.smartmontools.org + +=== START OF INFORMATION SECTION === +Model Number: TS128GMTE850 +Serial Number: D704940282? 
+Firmware Version: C2.3.13 +PCI Vendor/Subsystem ID: 0x126f +IEEE OUI Identifier: 0x000000 +Controller ID: 1 +Number of Namespaces: 1 +Namespace 1 Size/Capacity: 128,035,676,160 [128 GB] +Namespace 1 Formatted LBA Size: 512 +Local Time is: Fri Jun 15 11:41:35 2018 UTC + +=== START OF SMART DATA SECTION === +SMART overall-health self-assessment test result: PASSED + +SMART/Health Information (NVMe Log 0x02, NSID 0xffffffff) +Critical Warning: 0x09 +Temperature: 38 Celsius +Available Spare: 100% +Available Spare Threshold: 10% +Percentage Used: 16% +Data Units Read: 11,836,935 [6.06 TB] +Data Units Written: 62,288,091 [31.8 TB] +Host Read Commands: 135,924,188 +Host Write Commands: 7,715,573,429 +Controller Busy Time: 4,042 +Power Cycles: 472 +Power On Hours: 6,038 +Unsafe Shutdowns: 355 +Media and Data Integrity Errors: 0 +Error Information Log Entries: 119,699 +Warning Comp. Temperature Time: 0 +Critical Comp. Temperature Time: 0 ` ) - -func TestGatherAttributes(t *testing.T) { - s := &Smart{ - Path: "smartctl", - Attributes: true, - } - // overwriting exec commands with mock commands - execCommand = fakeExecCommand - var acc testutil.Accumulator - - err := s.Gather(&acc) - - require.NoError(t, err) - assert.Equal(t, 65, acc.NFields(), "Wrong number of fields gathered") - - var testsAda0Attributes = []struct { - fields map[string]interface{} - tags map[string]string - }{ - { - map[string]interface{}{ - "value": int64(200), - "worst": int64(200), - "threshold": int64(0), - "raw_value": int64(0), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "1", - "name": "Raw_Read_Error_Rate", - "flags": "-O-RC-", - "fail": "-", - }, - }, - { - map[string]interface{}{ - "value": int64(100), - "worst": int64(100), - "threshold": int64(0), - "raw_value": int64(0), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "5", - "name": "Reallocated_Sector_Ct", - "flags": "PO--CK", - "fail": "-", - }, - }, - { - map[string]interface{}{ - "value": int64(99), - "worst": int64(99), - "threshold": int64(0), - "raw_value": int64(2988), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "9", - "name": "Power_On_Hours", - "flags": "-O--CK", - "fail": "-", - }, - }, - { - map[string]interface{}{ - "value": int64(85), - "worst": int64(85), - "threshold": int64(0), - "raw_value": int64(14879), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "12", - "name": "Power_Cycle_Count", - "flags": "-O--CK", - "fail": "-", - }, - }, - { - map[string]interface{}{ - "value": int64(253), - "worst": int64(253), - "threshold": int64(10), - "raw_value": int64(2044932921600), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "169", - "name": "Unknown_Attribute", - "flags": "PO--C-", - "fail": "-", - }, - }, - { - map[string]interface{}{ - "value": int64(185), - "worst": int64(185), - "threshold": int64(100), - "raw_value": int64(957808640337), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "173", - "name": "Wear_Leveling_Count", - "flags": "-O--CK", - "fail": "-", - }, - }, - { - 
map[string]interface{}{ - "value": int64(55), - "worst": int64(40), - "threshold": int64(45), - "raw_value": int64(45), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "190", - "name": "Airflow_Temperature_Cel", - "flags": "-O---K", - "fail": "Past", - }, - }, - { - map[string]interface{}{ - "value": int64(97), - "worst": int64(97), - "threshold": int64(0), - "raw_value": int64(14716), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "192", - "name": "Power-Off_Retract_Count", - "flags": "-O--C-", - "fail": "-", - }, - }, - { - map[string]interface{}{ - "value": int64(66), - "worst": int64(21), - "threshold": int64(0), - "raw_value": int64(34), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "194", - "name": "Temperature_Celsius", - "flags": "-O---K", - "fail": "-", - }, - }, - { - map[string]interface{}{ - "value": int64(100), - "worst": int64(100), - "threshold": int64(0), - "raw_value": int64(0), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "197", - "name": "Current_Pending_Sector", - "flags": "-O---K", - "fail": "-", - }, - }, - { - map[string]interface{}{ - "value": int64(200), - "worst": int64(200), - "threshold": int64(0), - "raw_value": int64(0), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "199", - "name": "UDMA_CRC_Error_Count", - "flags": "-O-RC-", - "fail": "-", - }, - }, - { - map[string]interface{}{ - "value": int64(100), - "worst": int64(253), - "threshold": int64(0), - "raw_value": int64(23709323), - "exit_status": int(0), - }, - map[string]string{ - "device": "ada0", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "id": "240", - "name": "Head_Flying_Hours", - "flags": "------", - "fail": "-", - }, - }, - } - - for _, test := range testsAda0Attributes { - acc.AssertContainsTaggedFields(t, "smart_attribute", test.fields, test.tags) - } - - // tags = map[string]string{} - - var testsAda0Device = []struct { - fields map[string]interface{} - tags map[string]string - }{ - { - map[string]interface{}{ - "exit_status": int(0), - "health_ok": bool(true), - "read_error_rate": int64(0), - "temp_c": int64(34), - "udma_crc_errors": int64(0), - }, - map[string]string{ - "device": "ada0", - "model": "APPLE SSD SM256E", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "enabled": "Enabled", - "capacity": "251000193024", - }, - }, - } - - for _, test := range testsAda0Device { - acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) - } - -} - -func TestGatherNoAttributes(t *testing.T) { - s := &Smart{ - Path: "smartctl", - Attributes: false, - } - // overwriting exec commands with mock commands - execCommand = fakeExecCommand - var acc testutil.Accumulator - - err := s.Gather(&acc) - - require.NoError(t, err) - assert.Equal(t, 5, acc.NFields(), "Wrong number of fields gathered") - acc.AssertDoesNotContainMeasurement(t, "smart_attribute") - - // tags = map[string]string{} - - var testsAda0Device = []struct { - fields map[string]interface{} - tags map[string]string - }{ - { - map[string]interface{}{ - "exit_status": int(0), - "health_ok": bool(true), - "read_error_rate": int64(0), - 
"temp_c": int64(34), - "udma_crc_errors": int64(0), - }, - map[string]string{ - "device": "ada0", - "model": "APPLE SSD SM256E", - "serial_no": "S0X5NZBC422720", - "wwn": "5002538043584d30", - "enabled": "Enabled", - "capacity": "251000193024", - }, - }, - } - - for _, test := range testsAda0Device { - acc.AssertContainsTaggedFields(t, "smart_device", test.fields, test.tags) - } - -} - -func TestExcludedDev(t *testing.T) { - assert.Equal(t, true, excludedDev([]string{"/dev/pass6"}, "/dev/pass6 -d atacam"), "Should be excluded.") - assert.Equal(t, false, excludedDev([]string{}, "/dev/pass6 -d atacam"), "Shouldn't be excluded.") - assert.Equal(t, false, excludedDev([]string{"/dev/pass6"}, "/dev/pass1 -d atacam"), "Shouldn't be excluded.") - -} - -// fackeExecCommand is a helper function that mock -// the exec.Command call (and call the test binary) -func fakeExecCommand(command string, args ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcess", "--", command} - cs = append(cs, args...) - cmd := exec.Command(os.Args[0], cs...) - cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} - return cmd -} - -// TestHelperProcess isn't a real test. It's used to mock exec.Command -// For example, if you run: -// GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- --scan -// it returns below mockScanData. -func TestHelperProcess(t *testing.T) { - if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { - return - } - - args := os.Args - - // Previous arguments are tests stuff, that looks like : - // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- - cmd, arg1, args := args[3], args[4], args[5:] - - if cmd == "smartctl" { - if arg1 == "--scan" { - fmt.Fprint(os.Stdout, mockScanData) - } - if arg1 == "--info" { - fmt.Fprint(os.Stdout, mockInfoAttributeData) - } - } else { - fmt.Fprint(os.Stdout, "command not found") - os.Exit(1) - } - os.Exit(0) -} diff --git a/plugins/inputs/snmp/CONFIG-EXAMPLES.md b/plugins/inputs/snmp/CONFIG-EXAMPLES.md deleted file mode 100644 index a0a52eeb3..000000000 --- a/plugins/inputs/snmp/CONFIG-EXAMPLES.md +++ /dev/null @@ -1,65 +0,0 @@ -Here are a few configuration examples for different use cases. - -### Switch/router interface metrics - -This setup will collect data on all interfaces from three different tables, `IF-MIB::ifTable`, `IF-MIB::ifXTable` and `EtherLike-MIB::dot3StatsTable`. It will also add the name from `IF-MIB::ifDescr` and use that as a tag. Depending on your needs and preferences you can easily use `IF-MIB::ifName` or `IF-MIB::ifAlias` instead or in addition. The values of these are typically: - - IF-MIB::ifName = Gi0/0/0 - IF-MIB::ifDescr = GigabitEthernet0/0/0 - IF-MIB::ifAlias = ### LAN ### - -This configuration also collects the hostname from the device (`RFC1213-MIB::sysName.0`) and adds as a tag. So each metric will both have the configured host/IP as `agent_host` as well as the device self-reported hostname as `hostname` and the name of the host that has collected these metrics as `host`. - -Here is the configuration that you add to your `telegraf.conf`: - -``` -[[inputs.snmp]] - agents = [ "host.example.com" ] - version = 2 - community = "public" - - [[inputs.snmp.field]] - name = "hostname" - oid = "RFC1213-MIB::sysName.0" - is_tag = true - - [[inputs.snmp.field]] - name = "uptime" - oid = "DISMAN-EXPRESSION-MIB::sysUpTimeInstance" - - # IF-MIB::ifTable contains counters on input and output traffic as well as errors and discards. 
- [[inputs.snmp.table]] - name = "interface" - inherit_tags = [ "hostname" ] - oid = "IF-MIB::ifTable" - - # Interface tag - used to identify interface in metrics database - [[inputs.snmp.table.field]] - name = "ifDescr" - oid = "IF-MIB::ifDescr" - is_tag = true - - # IF-MIB::ifXTable contains newer High Capacity (HC) counters that do not overflow as fast for a few of the ifTable counters - [[inputs.snmp.table]] - name = "interface" - inherit_tags = [ "hostname" ] - oid = "IF-MIB::ifXTable" - - # Interface tag - used to identify interface in metrics database - [[inputs.snmp.table.field]] - name = "ifDescr" - oid = "IF-MIB::ifDescr" - is_tag = true - - # EtherLike-MIB::dot3StatsTable contains detailed ethernet-level information about what kind of errors have been logged on an interface (such as FCS error, frame too long, etc) - [[inputs.snmp.table]] - name = "interface" - inherit_tags = [ "hostname" ] - oid = "EtherLike-MIB::dot3StatsTable" - - # Interface tag - used to identify interface in metrics database - [[inputs.snmp.table.field]] - name = "ifDescr" - oid = "IF-MIB::ifDescr" - is_tag = true -``` diff --git a/plugins/inputs/snmp/DEBUGGING.md b/plugins/inputs/snmp/DEBUGGING.md deleted file mode 100644 index f357c58b5..000000000 --- a/plugins/inputs/snmp/DEBUGGING.md +++ /dev/null @@ -1,53 +0,0 @@ -# Debugging & Testing SNMP Issues - -### Install net-snmp on your system: - -Mac: - -``` -brew install net-snmp -``` - -### Run an SNMP simulator docker image to get a full MIB on port 161: - -``` -docker run -d -p 161:161/udp xeemetric/snmp-simulator -``` - -### snmpget: - -snmpget corresponds to the inputs.snmp.field configuration. - -```bash -$ # get an snmp field with fully-qualified MIB name. -$ snmpget -v2c -c public localhost:161 system.sysUpTime.0 -DISMAN-EVENT-MIB::sysUpTimeInstance = Timeticks: (1643) 0:00:16.43 - -$ # get an snmp field, outputting the numeric OID. -$ snmpget -On -v2c -c public localhost:161 system.sysUpTime.0 -.1.3.6.1.2.1.1.3.0 = Timeticks: (1638) 0:00:16.38 -``` - -### snmptranslate: - -snmptranslate can be used to translate an OID to a MIB name: - -```bash -$ snmptranslate .1.3.6.1.2.1.1.3.0 -DISMAN-EVENT-MIB::sysUpTimeInstance -``` - -And to convert a partial MIB name to a fully qualified one: - -```bash -$ snmptranslate -IR sysUpTime.0 -DISMAN-EVENT-MIB::sysUpTimeInstance -``` - -And to convert a MIB name to an OID: - -```bash -$ snmptranslate -On -IR system.sysUpTime.0 -.1.3.6.1.2.1.1.3.0 -``` - diff --git a/plugins/inputs/snmp/README.md b/plugins/inputs/snmp/README.md index dab28e9b0..4e9ce8e50 100644 --- a/plugins/inputs/snmp/README.md +++ b/plugins/inputs/snmp/README.md @@ -1,180 +1,221 @@ -# SNMP Plugin +# SNMP Input Plugin -The SNMP input plugin gathers metrics from SNMP agents. +The `snmp` input plugin uses polling to gather metrics from SNMP agents. +Support for gathering individual OIDs as well as complete SNMP tables is +included. -## Configuration: +### Prerequisites -See additional SNMP plugin configuration examples [here](./CONFIG-EXAMPLES.md). +This plugin uses the `snmptable` and `snmptranslate` programs from the +[net-snmp][] project. These tools will need to be installed into the `PATH` in +order to be located. Other utilities from the net-snmp project may be useful +for troubleshooting, but are not directly used by the plugin. -### Example: +These programs will load available MIBs on the system. 
Typically the default +directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a +different location you may need to make the paths known to net-snmp. The +location of these files can be configured in the `snmp.conf` or via the +`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more +information. -SNMP data: -``` -.1.0.0.0.1.1.0 octet_str "foo" -.1.0.0.0.1.1.1 octet_str "bar" -.1.0.0.0.1.102 octet_str "bad" -.1.0.0.0.1.2.0 integer 1 -.1.0.0.0.1.2.1 integer 2 -.1.0.0.0.1.3.0 octet_str "0.123" -.1.0.0.0.1.3.1 octet_str "0.456" -.1.0.0.0.1.3.2 octet_str "9.999" -.1.0.0.1.1 octet_str "baz" -.1.0.0.1.2 uinteger 54321 -.1.0.0.1.3 uinteger 234 -``` - -Telegraf config: +### Configuration ```toml [[inputs.snmp]] - agents = [ "127.0.0.1:161" ] - version = 2 - community = "public" + ## Agent addresses to retrieve values from. + ## example: agents = ["udp://127.0.0.1:161"] + ## agents = ["tcp://127.0.0.1:161"] + agents = ["udp://127.0.0.1:161"] - name = "system" - [[inputs.snmp.field]] - name = "hostname" - oid = ".1.0.0.1.1" - is_tag = true + ## Timeout for each request. + # timeout = "5s" + + ## SNMP version; can be 1, 2, or 3. + # version = 2 + + ## SNMP community string. + # community = "public" + + ## Number of retries to attempt. + # retries = 3 + + ## The GETBULK max-repetitions parameter. + # max_repetitions = 10 + + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA", or "". + # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Context Name. + # context_name = "" + ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". + # priv_protocol = "" + ## Privacy password used for encrypted messages. + # priv_password = "" + + ## Add fields and tables defining the variables you wish to collect. This + ## example collects the system uptime and interface variables. Reference the + ## full plugin documentation for configuration details. [[inputs.snmp.field]] + oid = "RFC1213-MIB::sysUpTime.0" name = "uptime" - oid = ".1.0.0.1.2" - [[inputs.snmp.field]] - name = "loadavg" - oid = ".1.0.0.1.3" - conversion = "float(2)" - - [[inputs.snmp.table]] - name = "remote_servers" - inherit_tags = [ "hostname" ] - [[inputs.snmp.table.field]] - name = "server" - oid = ".1.0.0.0.1.1" - is_tag = true - [[inputs.snmp.table.field]] - name = "connections" - oid = ".1.0.0.0.1.2" - [[inputs.snmp.table.field]] - name = "latency" - oid = ".1.0.0.0.1.3" - conversion = "float" -``` - -Resulting output: -``` -* Plugin: snmp, Collection 1 -> system,agent_host=127.0.0.1,host=mylocalhost,hostname=baz loadavg=2.34,uptime=54321i 1468953135000000000 -> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency=0.123 1468953135000000000 -> remote_servers,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency=0.456 1468953135000000000 -``` - -#### Configuration via MIB: - -This example uses the SNMP data above, but is configured via the MIB. -The example MIB file can be found in the `testdata` directory. See the [MIB lookups](#mib-lookups) section for more information. 
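The rewritten configuration above switches agent addresses to a URL form (`udp://` or `tcp://`, with `udp` and port 161 assumed when the scheme or port is omitted). As a rough, standalone illustration of how such an address splits into transport, host, and port, here is a minimal Go sketch; it is not part of this patch, the `parseAgent` helper is an invented name for the example, and it only approximates the address handling that the snmp input's connection setup gains later in this diff.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseAgent normalizes an agent address of the form "[scheme://]host[:port]",
// assuming udp and port 161 when they are not specified. This mirrors, in
// simplified form, the URL-based agent parsing introduced for the snmp input.
func parseAgent(agent string) (transport, host, port string, err error) {
	// Bare "host[:port]" addresses are treated as udp for backwards compatibility.
	if !strings.Contains(agent, "://") {
		agent = "udp://" + agent
	}

	u, err := url.Parse(agent)
	if err != nil {
		return "", "", "", err
	}

	switch u.Scheme {
	case "tcp", "udp":
		transport = u.Scheme
	default:
		return "", "", "", fmt.Errorf("unsupported scheme: %v", u.Scheme)
	}

	host = u.Hostname()
	port = u.Port()
	if port == "" {
		// Default SNMP port.
		port = "161"
	}
	return transport, host, port, nil
}

func main() {
	for _, agent := range []string{"127.0.0.1", "tcp://10.0.0.1:1161", "udp://[::1]:161"} {
		transport, host, port, err := parseAgent(agent)
		if err != nil {
			fmt.Println(agent, "->", err)
			continue
		}
		fmt.Printf("%s -> transport=%s host=%s port=%s\n", agent, transport, host, port)
	}
}
```

Running the sketch shows the bare IP falling back to `udp` and port `161`, while the URL forms keep their explicit transport and port.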
- -Telegraf config: -```toml -[[inputs.snmp]] - agents = [ "127.0.0.1:161" ] - version = 2 - community = "public" [[inputs.snmp.field]] - oid = "TEST::hostname" + oid = "RFC1213-MIB::sysName.0" + name = "source" is_tag = true [[inputs.snmp.table]] - oid = "TEST::testTable" - inherit_tags = [ "hostname" ] + oid = "IF-MIB::ifTable" + name = "interface" + inherit_tags = ["source"] + + [[inputs.snmp.table.field]] + oid = "IF-MIB::ifDescr" + name = "ifDescr" + is_tag = true ``` -Resulting output: -``` -* Plugin: snmp, Collection 1 -> testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=foo connections=1i,latency="0.123" 1468953135000000000 -> testTable,agent_host=127.0.0.1,host=mylocalhost,hostname=baz,server=bar connections=2i,latency="0.456" 1468953135000000000 +#### Configure SNMP Requests + +This plugin provides two methods for configuring the SNMP requests: `fields` +and `tables`. Use the `field` option to gather single ad-hoc variables. +To collect SNMP tables, use the `table` option. + +##### Field + +Use a `field` to collect a variable by OID. Requests specified with this +option operate similar to the `snmpget` utility. + +```toml +[[inputs.snmp]] + # ... snip ... + + [[inputs.snmp.field]] + ## Object identifier of the variable as a numeric or textual OID. + oid = "RFC1213-MIB::sysName.0" + + ## Name of the field or tag to create. If not specified, it defaults to + ## the value of 'oid'. If 'oid' is numeric, an attempt to translate the + ## numeric OID into a textual OID will be made. + # name = "" + + ## If true the variable will be added as a tag, otherwise a field will be + ## created. + # is_tag = false + + ## Apply one of the following conversions to the variable value: + ## float(X) Convert the input value into a float and divides by the + ## Xth power of 10. Effectively just moves the decimal left + ## X places. For example a value of `123` with `float(2)` + ## will result in `1.23`. + ## float: Convert the value into a float with no adjustment. Same + ## as `float(0)`. + ## int: Convert the value into an integer. + ## hwaddr: Convert the value to a MAC address. + ## ipaddr: Convert the value to an IP address. + # conversion = "" ``` -### Config parameters +##### Table -* `agents`: Default: `[]` -List of SNMP agents to connect to in the form of `IP[:PORT]`. If `:PORT` is unspecified, it defaults to `161`. +Use a `table` to configure the collection of a SNMP table. SNMP requests +formed with this option operate similarly way to the `snmptable` command. -* `version`: Default: `2` -SNMP protocol version to use. +Control the handling of specific table columns using a nested `field`. These +nested fields are specified similarly to a top-level `field`. -* `community`: Default: `"public"` -SNMP community to use. +All columns of the SNMP table will be collected, it is not required to add a +nested field for each column, only those which you wish to modify. To exclude +columns use [metric filtering][]. -* `max_repetitions`: Default: `50` -Maximum number of iterations for repeating variables. +One [metric][] is created for each row of the SNMP table. -* `sec_name`: -Security name for authenticated SNMPv3 requests. +```toml +[[inputs.snmp]] + # ... snip ... -* `auth_protocol`: Values: `"MD5"`,`"SHA"`,`""`. Default: `""` -Authentication protocol for authenticated SNMPv3 requests. + [[inputs.snmp.table]] + ## Object identifier of the SNMP table as a numeric or textual OID. + oid = "IF-MIB::ifTable" -* `auth_password`: -Authentication password for authenticated SNMPv3 requests. 
+ ## Name of the field or tag to create. If not specified, it defaults to + ## the value of 'oid'. If 'oid' is numeric an attempt to translate the + ## numeric OID into a textual OID will be made. + # name = "" -* `sec_level`: Values: `"noAuthNoPriv"`,`"authNoPriv"`,`"authPriv"`. Default: `"noAuthNoPriv"` -Security level used for SNMPv3 messages. + ## Which tags to inherit from the top-level config and to use in the output + ## of this table's measurement. + ## example: inherit_tags = ["source"] + # inherit_tags = [] -* `context_name`: -Context name used for SNMPv3 requests. + ## Add an 'index' tag with the table row number. Use this if the table has + ## no indexes or if you are excluding them. This option is normally not + ## required as any index columns are automatically added as tags. + # index_as_tag = false -* `priv_protocol`: Values: `"DES"`,`"AES"`,`""`. Default: `""` -Privacy protocol used for encrypted SNMPv3 messages. + [[inputs.snmp.table.field]] + ## OID to get. May be a numeric or textual module-qualified OID. + oid = "IF-MIB::ifDescr" -* `priv_password`: -Privacy password used for encrypted SNMPv3 messages. + ## Name of the field or tag to create. If not specified, it defaults to + ## the value of 'oid'. If 'oid' is numeric an attempt to translate the + ## numeric OID into a textual OID will be made. + # name = "" + ## Output this field as a tag. + # is_tag = false -* `name`: -Output measurement name. + ## The OID sub-identifier to strip off so that the index can be matched + ## against other fields in the table. + # oid_index_suffix = "" -#### Field parameters: -* `oid`: -OID to get. May be a numeric or textual OID. + ## Specifies the length of the index after the supplied table OID (in OID + ## path segments). Truncates the index after this point to remove non-fixed + ## value or length index suffixes. + # oid_index_length = 0 +``` -* `oid_index_suffix`: -The OID sub-identifier to strip off so that the index can be matched against other fields in the table. +### Troubleshooting -* `oid_index_length`: -Specifies the length of the index after the supplied table OID (in OID path segments). Truncates the index after this point to remove non-fixed value or length index suffixes. +Check that a numeric field can be translated to a textual field: +``` +$ snmptranslate .1.3.6.1.2.1.1.3.0 +DISMAN-EVENT-MIB::sysUpTimeInstance +``` -* `name`: -Output field/tag name. -If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a texual OID will be made. +Request a top-level field: +``` +$ snmpget -v2c -c public 127.0.0.1 sysUpTime.0 +``` -* `is_tag`: -Output this field as a tag. +Request a table: +``` +$ snmptable -v2c -c public 127.0.0.1 ifTable +``` -* `conversion`: Values: `"float(X)"`,`"float"`,`"int"`,`""`. Default: `""` -Converts the value according to the given specification. +To collect a packet capture, run this command in the background while running +Telegraf or one of the above commands. Adjust the interface, host and port as +needed: +``` +$ sudo tcpdump -s 0 -i eth0 -w telegraf-snmp.pcap host 127.0.0.1 and port 161 +``` - - `float(X)`: Converts the input value into a float and divides by the Xth power of 10. Efficively just moves the decimal left X places. For example a value of `123` with `float(2)` will result in `1.23`. - - `float`: Converts the value into a float with no adjustment. Same as `float(0)`. - - `int`: Convertes the value into an integer. - - `hwaddr`: Converts the value to a MAC address. 
- - `ipaddr`: Converts the value to an IP address. +### Example Output -#### Table parameters: -* `oid`: -Automatically populates the table's fields using data from the MIB. +``` +snmp,agent_host=127.0.0.1,source=loaner uptime=11331974i 1575509815000000000 +interface,agent_host=127.0.0.1,ifDescr=wlan0,ifIndex=3,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=3436617431i,ifInUcastPkts=2717778i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=581368041i,ifOutQLen=0i,ifOutUcastPkts=1354338i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=0i,ifType=6i 1575509815000000000 +interface,agent_host=127.0.0.1,ifDescr=eth0,ifIndex=2,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=21i,ifInOctets=3852386380i,ifInUcastPkts=3634004i,ifInUnknownProtos=0i,ifLastChange=9088763i,ifMtu=1500i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=434865441i,ifOutQLen=0i,ifOutUcastPkts=2110394i,ifPhysAddress="c8:5b:76:c9:e6:8c",ifSpecific=".0.0",ifSpeed=1000000000i,ifType=6i 1575509815000000000 +interface,agent_host=127.0.0.1,ifDescr=lo,ifIndex=1,source=example.org ifAdminStatus=1i,ifInDiscards=0i,ifInErrors=0i,ifInNUcastPkts=0i,ifInOctets=51555569i,ifInUcastPkts=339097i,ifInUnknownProtos=0i,ifLastChange=0i,ifMtu=65536i,ifOperStatus=1i,ifOutDiscards=0i,ifOutErrors=0i,ifOutNUcastPkts=0i,ifOutOctets=51555569i,ifOutQLen=0i,ifOutUcastPkts=339097i,ifSpecific=".0.0",ifSpeed=10000000i,ifType=24i 1575509815000000000 +``` -* `name`: -Output measurement name. -If not specified, it defaults to the value of `oid`. If `oid` is numeric, an attempt to translate the numeric OID into a texual OID will be made. - -* `inherit_tags`: -Which tags to inherit from the top-level config and to use in the output of this table's measurement. - -* `index_as_tag`: -Adds each row's index within the table as a tag. - -### MIB lookups -If the plugin is configured such that it needs to perform lookups from the MIB, it will use the net-snmp utilities `snmptranslate` and `snmptable`. - -When performing the lookups, the plugin will load all available MIBs. If your MIB files are in a custom path, you may add the path using the `MIBDIRS` environment variable. See [`man 1 snmpcmd`](http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK) for more information on the variable. +[net-snmp]: http://www.net-snmp.org/ +[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK +[metric filtering]: /docs/CONFIGURATION.md#metric-filtering +[metric]: /docs/METRICS.md diff --git a/plugins/inputs/snmp/snmp.go b/plugins/inputs/snmp/snmp.go index 50a3cb0ae..57f29bfb0 100644 --- a/plugins/inputs/snmp/snmp.go +++ b/plugins/inputs/snmp/snmp.go @@ -4,8 +4,10 @@ import ( "bufio" "bytes" "fmt" + "log" "math" "net" + "net/url" "os/exec" "strconv" "strings" @@ -15,67 +17,52 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" - + "github.com/influxdata/wlog" "github.com/soniah/gosnmp" ) const description = `Retrieves SNMP values from remote agents` const sampleConfig = ` - agents = [ "127.0.0.1:161" ] - ## Timeout for each SNMP query. - timeout = "5s" - ## Number of retries to attempt within timeout. - retries = 3 - ## SNMP version, values can be 1, 2, or 3 - version = 2 + ## Agent addresses to retrieve values from. 
+ ## example: agents = ["udp://127.0.0.1:161"] + ## agents = ["tcp://127.0.0.1:161"] + agents = ["udp://127.0.0.1:161"] + + ## Timeout for each request. + # timeout = "5s" + + ## SNMP version; can be 1, 2, or 3. + # version = 2 ## SNMP community string. - community = "public" + # community = "public" - ## The GETBULK max-repetitions parameter - max_repetitions = 10 + ## Number of retries to attempt. + # retries = 3 - ## SNMPv3 auth parameters - #sec_name = "myuser" - #auth_protocol = "md5" # Values: "MD5", "SHA", "" - #auth_password = "pass" - #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" - #context_name = "" - #priv_protocol = "" # Values: "DES", "AES", "" - #priv_password = "" + ## The GETBULK max-repetitions parameter. + # max_repetitions = 10 - ## measurement name - name = "system" - [[inputs.snmp.field]] - name = "hostname" - oid = ".1.0.0.1.1" - [[inputs.snmp.field]] - name = "uptime" - oid = ".1.0.0.1.2" - [[inputs.snmp.field]] - name = "load" - oid = ".1.0.0.1.3" - [[inputs.snmp.field]] - oid = "HOST-RESOURCES-MIB::hrMemorySize" + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA", or "". + # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Context Name. + # context_name = "" + ## Privacy protocol used for encrypted messages; one of "DES", "AES" or "". + # priv_protocol = "" + ## Privacy password used for encrypted messages. + # priv_password = "" - [[inputs.snmp.table]] - ## measurement name - name = "remote_servers" - inherit_tags = [ "hostname" ] - [[inputs.snmp.table.field]] - name = "server" - oid = ".1.0.0.0.1.0" - is_tag = true - [[inputs.snmp.table.field]] - name = "connections" - oid = ".1.0.0.0.1.1" - [[inputs.snmp.table.field]] - name = "latency" - oid = ".1.0.0.0.1.2" - - [[inputs.snmp.table]] - ## auto populate table's fields using the MIB - oid = "HOST-RESOURCES-MIB::hrNetworkTable" + ## Add fields and tables defining the variables you wish to collect. This + ## example collects the system uptime and interface variables. Reference the + ## full plugin documentation for configuration details. ` // execCommand is so tests can mock out exec.Command usage. @@ -84,6 +71,14 @@ var execCommand = exec.Command // execCmd executes the specified command, returning the STDOUT content. // If command exits with error status, the output is captured into the returned error. func execCmd(arg0 string, args ...string) ([]byte, error) { + if wlog.LogLevel() == wlog.DEBUG { + quoted := make([]string, 0, len(args)) + for _, arg := range args { + quoted = append(quoted, fmt.Sprintf("%q", arg)) + } + log.Printf("D! [inputs.snmp] executing %q %s", arg0, strings.Join(quoted, " ")) + } + out, err := execCommand(arg0, args...).Output() if err != nil { if err, ok := err.(*exec.ExitError); ok { @@ -99,41 +94,42 @@ func execCmd(arg0 string, args ...string) ([]byte, error) { // Snmp holds the configuration for the plugin. type Snmp struct { - // The SNMP agent to query. Format is ADDR[:PORT] (e.g. 1.2.3.4:161). - Agents []string + // The SNMP agent to query. Format is [SCHEME://]ADDR[:PORT] (e.g. + // udp://1.2.3.4:161). If the scheme is not specified then "udp" is used. + Agents []string `toml:"agents"` // Timeout to wait for a response. 
- Timeout internal.Duration - Retries int + Timeout internal.Duration `toml:"timeout"` + Retries int `toml:"retries"` // Values: 1, 2, 3 - Version uint8 + Version uint8 `toml:"version"` // Parameters for Version 1 & 2 - Community string + Community string `toml:"community"` // Parameters for Version 2 & 3 - MaxRepetitions uint8 + MaxRepetitions uint8 `toml:"max_repetitions"` // Parameters for Version 3 - ContextName string + ContextName string `toml:"context_name"` // Values: "noAuthNoPriv", "authNoPriv", "authPriv" - SecLevel string - SecName string + SecLevel string `toml:"sec_level"` + SecName string `toml:"sec_name"` // Values: "MD5", "SHA", "". Default: "" - AuthProtocol string - AuthPassword string + AuthProtocol string `toml:"auth_protocol"` + AuthPassword string `toml:"auth_password"` // Values: "DES", "AES", "". Default: "" - PrivProtocol string - PrivPassword string - EngineID string - EngineBoots uint32 - EngineTime uint32 + PrivProtocol string `toml:"priv_protocol"` + PrivPassword string `toml:"priv_password"` + EngineID string `toml:"-"` + EngineBoots uint32 `toml:"-"` + EngineTime uint32 `toml:"-"` Tables []Table `toml:"table"` // Name & Fields are the elements of a Table. // Telegraf chokes if we try to embed a Table. So instead we have to embed the // fields of a Table, and construct a Table during runtime. - Name string + Name string // deprecated in 1.14; use name_override Fields []Field `toml:"field"` connectionCache []snmpConnection @@ -268,7 +264,7 @@ func (f *Field) init() error { return nil } - _, oidNum, oidText, conversion, err := snmpTranslate(f.Oid) + _, oidNum, oidText, conversion, err := SnmpTranslate(f.Oid) if err != nil { return Errorf(err, "translating") } @@ -614,16 +610,30 @@ func (s *Snmp) getConnection(idx int) (snmpConnection, error) { gs := gosnmpWrapper{&gosnmp.GoSNMP{}} s.connectionCache[idx] = gs - host, portStr, err := net.SplitHostPort(agent) + if !strings.Contains(agent, "://") { + agent = "udp://" + agent + } + + u, err := url.Parse(agent) if err != nil { - if err, ok := err.(*net.AddrError); !ok || err.Err != "missing port in address" { - return nil, Errorf(err, "parsing host") - } - host = agent + return nil, err + } + + switch u.Scheme { + case "tcp": + gs.Transport = "tcp" + case "", "udp": + gs.Transport = "udp" + default: + return nil, fmt.Errorf("unsupported scheme: %v", u.Scheme) + } + + gs.Target = u.Hostname() + + portStr := u.Port() + if portStr == "" { portStr = "161" } - gs.Target = host - port, err := strconv.ParseUint(portStr, 10, 16) if err != nil { return nil, Errorf(err, "parsing port") @@ -794,9 +804,9 @@ func fieldConvert(conv string, v interface{}) (interface{}, error) { case uint64: v = int64(vt) case []byte: - v, _ = strconv.Atoi(string(vt)) + v, _ = strconv.ParseInt(string(vt), 10, 64) case string: - v, _ = strconv.Atoi(vt) + v, _ = strconv.ParseInt(vt, 10, 64) } return v, nil } @@ -869,7 +879,7 @@ func snmpTable(oid string) (mibName string, oidNum string, oidText string, field } func snmpTableCall(oid string) (mibName string, oidNum string, oidText string, fields []Field, err error) { - mibName, oidNum, oidText, _, err = snmpTranslate(oid) + mibName, oidNum, oidText, _, err = SnmpTranslate(oid) if err != nil { return "", "", "", nil, Errorf(err, "translating") } @@ -939,7 +949,7 @@ var snmpTranslateCachesLock sync.Mutex var snmpTranslateCaches map[string]snmpTranslateCache // snmpTranslate resolves the given OID. 
-func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { +func SnmpTranslate(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { snmpTranslateCachesLock.Lock() if snmpTranslateCaches == nil { snmpTranslateCaches = map[string]snmpTranslateCache{} @@ -952,9 +962,9 @@ func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, c // We could speed it up by putting a lock in snmpTranslateCache and then // returning it immediately, and multiple callers would then release the // snmpTranslateCachesLock and instead wait on the individual - // snmpTranlsation.Lock to release. But I don't know that the extra complexity + // snmpTranslation.Lock to release. But I don't know that the extra complexity // is worth it. Especially when it would slam the system pretty hard if lots - // of lookups are being perfomed. + // of lookups are being performed. stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err = snmpTranslateCall(oid) snmpTranslateCaches[oid] = stc @@ -965,6 +975,28 @@ func snmpTranslate(oid string) (mibName string, oidNum string, oidText string, c return stc.mibName, stc.oidNum, stc.oidText, stc.conversion, stc.err } +func SnmpTranslateForce(oid string, mibName string, oidNum string, oidText string, conversion string) { + snmpTranslateCachesLock.Lock() + defer snmpTranslateCachesLock.Unlock() + if snmpTranslateCaches == nil { + snmpTranslateCaches = map[string]snmpTranslateCache{} + } + + var stc snmpTranslateCache + stc.mibName = mibName + stc.oidNum = oidNum + stc.oidText = oidText + stc.conversion = conversion + stc.err = nil + snmpTranslateCaches[oid] = stc +} + +func SnmpTranslateClear() { + snmpTranslateCachesLock.Lock() + defer snmpTranslateCachesLock.Unlock() + snmpTranslateCaches = map[string]snmpTranslateCache{} +} + func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText string, conversion string, err error) { var out []byte if strings.ContainsAny(oid, ":abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") { @@ -1010,7 +1042,7 @@ func snmpTranslateCall(oid string) (mibName string, oidNum string, oidText strin switch tc { case "MacAddress", "PhysAddress": conversion = "hwaddr" - case "InetAddressIPv4", "InetAddressIPv6", "InetAddress": + case "InetAddressIPv4", "InetAddressIPv6", "InetAddress", "IPSIpAddress": conversion = "ipaddr" } } else if strings.HasPrefix(line, "::= { ") { diff --git a/plugins/inputs/snmp/snmp_mocks_test.go b/plugins/inputs/snmp/snmp_mocks_test.go index 63a8a80ec..56d9326f1 100644 --- a/plugins/inputs/snmp/snmp_mocks_test.go +++ b/plugins/inputs/snmp/snmp_mocks_test.go @@ -61,26 +61,26 @@ func init() { // BEGIN GO GENERATE CONTENT var mockedCommandResults = map[string]mockedCommandResult{ - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": mockedCommandResult{stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false}, - 
"snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": mockedCommandResult{stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": mockedCommandResult{stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": mockedCommandResult{stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::server": mockedCommandResult{stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": mockedCommandResult{stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": mockedCommandResult{stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::connections": mockedCommandResult{stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::latency": mockedCommandResult{stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::description": mockedCommandResult{stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": mockedCommandResult{stdout: "TEST::hostname\nhostname OBJECT-TYPE\n 
-- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": mockedCommandResult{stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": mockedCommandResult{stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": mockedCommandResult{stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. 
The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false}, - "snmptranslate\x00-Td\x00TEST::testTable.1": mockedCommandResult{stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false}, - "snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": mockedCommandResult{stdout: "server connections latency description \nTEST::testTable: No entries\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.1.2": {stdout: "TEST::1.2\nanonymous#1 OBJECT-TYPE\n -- FROM\tTEST\n::= { iso(1) 0 testOID(0) 1 2 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x001.0.0.1.1": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.1.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.0.0.0.1.5": {stdout: "TEST::testTableEntry.5\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 5 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.1.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00.iso.2.3": {stdout: "iso.2.3\niso OBJECT-TYPE\n -- FROM\t#-1\n::= { iso(1) 2 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00-m\x00all\x00.999": {stdout: ".999\n [TRUNCATED]\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::server": {stdout: "TEST::server\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 1 }\n", stderr: "", exitError: 
false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::server.0": {stdout: "TEST::server.0\nserver OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) server(1) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::testTable": {stdout: "TEST::testTable\ntestTable OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 0 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::connections": {stdout: "TEST::connections\nconnections OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tINTEGER\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 2 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::latency": {stdout: "TEST::latency\nlatency OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 3 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::description": {stdout: "TEST::description\ndescription OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) testTable(0) testTableEntry(1) 4 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TEST::hostname": {stdout: "TEST::hostname\nhostname OBJECT-TYPE\n -- FROM\tTEST\n SYNTAX\tOCTET STRING\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n::= { iso(1) 0 testOID(0) 1 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00IF-MIB::ifPhysAddress.1": {stdout: "IF-MIB::ifPhysAddress.1\nifPhysAddress OBJECT-TYPE\n -- FROM\tIF-MIB\n -- TEXTUAL CONVENTION PhysAddress\n SYNTAX\tOCTET STRING\n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"The interface's address at its protocol sub-layer. For\n example, for an 802.x interface, this object normally\n contains a MAC address. The interface's media-specific MIB\n must define the bit and byte ordering and the format of the\n value of this object. For interfaces which do not have such\n an address (e.g., a serial line), this object should contain\n an octet string of zero length.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) interfaces(2) ifTable(2) ifEntry(1) ifPhysAddress(6) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00BRIDGE-MIB::dot1dTpFdbAddress.1": {stdout: "BRIDGE-MIB::dot1dTpFdbAddress.1\ndot1dTpFdbAddress OBJECT-TYPE\n -- FROM\tBRIDGE-MIB\n -- TEXTUAL CONVENTION MacAddress\n SYNTAX\tOCTET STRING (6) \n DISPLAY-HINT\t\"1x:\"\n MAX-ACCESS\tread-only\n STATUS\tcurrent\n DESCRIPTION\t\"A unicast MAC address for which the bridge has\n forwarding and/or filtering information.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) dot1dBridge(17) dot1dTp(4) dot1dTpFdbTable(3) dot1dTpFdbEntry(1) dot1dTpFdbAddress(1) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00-Ob\x00TCP-MIB::tcpConnectionLocalAddress.1": {stdout: "TCP-MIB::tcpConnectionLocalAddress.1\ntcpConnectionLocalAddress OBJECT-TYPE\n -- FROM\tTCP-MIB\n -- TEXTUAL CONVENTION InetAddress\n SYNTAX\tOCTET STRING (0..255) \n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n DESCRIPTION\t\"The local IP address for this TCP connection. 
The type\n of this address is determined by the value of\n tcpConnectionLocalAddressType.\n\n As this object is used in the index for the\n tcpConnectionTable, implementors should be\n careful not to create entries that would result in OIDs\n with more than 128 subidentifiers; otherwise the information\n cannot be accessed by using SNMPv1, SNMPv2c, or SNMPv3.\"\n::= { iso(1) org(3) dod(6) internet(1) mgmt(2) mib-2(1) tcp(6) tcpConnectionTable(19) tcpConnectionEntry(1) tcpConnectionLocalAddress(2) 1 }\n", stderr: "", exitError: false}, + "snmptranslate\x00-Td\x00TEST::testTable.1": {stdout: "TEST::testTableEntry\ntestTableEntry OBJECT-TYPE\n -- FROM\tTEST\n MAX-ACCESS\tnot-accessible\n STATUS\tcurrent\n INDEX\t\t{ server }\n::= { iso(1) 0 testOID(0) testTable(0) 1 }\n", stderr: "", exitError: false}, + "snmptable\x00-Ch\x00-Cl\x00-c\x00public\x00127.0.0.1\x00TEST::testTable": {stdout: "server connections latency description \nTEST::testTable: No entries\n", stderr: "", exitError: false}, } diff --git a/plugins/inputs/snmp/snmp_test.go b/plugins/inputs/snmp/snmp_test.go index c2e842a00..d29b525ad 100644 --- a/plugins/inputs/snmp/snmp_test.go +++ b/plugins/inputs/snmp/snmp_test.go @@ -10,6 +10,7 @@ import ( "time" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/testutil" "github.com/influxdata/toml" "github.com/soniah/gosnmp" @@ -82,45 +83,20 @@ var tsc = &testSNMPConnection{ } func TestSampleConfig(t *testing.T) { - conf := struct { - Inputs struct { - Snmp []*Snmp - } - }{} - err := toml.Unmarshal([]byte("[[inputs.snmp]]\n"+(*Snmp)(nil).SampleConfig()), &conf) - assert.NoError(t, err) + conf := inputs.Inputs["snmp"]() + err := toml.Unmarshal([]byte(conf.SampleConfig()), conf) + require.NoError(t, err) - s := Snmp{ - Agents: []string{"127.0.0.1:161"}, + expected := &Snmp{ + Agents: []string{"udp://127.0.0.1:161"}, Timeout: internal.Duration{Duration: 5 * time.Second}, Version: 2, Community: "public", MaxRepetitions: 10, Retries: 3, - - Name: "system", - Fields: []Field{ - {Name: "hostname", Oid: ".1.0.0.1.1"}, - {Name: "uptime", Oid: ".1.0.0.1.2"}, - {Name: "load", Oid: ".1.0.0.1.3"}, - {Oid: "HOST-RESOURCES-MIB::hrMemorySize"}, - }, - Tables: []Table{ - { - Name: "remote_servers", - InheritTags: []string{"hostname"}, - Fields: []Field{ - {Name: "server", Oid: ".1.0.0.0.1.0", IsTag: true}, - {Name: "connections", Oid: ".1.0.0.0.1.1"}, - {Name: "latency", Oid: ".1.0.0.0.1.2"}, - }, - }, - { - Oid: "HOST-RESOURCES-MIB::hrNetworkTable", - }, - }, + Name: "snmp", } - assert.Equal(t, &s, conf.Inputs.Snmp[0]) + require.Equal(t, expected, conf) } func TestFieldInit(t *testing.T) { @@ -256,7 +232,7 @@ func TestSnmpInit_noTranslate(t *testing.T) { func TestGetSNMPConnection_v2(t *testing.T) { s := &Snmp{ - Agents: []string{"1.2.3.4:567", "1.2.3.4"}, + Agents: []string{"1.2.3.4:567", "1.2.3.4", "udp://127.0.0.1"}, Timeout: internal.Duration{Duration: 3 * time.Second}, Retries: 4, Version: 2, @@ -272,12 +248,53 @@ func TestGetSNMPConnection_v2(t *testing.T) { assert.EqualValues(t, 567, gs.Port) assert.Equal(t, gosnmp.Version2c, gs.Version) assert.Equal(t, "foo", gs.Community) + assert.Equal(t, "udp", gs.Transport) gsc, err = s.getConnection(1) require.NoError(t, err) gs = gsc.(gosnmpWrapper) assert.Equal(t, "1.2.3.4", gs.Target) assert.EqualValues(t, 161, gs.Port) + assert.Equal(t, "udp", gs.Transport) + + gsc, err = s.getConnection(2) + require.NoError(t, err) + gs = gsc.(gosnmpWrapper) + assert.Equal(t, "127.0.0.1", 
gs.Target) + assert.EqualValues(t, 161, gs.Port) + assert.Equal(t, "udp", gs.Transport) +} + +func TestGetSNMPConnectionTCP(t *testing.T) { + var wg sync.WaitGroup + wg.Add(1) + go stubTCPServer(&wg) + wg.Wait() + + s := &Snmp{ + Agents: []string{"tcp://127.0.0.1:56789"}, + } + err := s.init() + require.NoError(t, err) + + wg.Add(1) + gsc, err := s.getConnection(0) + require.NoError(t, err) + gs := gsc.(gosnmpWrapper) + assert.Equal(t, "127.0.0.1", gs.Target) + assert.EqualValues(t, 56789, gs.Port) + assert.Equal(t, "tcp", gs.Transport) + wg.Wait() +} + +func stubTCPServer(wg *sync.WaitGroup) { + defer wg.Done() + tcpAddr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:56789") + tcpServer, _ := net.ListenTCP("tcp", tcpAddr) + defer tcpServer.Close() + wg.Done() + conn, _ := tcpServer.AcceptTCP() + defer conn.Close() } func TestGetSNMPConnection_v3(t *testing.T) { @@ -677,6 +694,8 @@ func TestFieldConvert(t *testing.T) { {uint64(123), "float(3)", float64(0.123)}, {"123", "int", int64(123)}, {[]byte("123"), "int", int64(123)}, + {"123123123123", "int", int64(123123123123)}, + {[]byte("123123123123"), "int", int64(123123123123)}, {float32(12.3), "int", int64(12)}, {float64(12.3), "int", int64(12)}, {int(123), "int", int64(123)}, @@ -708,7 +727,7 @@ func TestFieldConvert(t *testing.T) { func TestSnmpTranslateCache_miss(t *testing.T) { snmpTranslateCaches = nil oid := "IF-MIB::ifPhysAddress.1" - mibName, oidNum, oidText, conversion, err := snmpTranslate(oid) + mibName, oidNum, oidText, conversion, err := SnmpTranslate(oid) assert.Len(t, snmpTranslateCaches, 1) stc := snmpTranslateCaches[oid] require.NotNil(t, stc) @@ -721,7 +740,7 @@ func TestSnmpTranslateCache_miss(t *testing.T) { func TestSnmpTranslateCache_hit(t *testing.T) { snmpTranslateCaches = map[string]snmpTranslateCache{ - "foo": snmpTranslateCache{ + "foo": { mibName: "a", oidNum: "b", oidText: "c", @@ -729,7 +748,7 @@ func TestSnmpTranslateCache_hit(t *testing.T) { err: fmt.Errorf("e"), }, } - mibName, oidNum, oidText, conversion, err := snmpTranslate("foo") + mibName, oidNum, oidText, conversion, err := SnmpTranslate("foo") assert.Equal(t, "a", mibName) assert.Equal(t, "b", oidNum) assert.Equal(t, "c", oidText) @@ -754,7 +773,7 @@ func TestSnmpTableCache_miss(t *testing.T) { func TestSnmpTableCache_hit(t *testing.T) { snmpTableCaches = map[string]snmpTableCache{ - "foo": snmpTableCache{ + "foo": { mibName: "a", oidNum: "b", oidText: "c", diff --git a/plugins/inputs/snmp_legacy/snmp_legacy.go b/plugins/inputs/snmp_legacy/snmp_legacy.go index 57f9f4fe2..8df9cff06 100644 --- a/plugins/inputs/snmp_legacy/snmp_legacy.go +++ b/plugins/inputs/snmp_legacy/snmp_legacy.go @@ -1,7 +1,6 @@ package snmp_legacy import ( - "fmt" "io/ioutil" "log" "net" @@ -24,6 +23,8 @@ type Snmp struct { Subtable []Subtable SnmptranslateFile string + Log telegraf.Logger + nameToOid map[string]string initNode Node subTableMap map[string]Subtable @@ -297,7 +298,7 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { data, err := ioutil.ReadFile(s.SnmptranslateFile) if err != nil { - log.Printf("E! Reading SNMPtranslate file error: %s", err) + s.Log.Errorf("Reading SNMPtranslate file error: %s", err.Error()) return err } else { for _, line := range strings.Split(string(data), "\n") { @@ -395,16 +396,16 @@ func (s *Snmp) Gather(acc telegraf.Accumulator) error { // only if len(s.OidInstanceMapping) == 0 if len(host.OidInstanceMapping) >= 0 { if err := host.SNMPMap(acc, s.nameToOid, s.subTableMap); err != nil { - acc.AddError(fmt.Errorf("E! 
SNMP Mapping error for host '%s': %s", host.Address, err)) + s.Log.Errorf("Mapping error for host %q: %s", host.Address, err.Error()) continue } } // Launch Get requests if err := host.SNMPGet(acc, s.initNode); err != nil { - acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err)) + s.Log.Errorf("Error for host %q: %s", host.Address, err.Error()) } if err := host.SNMPBulk(acc, s.initNode); err != nil { - acc.AddError(fmt.Errorf("E! SNMP Error for host '%s': %s", host.Address, err)) + s.Log.Errorf("Error for host %q: %s", host.Address, err.Error()) } } return nil @@ -801,7 +802,7 @@ func (h *Host) HandleResponse( acc.AddFields(field_name, fields, tags) case gosnmp.NoSuchObject, gosnmp.NoSuchInstance: // Oid not found - log.Printf("E! [snmp input] Oid not found: %s", oid_key) + log.Printf("E! [inputs.snmp_legacy] oid %q not found", oid_key) default: // delete other data } diff --git a/plugins/inputs/snmp_trap/README.md b/plugins/inputs/snmp_trap/README.md new file mode 100644 index 000000000..046f18e49 --- /dev/null +++ b/plugins/inputs/snmp_trap/README.md @@ -0,0 +1,102 @@ +# SNMP Trap Input Plugin + +The SNMP Trap plugin is a service input plugin that receives SNMP +notifications (traps and inform requests). + +Notifications are received on plain UDP. The port to listen is +configurable. + +### Prerequisites + +This plugin uses the `snmptranslate` programs from the +[net-snmp][] project. These tools will need to be installed into the `PATH` in +order to be located. Other utilities from the net-snmp project may be useful +for troubleshooting, but are not directly used by the plugin. + +These programs will load available MIBs on the system. Typically the default +directory for MIBs is `/usr/share/snmp/mibs`, but if your MIBs are in a +different location you may need to make the paths known to net-snmp. The +location of these files can be configured in the `snmp.conf` or via the +`MIBDIRS` environment variable. See [`man 1 snmpcmd`][man snmpcmd] for more +information. + +### Configuration +```toml +[[inputs.snmp_trap]] + ## Transport, local address, and port to listen on. Transport must + ## be "udp://". Omit local address to listen on all interfaces. + ## example: "udp://127.0.0.1:1234" + ## + ## Special permissions may be required to listen on a port less than + ## 1024. See README.md for details + ## + # service_address = "udp://:162" + ## Timeout running snmptranslate command + # timeout = "5s" + ## Snmp version + # version = "2c" + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA" or "". + # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". + # priv_protocol = "" + ## Privacy password used for encrypted messages. + # priv_password = "" +``` + +#### Using a Privileged Port + +On many operating systems, listening on a privileged port (a port +number less than 1024) requires extra permission. Since the default +SNMP trap port 162 is in this category, using telegraf to receive SNMP +traps may need extra permission. + +Instructions for listening on a privileged port vary by operating +system. It is not recommended to run telegraf as superuser in order to +use a privileged port. 
Instead follow the principle of least privilege +and use a more specific operating system mechanism to allow telegraf to +use the port. You may also be able to have telegraf use an +unprivileged port and then configure a firewall port forward rule from +the privileged port. + +To use a privileged port on Linux, you can use setcap to enable the +CAP_NET_BIND_SERVICE capability on the telegraf binary: + +``` +setcap cap_net_bind_service=+ep /usr/bin/telegraf +``` + +On Mac OS, listening on privileged ports is unrestricted on versions +10.14 and later. + +### Metrics + +- snmp_trap + - tags: + - source (string, IP address of trap source) + - name (string, value from SNMPv2-MIB::snmpTrapOID.0 PDU) + - mib (string, MIB from SNMPv2-MIB::snmpTrapOID.0 PDU) + - oid (string, OID string from SNMPv2-MIB::snmpTrapOID.0 PDU) + - version (string, "1" or "2c" or "3") + - context_name (string, value from v3 trap) + - engine_id (string, value from v3 trap) + - fields: + - Fields are mapped from variables in the trap. Field names are + the trap variable names after MIB lookup. Field values are trap + variable values. + +### Example Output +``` +snmp_trap,mib=SNMPv2-MIB,name=coldStart,oid=.1.3.6.1.6.3.1.1.5.1,source=192.168.122.102,version=2c snmpTrapEnterprise.0="linux",sysUpTimeInstance=1i 1574109187723429814 +snmp_trap,mib=NET-SNMP-AGENT-MIB,name=nsNotifyShutdown,oid=.1.3.6.1.4.1.8072.4.0.2,source=192.168.122.102,version=2c sysUpTimeInstance=5803i,snmpTrapEnterprise.0="netSnmpNotificationPrefix" 1574109186555115459 +``` + +[net-snmp]: http://www.net-snmp.org/ +[man snmpcmd]: http://net-snmp.sourceforge.net/docs/man/snmpcmd.html#lbAK diff --git a/plugins/inputs/snmp_trap/snmp_trap.go b/plugins/inputs/snmp_trap/snmp_trap.go new file mode 100644 index 000000000..dbf0cdbf3 --- /dev/null +++ b/plugins/inputs/snmp_trap/snmp_trap.go @@ -0,0 +1,416 @@ +package snmp_trap + +import ( + "bufio" + "bytes" + "fmt" + "net" + "os/exec" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" + + "github.com/soniah/gosnmp" +) + +var defaultTimeout = internal.Duration{Duration: time.Second * 5} + +type handler func(*gosnmp.SnmpPacket, *net.UDPAddr) +type execer func(internal.Duration, string, ...string) ([]byte, error) + +type mibEntry struct { + mibName string + oidText string +} + +type SnmpTrap struct { + ServiceAddress string `toml:"service_address"` + Timeout internal.Duration `toml:"timeout"` + Version string `toml:"version"` + + // Settings for version 3 + // Values: "noAuthNoPriv", "authNoPriv", "authPriv" + SecLevel string `toml:"sec_level"` + SecName string `toml:"sec_name"` + // Values: "MD5", "SHA", "". Default: "" + AuthProtocol string `toml:"auth_protocol"` + AuthPassword string `toml:"auth_password"` + // Values: "DES", "AES", "". Default: "" + PrivProtocol string `toml:"priv_protocol"` + PrivPassword string `toml:"priv_password"` + + acc telegraf.Accumulator + listener *gosnmp.TrapListener + timeFunc func() time.Time + errCh chan error + + makeHandlerWrapper func(handler) handler + + Log telegraf.Logger `toml:"-"` + + cacheLock sync.Mutex + cache map[string]mibEntry + + execCmd execer +} + +var sampleConfig = ` + ## Transport, local address, and port to listen on. Transport must + ## be "udp://". Omit local address to listen on all interfaces. + ## example: "udp://127.0.0.1:1234" + ## + ## Special permissions may be required to listen on a port less than + ## 1024. 
See README.md for details + ## + # service_address = "udp://:162" + ## Timeout running snmptranslate command + # timeout = "5s" + ## Snmp version, defaults to 2c + # version = "2c" + ## SNMPv3 authentication and encryption options. + ## + ## Security Name. + # sec_name = "myuser" + ## Authentication protocol; one of "MD5", "SHA" or "". + # auth_protocol = "MD5" + ## Authentication password. + # auth_password = "pass" + ## Security Level; one of "noAuthNoPriv", "authNoPriv", or "authPriv". + # sec_level = "authNoPriv" + ## Privacy protocol used for encrypted messages; one of "DES", "AES", "AES192", "AES192C", "AES256", "AES256C" or "". + # priv_protocol = "" + ## Privacy password used for encrypted messages. + # priv_password = "" +` + +func (s *SnmpTrap) SampleConfig() string { + return sampleConfig +} + +func (s *SnmpTrap) Description() string { + return "Receive SNMP traps" +} + +func (s *SnmpTrap) Gather(_ telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("snmp_trap", func() telegraf.Input { + return &SnmpTrap{ + timeFunc: time.Now, + ServiceAddress: "udp://:162", + Timeout: defaultTimeout, + Version: "2c", + } + }) +} + +func realExecCmd(Timeout internal.Duration, arg0 string, args ...string) ([]byte, error) { + cmd := exec.Command(arg0, args...) + var out bytes.Buffer + cmd.Stdout = &out + err := internal.RunTimeout(cmd, Timeout.Duration) + if err != nil { + return nil, err + } + return out.Bytes(), nil +} + +func (s *SnmpTrap) Init() error { + s.cache = map[string]mibEntry{} + s.execCmd = realExecCmd + return nil +} + +func (s *SnmpTrap) Start(acc telegraf.Accumulator) error { + s.acc = acc + s.listener = gosnmp.NewTrapListener() + s.listener.OnNewTrap = makeTrapHandler(s) + s.listener.Params = gosnmp.Default + + switch s.Version { + case "3": + s.listener.Params.Version = gosnmp.Version3 + case "2c": + s.listener.Params.Version = gosnmp.Version2c + case "1": + s.listener.Params.Version = gosnmp.Version1 + default: + s.listener.Params.Version = gosnmp.Version2c + } + + if s.listener.Params.Version == gosnmp.Version3 { + s.listener.Params.SecurityModel = gosnmp.UserSecurityModel + + switch strings.ToLower(s.SecLevel) { + case "noauthnopriv", "": + s.listener.Params.MsgFlags = gosnmp.NoAuthNoPriv + case "authnopriv": + s.listener.Params.MsgFlags = gosnmp.AuthNoPriv + case "authpriv": + s.listener.Params.MsgFlags = gosnmp.AuthPriv + default: + return fmt.Errorf("unknown security level '%s'", s.SecLevel) + } + + var authenticationProtocol gosnmp.SnmpV3AuthProtocol + switch strings.ToLower(s.AuthProtocol) { + case "md5": + authenticationProtocol = gosnmp.MD5 + case "sha": + authenticationProtocol = gosnmp.SHA + //case "sha224": + // authenticationProtocol = gosnmp.SHA224 + //case "sha256": + // authenticationProtocol = gosnmp.SHA256 + //case "sha384": + // authenticationProtocol = gosnmp.SHA384 + //case "sha512": + // authenticationProtocol = gosnmp.SHA512 + case "": + authenticationProtocol = gosnmp.NoAuth + default: + return fmt.Errorf("unknown authentication protocol '%s'", s.AuthProtocol) + } + + var privacyProtocol gosnmp.SnmpV3PrivProtocol + switch strings.ToLower(s.PrivProtocol) { + case "aes": + privacyProtocol = gosnmp.AES + case "des": + privacyProtocol = gosnmp.DES + case "aes192": + privacyProtocol = gosnmp.AES192 + case "aes192c": + privacyProtocol = gosnmp.AES192C + case "aes256": + privacyProtocol = gosnmp.AES256 + case "aes256c": + privacyProtocol = gosnmp.AES256C + case "": + privacyProtocol = gosnmp.NoPriv + default: + return fmt.Errorf("unknown 
privacy protocol '%s'", s.PrivProtocol) + } + + s.listener.Params.SecurityParameters = &gosnmp.UsmSecurityParameters{ + UserName: s.SecName, + PrivacyProtocol: privacyProtocol, + PrivacyPassphrase: s.PrivPassword, + AuthenticationPassphrase: s.AuthPassword, + AuthenticationProtocol: authenticationProtocol, + } + + } + + // wrap the handler, used in unit tests + if nil != s.makeHandlerWrapper { + s.listener.OnNewTrap = s.makeHandlerWrapper(s.listener.OnNewTrap) + } + + split := strings.SplitN(s.ServiceAddress, "://", 2) + if len(split) != 2 { + return fmt.Errorf("invalid service address: %s", s.ServiceAddress) + } + + protocol := split[0] + addr := split[1] + + // gosnmp.TrapListener currently supports udp only. For forward + // compatibility, require udp in the service address + if protocol != "udp" { + return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, s.ServiceAddress) + } + + // If (*TrapListener).Listen immediately returns an error we need + // to return it from this function. Use a channel to get it here + // from the goroutine. Buffer one in case Listen returns after + // Listening but before our Close is called. + s.errCh = make(chan error, 1) + go func() { + s.errCh <- s.listener.Listen(addr) + }() + + select { + case <-s.listener.Listening(): + s.Log.Infof("Listening on %s", s.ServiceAddress) + case err := <-s.errCh: + return err + } + + return nil +} + +func (s *SnmpTrap) Stop() { + s.listener.Close() + err := <-s.errCh + if nil != err { + s.Log.Errorf("Error stopping trap listener %v", err) + } +} + +func setTrapOid(tags map[string]string, oid string, e mibEntry) { + tags["oid"] = oid + tags["name"] = e.oidText + tags["mib"] = e.mibName +} + +func makeTrapHandler(s *SnmpTrap) handler { + return func(packet *gosnmp.SnmpPacket, addr *net.UDPAddr) { + tm := s.timeFunc() + fields := map[string]interface{}{} + tags := map[string]string{} + + tags["version"] = packet.Version.String() + tags["source"] = addr.IP.String() + + if packet.Version == gosnmp.Version1 { + // Follow the procedure described in RFC 2576 3.1 to + // translate a v1 trap to v2. + var trapOid string + + if packet.GenericTrap >= 0 && packet.GenericTrap < 6 { + trapOid = ".1.3.6.1.6.3.1.1.5." + strconv.Itoa(packet.GenericTrap+1) + } else if packet.GenericTrap == 6 { + trapOid = packet.Enterprise + ".0." + strconv.Itoa(packet.SpecificTrap) + } + + if trapOid != "" { + e, err := s.lookup(trapOid) + if err != nil { + s.Log.Errorf("Error resolving V1 OID: %v", err) + return + } + setTrapOid(tags, trapOid, e) + } + + if packet.AgentAddress != "" { + tags["agent_address"] = packet.AgentAddress + } + + fields["sysUpTimeInstance"] = packet.Timestamp + } + + for _, v := range packet.Variables { + // Use system mibs to resolve oids. Don't fall back to + // numeric oid because it's not useful enough to the end + // user and can be difficult to translate or remove from + // the database later. + + var value interface{} + + // todo: format the pdu value based on its snmp type and + // the mib's textual convention. The snmp input plugin + // only handles textual convention for ip and mac + // addresses + + switch v.Type { + case gosnmp.ObjectIdentifier: + val, ok := v.Value.(string) + if !ok { + s.Log.Errorf("Error getting value OID") + return + } + + var e mibEntry + var err error + e, err = s.lookup(val) + if nil != err { + s.Log.Errorf("Error resolving value OID: %v", err) + return + } + + value = e.oidText + + // 1.3.6.1.6.3.1.1.4.1.0 is SNMPv2-MIB::snmpTrapOID.0. + // If v.Name is this oid, set a tag of the trap name. 
+ if v.Name == ".1.3.6.1.6.3.1.1.4.1.0" { + setTrapOid(tags, val, e) + continue + } + default: + value = v.Value + } + + e, err := s.lookup(v.Name) + if nil != err { + s.Log.Errorf("Error resolving OID: %v", err) + return + } + + name := e.oidText + + fields[name] = value + } + + if packet.Version == gosnmp.Version3 { + if packet.ContextName != "" { + tags["context_name"] = packet.ContextName + } + if packet.ContextEngineID != "" { + // SNMP RFCs like 3411 and 5343 show engine ID as a hex string + tags["engine_id"] = fmt.Sprintf("%x", packet.ContextEngineID) + } + } + + s.acc.AddFields("snmp_trap", fields, tags, tm) + } +} + +func (s *SnmpTrap) lookup(oid string) (e mibEntry, err error) { + s.cacheLock.Lock() + defer s.cacheLock.Unlock() + var ok bool + if e, ok = s.cache[oid]; !ok { + // cache miss. exec snmptranslate + e, err = s.snmptranslate(oid) + if err == nil { + s.cache[oid] = e + } + return e, err + } + return e, nil +} + +func (s *SnmpTrap) clear() { + s.cacheLock.Lock() + defer s.cacheLock.Unlock() + s.cache = map[string]mibEntry{} +} + +func (s *SnmpTrap) load(oid string, e mibEntry) { + s.cacheLock.Lock() + defer s.cacheLock.Unlock() + s.cache[oid] = e +} + +func (s *SnmpTrap) snmptranslate(oid string) (e mibEntry, err error) { + var out []byte + out, err = s.execCmd(s.Timeout, "snmptranslate", "-Td", "-Ob", "-m", "all", oid) + + if err != nil { + return e, err + } + + scanner := bufio.NewScanner(bytes.NewBuffer(out)) + ok := scanner.Scan() + if err = scanner.Err(); !ok && err != nil { + return e, err + } + + e.oidText = scanner.Text() + + i := strings.Index(e.oidText, "::") + if i == -1 { + return e, fmt.Errorf("not found") + } + e.mibName = e.oidText[:i] + e.oidText = e.oidText[i+2:] + return e, nil +} diff --git a/plugins/inputs/snmp_trap/snmp_trap_test.go b/plugins/inputs/snmp_trap/snmp_trap_test.go new file mode 100644 index 000000000..dcc9b5d68 --- /dev/null +++ b/plugins/inputs/snmp_trap/snmp_trap_test.go @@ -0,0 +1,1320 @@ +package snmp_trap + +import ( + "fmt" + "net" + "strconv" + "strings" + "testing" + "time" + + "github.com/soniah/gosnmp" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/require" +) + +func TestLoad(t *testing.T) { + s := &SnmpTrap{} + require.Nil(t, s.Init()) + + defer s.clear() + s.load( + ".1.3.6.1.6.3.1.1.5.1", + mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + ) + + e, err := s.lookup(".1.3.6.1.6.3.1.1.5.1") + require.NoError(t, err) + require.Equal(t, "SNMPv2-MIB", e.mibName) + require.Equal(t, "coldStart", e.oidText) +} + +func fakeExecCmd(_ internal.Duration, x string, y ...string) ([]byte, error) { + return nil, fmt.Errorf("mock " + x + " " + strings.Join(y, " ")) +} + +func sendTrap(t *testing.T, port uint16, now uint32, trap gosnmp.SnmpTrap, version gosnmp.SnmpVersion, secLevel string, username string, authProto string, authPass string, privProto string, privPass string, contextName string, engineID string) { + var s gosnmp.GoSNMP + + if version == gosnmp.Version3 { + var msgFlags gosnmp.SnmpV3MsgFlags + switch strings.ToLower(secLevel) { + case "noauthnopriv", "": + msgFlags = gosnmp.NoAuthNoPriv + case "authnopriv": + msgFlags = gosnmp.AuthNoPriv + case "authpriv": + msgFlags = gosnmp.AuthPriv + default: + msgFlags = gosnmp.NoAuthNoPriv + } + + var authenticationProtocol gosnmp.SnmpV3AuthProtocol + switch strings.ToLower(authProto) { + case "md5": + authenticationProtocol = gosnmp.MD5 + case "sha": + authenticationProtocol = 
gosnmp.SHA + //case "sha224": + // authenticationProtocol = gosnmp.SHA224 + //case "sha256": + // authenticationProtocol = gosnmp.SHA256 + //case "sha384": + // authenticationProtocol = gosnmp.SHA384 + //case "sha512": + // authenticationProtocol = gosnmp.SHA512 + case "": + authenticationProtocol = gosnmp.NoAuth + default: + authenticationProtocol = gosnmp.NoAuth + } + + var privacyProtocol gosnmp.SnmpV3PrivProtocol + switch strings.ToLower(privProto) { + case "aes": + privacyProtocol = gosnmp.AES + case "des": + privacyProtocol = gosnmp.DES + case "aes192": + privacyProtocol = gosnmp.AES192 + case "aes192c": + privacyProtocol = gosnmp.AES192C + case "aes256": + privacyProtocol = gosnmp.AES256 + case "aes256c": + privacyProtocol = gosnmp.AES256C + case "": + privacyProtocol = gosnmp.NoPriv + default: + privacyProtocol = gosnmp.NoPriv + } + + sp := &gosnmp.UsmSecurityParameters{ + AuthoritativeEngineID: "1", + AuthoritativeEngineBoots: 1, + AuthoritativeEngineTime: 1, + UserName: username, + PrivacyProtocol: privacyProtocol, + PrivacyPassphrase: privPass, + AuthenticationPassphrase: authPass, + AuthenticationProtocol: authenticationProtocol, + } + s = gosnmp.GoSNMP{ + Port: port, + Version: version, + Timeout: time.Duration(2) * time.Second, + Retries: 1, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + SecurityParameters: sp, + SecurityModel: gosnmp.UserSecurityModel, + MsgFlags: msgFlags, + ContextName: contextName, + ContextEngineID: engineID, + } + } else { + s = gosnmp.GoSNMP{ + Port: port, + Version: version, + Timeout: time.Duration(2) * time.Second, + Retries: 1, + MaxOids: gosnmp.MaxOids, + Target: "127.0.0.1", + Community: "public", + } + } + + err := s.Connect() + if err != nil { + t.Errorf("Connect() err: %v", err) + } + defer s.Conn.Close() + + _, err = s.SendTrap(trap) + if err != nil { + t.Errorf("SendTrap() err: %v", err) + } +} + +func TestReceiveTrap(t *testing.T) { + var now uint32 + now = 123123123 + + var fakeTime time.Time + fakeTime = time.Unix(456456456, 456) + + type entry struct { + oid string + e mibEntry + } + + // If the first pdu isn't type TimeTicks, gosnmp.SendTrap() will + // prepend one with time.Now() + var tests = []struct { + name string + + // send + version gosnmp.SnmpVersion + trap gosnmp.SnmpTrap // include pdus + // V3 auth and priv parameters + secName string // v3 username + secLevel string // v3 security level + authProto string // Auth protocol: "", MD5 or SHA + authPass string // Auth passphrase + privProto string // Priv protocol: "", DES or AES + privPass string // Priv passphrase + + // V3 sender context + contextName string + engineID string + + // receive + entries []entry + metrics []telegraf.Metric + }{ + //ordinary v2c coldStart trap + { + name: "v2c coldStart", + version: gosnmp.Version2c, + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + 
"name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "2c", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //Check that we're not running snmptranslate to look up oids + //when we shouldn't be. This sends and receives a valid trap + //but metric production should fail because the oids aren't in + //the cache and oid lookup is intentionally mocked to fail. + { + name: "missing oid", + version: gosnmp.Version2c, + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{}, //nothing in cache + metrics: []telegraf.Metric{}, + }, + //v1 enterprise specific trap + { + name: "v1 trap enterprise", + version: gosnmp.Version1, + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.2.3.4.5", + Type: gosnmp.OctetString, + Value: "payload", + }, + }, + Enterprise: ".1.2.3", + AgentAddress: "10.20.30.40", + GenericTrap: 6, // enterpriseSpecific + SpecificTrap: 55, + Timestamp: uint(now), + }, + entries: []entry{ + { + ".1.2.3.4.5", + mibEntry{ + "valueMIB", + "valueOID", + }, + }, + { + ".1.2.3.0.55", + mibEntry{ + "enterpriseMIB", + "enterpriseOID", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.2.3.0.55", + "name": "enterpriseOID", + "mib": "enterpriseMIB", + "version": "1", + "source": "127.0.0.1", + "agent_address": "10.20.30.40", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": uint(now), + "valueOID": "payload", + }, + fakeTime, + ), + }, + }, + //v1 generic trap + { + name: "v1 trap generic", + version: gosnmp.Version1, + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.2.3.4.5", + Type: gosnmp.OctetString, + Value: "payload", + }, + }, + Enterprise: ".1.2.3", + AgentAddress: "10.20.30.40", + GenericTrap: 0, //coldStart + SpecificTrap: 0, + Timestamp: uint(now), + }, + entries: []entry{ + { + ".1.2.3.4.5", + mibEntry{ + "valueMIB", + "valueOID", + }, + }, + { + ".1.3.6.1.6.3.1.1.5.1", + mibEntry{ + "coldStartMIB", + "coldStartOID", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStartOID", + "mib": "coldStartMIB", + "version": "1", + "source": "127.0.0.1", + "agent_address": "10.20.30.40", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": uint(now), + "valueOID": "payload", + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart trap no auth and no priv + { + name: "v3 coldStart noAuthNoPriv", + version: gosnmp.Version3, + secName: "noAuthNoPriv", + secLevel: "noAuthNoPriv", + contextName: "foo_context_name", + engineID: "bar_engine_id", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + 
"UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + "context_name": "foo_context_name", + "engine_id": "6261725f656e67696e655f6964", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap SHA auth and no priv + { + name: "v3 coldStart authShaNoPriv", + version: gosnmp.Version3, + secName: "authShaNoPriv", + secLevel: "authNoPriv", + authProto: "SHA", + authPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + /* + //ordinary v3 coldstart trap SHA224 auth and no priv + { + name: "v3 coldStart authShaNoPriv", + version: gosnmp.Version3, + secName: "authSha224NoPriv", + secLevel: "authNoPriv", + authProto: "SHA224", + authPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap SHA256 auth and no priv + { + name: "v3 coldStart authSha256NoPriv", + version: gosnmp.Version3, + secName: "authSha256NoPriv", + secLevel: "authNoPriv", + authProto: "SHA256", + authPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: 
".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap SHA384 auth and no priv + { + name: "v3 coldStart authSha384NoPriv", + version: gosnmp.Version3, + secName: "authSha384NoPriv", + secLevel: "authNoPriv", + authProto: "SHA384", + authPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap SHA512 auth and no priv + { + name: "v3 coldStart authShaNoPriv", + version: gosnmp.Version3, + secName: "authSha512NoPriv", + secLevel: "authNoPriv", + authProto: "SHA512", + authPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + },*/ + //ordinary v3 coldstart trap SHA auth and no priv + { + name: "v3 coldStart authShaNoPriv", + version: gosnmp.Version3, + secName: "authShaNoPriv", + secLevel: "authNoPriv", + authProto: "SHA", + authPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + 
"UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldstart trap MD5 auth and no priv + { + name: "v3 coldStart authMD5NoPriv", + version: gosnmp.Version3, + secName: "authMD5NoPriv", + secLevel: "authNoPriv", + authProto: "MD5", + authPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES priv + { + name: "v3 coldStart authSHAPrivAES", + version: gosnmp.Version3, + secName: "authSHAPrivAES", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES", + privPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and DES priv + { + name: "v3 coldStart authSHAPrivDES", + version: gosnmp.Version3, + secName: "authSHAPrivDES", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "DES", + privPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: 
mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES192 priv + { + name: "v3 coldStart authSHAPrivAES192", + version: gosnmp.Version3, + secName: "authSHAPrivAES192", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES192", + privPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES192C priv + { + name: "v3 coldStart authSHAPrivAES192C", + version: gosnmp.Version3, + secName: "authSHAPrivAES192C", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES192C", + privPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES256 priv + { + name: "v3 coldStart authSHAPrivAES256", + version: gosnmp.Version3, + secName: "authSHAPrivAES256", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES256", + privPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: 
".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + //ordinary v3 coldStart SHA trap auth and AES256C priv + { + name: "v3 coldStart authSHAPrivAES256C", + version: gosnmp.Version3, + secName: "authSHAPrivAES256C", + secLevel: "authPriv", + authProto: "SHA", + authPass: "passpass", + privProto: "AES256C", + privPass: "passpass", + trap: gosnmp.SnmpTrap{ + Variables: []gosnmp.SnmpPDU{ + { + Name: ".1.3.6.1.2.1.1.3.0", + Type: gosnmp.TimeTicks, + Value: now, + }, + { + Name: ".1.3.6.1.6.3.1.1.4.1.0", // SNMPv2-MIB::snmpTrapOID.0 + Type: gosnmp.ObjectIdentifier, + Value: ".1.3.6.1.6.3.1.1.5.1", // coldStart + }, + }, + }, + entries: []entry{ + { + oid: ".1.3.6.1.6.3.1.1.4.1.0", + e: mibEntry{ + "SNMPv2-MIB", + "snmpTrapOID.0", + }, + }, + { + oid: ".1.3.6.1.6.3.1.1.5.1", + e: mibEntry{ + "SNMPv2-MIB", + "coldStart", + }, + }, + { + oid: ".1.3.6.1.2.1.1.3.0", + e: mibEntry{ + "UNUSED_MIB_NAME", + "sysUpTimeInstance", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "snmp_trap", // name + map[string]string{ // tags + "oid": ".1.3.6.1.6.3.1.1.5.1", + "name": "coldStart", + "mib": "SNMPv2-MIB", + "version": "3", + "source": "127.0.0.1", + }, + map[string]interface{}{ // fields + "sysUpTimeInstance": now, + }, + fakeTime, + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // We would prefer to specify port 0 and let the network + // stack choose an unused port for us but TrapListener + // doesn't have a way to return the autoselected port. + // Instead, we'll use an unusual port and hope it's + // unused. + const port = 12399 + + // Hook into the trap handler so the test knows when the + // trap has been received + received := make(chan int) + wrap := func(f handler) handler { + return func(p *gosnmp.SnmpPacket, a *net.UDPAddr) { + f(p, a) + received <- 0 + } + } + + // Set up the service input plugin + s := &SnmpTrap{ + ServiceAddress: "udp://:" + strconv.Itoa(port), + makeHandlerWrapper: wrap, + timeFunc: func() time.Time { + return fakeTime + }, + Log: testutil.Logger{}, + Version: tt.version.String(), + SecName: tt.secName, + SecLevel: tt.secLevel, + AuthProtocol: tt.authProto, + AuthPassword: tt.authPass, + PrivProtocol: tt.privProto, + PrivPassword: tt.privPass, + } + require.Nil(t, s.Init()) + var acc testutil.Accumulator + require.Nil(t, s.Start(&acc)) + defer s.Stop() + + // Preload the cache with the oids we'll use in this test + // so snmptranslate and mibs don't need to be installed. + for _, entry := range tt.entries { + s.load(entry.oid, entry.e) + } + + // Don't look up oid with snmptranslate. 
+ s.execCmd = fakeExecCmd + + // Send the trap + sendTrap(t, port, now, tt.trap, tt.version, tt.secLevel, tt.secName, tt.authProto, tt.authPass, tt.privProto, tt.privPass, tt.contextName, tt.engineID) + + // Wait for trap to be received + select { + case <-received: + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for trap to be received") + } + + // Verify plugin output + testutil.RequireMetricsEqual(t, + tt.metrics, acc.GetTelegrafMetrics(), + testutil.SortMetrics()) + }) + } + +} diff --git a/plugins/inputs/socket_listener/README.md b/plugins/inputs/socket_listener/README.md index ff73b1fbb..840b92709 100644 --- a/plugins/inputs/socket_listener/README.md +++ b/plugins/inputs/socket_listener/README.md @@ -25,6 +25,13 @@ This is a sample configuration for the plugin. # service_address = "unix:///tmp/telegraf.sock" # service_address = "unixgram:///tmp/telegraf.sock" + ## Change the file mode bits on unix sockets. These permissions may not be + ## respected by some platforms, to safely restrict write permissions it is best + ## to place the socket into a directory that has previously been created + ## with the desired permissions. + ## ex: socket_mode = "777" + # socket_mode = "" + ## Maximum number of concurrent connections. ## Only applies to stream sockets (e.g. TCP). ## 0 (default) is unlimited. @@ -42,11 +49,11 @@ This is a sample configuration for the plugin. ## Enables client authentication if set. # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - ## Maximum socket buffer size in bytes. + ## Maximum socket buffer size (in bytes when no unit specified). ## For stream sockets, once the buffer fills up, the sender will start backing up. ## For datagram sockets, once the buffer fills up, metrics will start dropping. ## Defaults to the OS default. - # read_buffer_size = 65535 + # read_buffer_size = "64KiB" ## Period between keep alive probes. ## Only applies to TCP sockets. @@ -59,6 +66,10 @@ This is a sample configuration for the plugin. ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" + + ## Content encoding for message payloads, can be set to "gzip" to or + ## "identity" to apply no encoding. + # content_encoding = "identity" ``` ## A Note on UDP OS Buffer Sizes @@ -71,12 +82,13 @@ setting. Instructions on how to adjust these OS settings are available below. -Some OSes (most notably, Linux) place very restricive limits on the performance +Some OSes (most notably, Linux) place very restrictive limits on the performance of UDP protocols. It is _highly_ recommended that you increase these OS limits to at least 8MB before trying to run large amounts of UDP traffic to your instance. 8MB is just a recommendation, and can be adjusted higher. 
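The `content_encoding = "gzip"` option introduced for this plugin means the listener decompresses the incoming payload before parsing it as line protocol. As a rough sketch only (the port 8094, the measurement name, and the timestamp are placeholders, not part of this diff), a Go client feeding a TCP listener configured with `content_encoding = "gzip"` and `data_format = "influx"` could look like this:

```go
package main

import (
	"compress/gzip"
	"log"
	"net"
)

func main() {
	// Assumes a socket_listener configured roughly as:
	//   service_address  = "tcp://127.0.0.1:8094"   (placeholder port)
	//   content_encoding = "gzip"
	//   data_format      = "influx"
	conn, err := net.Dial("tcp", "127.0.0.1:8094")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// For stream sockets the plugin wraps the connection in a content
	// decoder and scans it line by line, so the client simply writes a
	// gzip stream of newline-terminated line protocol.
	zw := gzip.NewWriter(conn)
	if _, err := zw.Write([]byte("example,source=demo value=42i 1574109187723429814\n")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil { // Close flushes the gzip footer.
		log.Fatal(err)
	}
}
```

For datagram sockets the decoder added in this change is applied per packet rather than per stream, so over UDP each datagram would need to be an independently complete gzip payload.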
### Linux + Check the current UDP/IP receive buffer limit & default by typing the following commands: diff --git a/plugins/inputs/socket_listener/socket_listener.go b/plugins/inputs/socket_listener/socket_listener.go index daab84952..d79030f66 100644 --- a/plugins/inputs/socket_listener/socket_listener.go +++ b/plugins/inputs/socket_listener/socket_listener.go @@ -2,18 +2,16 @@ package socket_listener import ( "bufio" + "crypto/tls" "fmt" "io" - "log" "net" "os" + "strconv" "strings" "sync" - "time" - "crypto/tls" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/internal/tls" @@ -38,20 +36,22 @@ type streamSocketListener struct { func (ssl *streamSocketListener) listen() { ssl.connections = map[string]net.Conn{} + wg := sync.WaitGroup{} + for { c, err := ssl.Accept() if err != nil { if !strings.HasSuffix(err.Error(), ": use of closed network connection") { - ssl.AddError(err) + ssl.Log.Error(err.Error()) } break } - if ssl.ReadBufferSize > 0 { + if ssl.ReadBufferSize.Size > 0 { if srb, ok := c.(setReadBufferer); ok { - srb.SetReadBuffer(ssl.ReadBufferSize) + srb.SetReadBuffer(int(ssl.ReadBufferSize.Size)) } else { - log.Printf("W! Unable to set read buffer on a %s socket", ssl.sockType) + ssl.Log.Warnf("Unable to set read buffer on a %s socket", ssl.sockType) } } @@ -65,10 +65,14 @@ func (ssl *streamSocketListener) listen() { ssl.connectionsMtx.Unlock() if err := ssl.setKeepAlive(c); err != nil { - ssl.AddError(fmt.Errorf("unable to configure keep alive (%s): %s", ssl.ServiceAddress, err)) + ssl.Log.Errorf("Unable to configure keep alive %q: %s", ssl.ServiceAddress, err.Error()) } - go ssl.read(c) + wg.Add(1) + go func() { + defer wg.Done() + ssl.read(c) + }() } ssl.connectionsMtx.Lock() @@ -76,6 +80,8 @@ func (ssl *streamSocketListener) listen() { c.Close() } ssl.connectionsMtx.Unlock() + + wg.Wait() } func (ssl *streamSocketListener) setKeepAlive(c net.Conn) error { @@ -105,7 +111,12 @@ func (ssl *streamSocketListener) read(c net.Conn) { defer ssl.removeConnection(c) defer c.Close() - scnr := bufio.NewScanner(c) + decoder, err := internal.NewStreamContentDecoder(ssl.ContentEncoding, c) + if err != nil { + ssl.Log.Error("Read error: %v", err) + } + + scnr := bufio.NewScanner(decoder) for { if ssl.ReadTimeout != nil && ssl.ReadTimeout.Duration > 0 { c.SetReadDeadline(time.Now().Add(ssl.ReadTimeout.Duration)) @@ -113,22 +124,25 @@ func (ssl *streamSocketListener) read(c net.Conn) { if !scnr.Scan() { break } - metrics, err := ssl.Parse(scnr.Bytes()) + + body := scnr.Bytes() + + metrics, err := ssl.Parse(body) if err != nil { - ssl.AddError(fmt.Errorf("unable to parse incoming line: %s", err)) - //TODO rate limit + ssl.Log.Errorf("Unable to parse incoming line: %s", err.Error()) + // TODO rate limit continue } for _, m := range metrics { - ssl.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + ssl.AddMetric(m) } } if err := scnr.Err(); err != nil { if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - log.Printf("D! 
Timeout in plugin [input.socket_listener]: %s", err) + ssl.Log.Debugf("Timeout in plugin: %s", err.Error()) } else if netErr != nil && !strings.HasSuffix(err.Error(), ": use of closed network connection") { - ssl.AddError(err) + ssl.Log.Error(err.Error()) } } } @@ -136,6 +150,7 @@ func (ssl *streamSocketListener) read(c net.Conn) { type packetSocketListener struct { net.PacketConn *SocketListener + decoder internal.ContentDecoder } func (psl *packetSocketListener) listen() { @@ -144,19 +159,24 @@ func (psl *packetSocketListener) listen() { n, _, err := psl.ReadFrom(buf) if err != nil { if !strings.HasSuffix(err.Error(), ": use of closed network connection") { - psl.AddError(err) + psl.Log.Error(err.Error()) } break } - metrics, err := psl.Parse(buf[:n]) + body, err := psl.decoder.Decode(buf[:n]) if err != nil { - psl.AddError(fmt.Errorf("unable to parse incoming packet: %s", err)) - //TODO rate limit + psl.Log.Errorf("Unable to decode incoming packet: %s", err.Error()) + } + + metrics, err := psl.Parse(body) + if err != nil { + psl.Log.Errorf("Unable to parse incoming packet: %s", err.Error()) + // TODO rate limit continue } for _, m := range metrics { - psl.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + psl.AddMetric(m) } } } @@ -164,11 +184,17 @@ func (psl *packetSocketListener) listen() { type SocketListener struct { ServiceAddress string `toml:"service_address"` MaxConnections int `toml:"max_connections"` - ReadBufferSize int `toml:"read_buffer_size"` + ReadBufferSize internal.Size `toml:"read_buffer_size"` ReadTimeout *internal.Duration `toml:"read_timeout"` KeepAlivePeriod *internal.Duration `toml:"keep_alive_period"` + SocketMode string `toml:"socket_mode"` + ContentEncoding string `toml:"content_encoding"` tlsint.ServerConfig + wg sync.WaitGroup + + Log telegraf.Logger + parsers.Parser telegraf.Accumulator io.Closer @@ -192,6 +218,13 @@ func (sl *SocketListener) SampleConfig() string { # service_address = "unix:///tmp/telegraf.sock" # service_address = "unixgram:///tmp/telegraf.sock" + ## Change the file mode bits on unix sockets. These permissions may not be + ## respected by some platforms, to safely restrict write permissions it is best + ## to place the socket into a directory that has previously been created + ## with the desired permissions. + ## ex: socket_mode = "777" + # socket_mode = "" + ## Maximum number of concurrent connections. ## Only applies to stream sockets (e.g. TCP). ## 0 (default) is unlimited. @@ -209,11 +242,11 @@ func (sl *SocketListener) SampleConfig() string { ## Enables client authentication if set. # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] - ## Maximum socket buffer size in bytes. + ## Maximum socket buffer size (in bytes when no unit specified). ## For stream sockets, once the buffer fills up, the sender will start backing up. ## For datagram sockets, once the buffer fills up, metrics will start dropping. ## Defaults to the OS default. - # read_buffer_size = 65535 + # read_buffer_size = "64KiB" ## Period between keep alive probes. ## Only applies to TCP sockets. @@ -226,6 +259,10 @@ func (sl *SocketListener) SampleConfig() string { ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md # data_format = "influx" + + ## Content encoding for message payloads, can be set to "gzip" to or + ## "identity" to apply no encoding. 
+ # content_encoding = "identity" ` } @@ -244,34 +281,46 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { return fmt.Errorf("invalid service address: %s", sl.ServiceAddress) } - if spl[0] == "unix" || spl[0] == "unixpacket" || spl[0] == "unixgram" { + protocol := spl[0] + addr := spl[1] + + if protocol == "unix" || protocol == "unixpacket" || protocol == "unixgram" { // no good way of testing for "file does not exist". // Instead just ignore error and blow up when we try to listen, which will // indicate "address already in use" if file existed and we couldn't remove. - os.Remove(spl[1]) + os.Remove(addr) } - switch spl[0] { + switch protocol { case "tcp", "tcp4", "tcp6", "unix", "unixpacket": - var ( - err error - l net.Listener - ) - tlsCfg, err := sl.ServerConfig.TLSConfig() if err != nil { - return nil + return err } + var l net.Listener if tlsCfg == nil { - l, err = net.Listen(spl[0], spl[1]) + l, err = net.Listen(protocol, addr) } else { - l, err = tls.Listen(spl[0], spl[1], tlsCfg) + l, err = tls.Listen(protocol, addr, tlsCfg) } if err != nil { return err } + sl.Log.Infof("Listening on %s://%s", protocol, l.Addr()) + + // Set permissions on socket + if (spl[0] == "unix" || spl[0] == "unixpacket") && sl.SocketMode != "" { + // Convert from octal in string to int + i, err := strconv.ParseUint(sl.SocketMode, 8, 32) + if err != nil { + return err + } + + os.Chmod(spl[1], os.FileMode(uint32(i))) + } + ssl := &streamSocketListener{ Listener: l, SocketListener: sl, @@ -279,44 +328,99 @@ func (sl *SocketListener) Start(acc telegraf.Accumulator) error { } sl.Closer = ssl - go ssl.listen() + sl.wg = sync.WaitGroup{} + sl.wg.Add(1) + go func() { + defer sl.wg.Done() + ssl.listen() + }() case "udp", "udp4", "udp6", "ip", "ip4", "ip6", "unixgram": - pc, err := net.ListenPacket(spl[0], spl[1]) + decoder, err := internal.NewContentDecoder(sl.ContentEncoding) if err != nil { return err } - if sl.ReadBufferSize > 0 { + pc, err := udpListen(protocol, addr) + if err != nil { + return err + } + + // Set permissions on socket + if spl[0] == "unixgram" && sl.SocketMode != "" { + // Convert from octal in string to int + i, err := strconv.ParseUint(sl.SocketMode, 8, 32) + if err != nil { + return err + } + + os.Chmod(spl[1], os.FileMode(uint32(i))) + } + + if sl.ReadBufferSize.Size > 0 { if srb, ok := pc.(setReadBufferer); ok { - srb.SetReadBuffer(sl.ReadBufferSize) + srb.SetReadBuffer(int(sl.ReadBufferSize.Size)) } else { - log.Printf("W! 
Unable to set read buffer on a %s socket", spl[0]) + sl.Log.Warnf("Unable to set read buffer on a %s socket", protocol) } } + sl.Log.Infof("Listening on %s://%s", protocol, pc.LocalAddr()) + psl := &packetSocketListener{ PacketConn: pc, SocketListener: sl, + decoder: decoder, } sl.Closer = psl - go psl.listen() + sl.wg = sync.WaitGroup{} + sl.wg.Add(1) + go func() { + defer sl.wg.Done() + psl.listen() + }() default: - return fmt.Errorf("unknown protocol '%s' in '%s'", spl[0], sl.ServiceAddress) + return fmt.Errorf("unknown protocol '%s' in '%s'", protocol, sl.ServiceAddress) } - if spl[0] == "unix" || spl[0] == "unixpacket" || spl[0] == "unixgram" { + if protocol == "unix" || protocol == "unixpacket" || protocol == "unixgram" { sl.Closer = unixCloser{path: spl[1], closer: sl.Closer} } return nil } +func udpListen(network string, address string) (net.PacketConn, error) { + switch network { + case "udp", "udp4", "udp6": + var addr *net.UDPAddr + var err error + var ifi *net.Interface + if spl := strings.SplitN(address, "%", 2); len(spl) == 2 { + address = spl[0] + ifi, err = net.InterfaceByName(spl[1]) + if err != nil { + return nil, err + } + } + addr, err = net.ResolveUDPAddr(network, address) + if err != nil { + return nil, err + } + if addr.IP.IsMulticast() { + return net.ListenMulticastUDP(network, ifi, addr) + } + return net.ListenUDP(network, addr) + } + return net.ListenPacket(network, address) +} + func (sl *SocketListener) Stop() { if sl.Closer != nil { sl.Close() sl.Closer = nil } + sl.wg.Wait() } func newSocketListener() *SocketListener { diff --git a/plugins/inputs/socket_listener/socket_listener_test.go b/plugins/inputs/socket_listener/socket_listener_test.go index 4370ac577..a46add15c 100644 --- a/plugins/inputs/socket_listener/socket_listener_test.go +++ b/plugins/inputs/socket_listener/socket_listener_test.go @@ -3,6 +3,7 @@ package socket_listener import ( "bytes" "crypto/tls" + "io" "io/ioutil" "log" "net" @@ -11,7 +12,9 @@ import ( "testing" "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/wlog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -22,11 +25,22 @@ var pki = testutil.NewPKI("../../../testutil/pki") // Should be called at the start of the test, and returns a function which should run at the end. 
func testEmptyLog(t *testing.T) func() { buf := bytes.NewBuffer(nil) - log.SetOutput(buf) + log.SetOutput(wlog.NewWriter(buf)) + + level := wlog.WARN + wlog.SetLevel(level) return func() { log.SetOutput(os.Stderr) - assert.Empty(t, string(buf.Bytes()), "log not empty") + + for { + line, err := buf.ReadBytes('\n') + if err != nil { + assert.Equal(t, io.EOF, err) + break + } + assert.Empty(t, string(line), "log not empty") + } } } @@ -34,6 +48,7 @@ func TestSocketListener_tcp_tls(t *testing.T) { defer testEmptyLog(t)() sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" sl.ServerConfig = *pki.TLSServerConfig() @@ -55,9 +70,10 @@ func TestSocketListener_unix_tls(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_listener.TestSocketListener_unix_tls.sock") + sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix_tls.sock") sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "unix://" + sock sl.ServerConfig = *pki.TLSServerConfig() @@ -80,8 +96,9 @@ func TestSocketListener_tcp(t *testing.T) { defer testEmptyLog(t)() sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "tcp://127.0.0.1:0" - sl.ReadBufferSize = 1024 + sl.ReadBufferSize = internal.Size{Size: 1024} acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -98,8 +115,9 @@ func TestSocketListener_udp(t *testing.T) { defer testEmptyLog(t)() sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "udp://127.0.0.1:0" - sl.ReadBufferSize = 1024 + sl.ReadBufferSize = internal.Size{Size: 1024} acc := &testutil.Accumulator{} err := sl.Start(acc) @@ -116,14 +134,15 @@ func TestSocketListener_unix(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_listener.TestSocketListener_unix.sock") + sock := filepath.Join(tmpdir, "sl.TestSocketListener_unix.sock") defer testEmptyLog(t)() os.Create(sock) sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "unix://" + sock - sl.ReadBufferSize = 1024 + sl.ReadBufferSize = internal.Size{Size: 1024} acc := &testutil.Accumulator{} err = sl.Start(acc) @@ -140,14 +159,15 @@ func TestSocketListener_unixgram(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_listener.TestSocketListener_unixgram.sock") + sock := filepath.Join(tmpdir, "sl.TestSocketListener_unixgram.sock") defer testEmptyLog(t)() os.Create(sock) sl := newSocketListener() + sl.Log = testutil.Logger{} sl.ServiceAddress = "unixgram://" + sock - sl.ReadBufferSize = 1024 + sl.ReadBufferSize = internal.Size{Size: 1024} acc := &testutil.Accumulator{} err = sl.Start(acc) @@ -160,16 +180,65 @@ func TestSocketListener_unixgram(t *testing.T) { testSocketListener(t, sl, client) } +func TestSocketListenerDecode_tcp(t *testing.T) { + defer testEmptyLog(t)() + + sl := newSocketListener() + sl.Log = testutil.Logger{} + sl.ServiceAddress = "tcp://127.0.0.1:0" + sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ContentEncoding = "gzip" + + acc := &testutil.Accumulator{} + err := sl.Start(acc) + require.NoError(t, err) + defer sl.Stop() + + client, err := net.Dial("tcp", sl.Closer.(net.Listener).Addr().String()) + require.NoError(t, err) + + testSocketListener(t, sl, client) +} + +func TestSocketListenerDecode_udp(t *testing.T) { + defer 
testEmptyLog(t)() + + sl := newSocketListener() + sl.Log = testutil.Logger{} + sl.ServiceAddress = "udp://127.0.0.1:0" + sl.ReadBufferSize = internal.Size{Size: 1024} + sl.ContentEncoding = "gzip" + + acc := &testutil.Accumulator{} + err := sl.Start(acc) + require.NoError(t, err) + defer sl.Stop() + + client, err := net.Dial("udp", sl.Closer.(net.PacketConn).LocalAddr().String()) + require.NoError(t, err) + + testSocketListener(t, sl, client) +} + func testSocketListener(t *testing.T, sl *SocketListener, client net.Conn) { - mstr12 := "test,foo=bar v=1i 123456789\ntest,foo=baz v=2i 123456790\n" - mstr3 := "test,foo=zab v=3i 123456791" - client.Write([]byte(mstr12)) - client.Write([]byte(mstr3)) - if _, ok := client.(net.Conn); ok { - // stream connection. needs trailing newline to terminate mstr3 - client.Write([]byte{'\n'}) + mstr12 := []byte("test,foo=bar v=1i 123456789\ntest,foo=baz v=2i 123456790\n") + mstr3 := []byte("test,foo=zab v=3i 123456791\n") + + if sl.ContentEncoding == "gzip" { + encoder, err := internal.NewContentEncoder(sl.ContentEncoding) + require.NoError(t, err) + mstr12, err = encoder.Encode(mstr12) + require.NoError(t, err) + + encoder, err = internal.NewContentEncoder(sl.ContentEncoding) + require.NoError(t, err) + mstr3, err = encoder.Encode(mstr3) + require.NoError(t, err) } + client.Write(mstr12) + client.Write(mstr3) + acc := sl.Accumulator.(*testutil.Accumulator) acc.Wait(3) diff --git a/plugins/inputs/solr/README.md b/plugins/inputs/solr/README.md index 67f4e06ae..458572825 100644 --- a/plugins/inputs/solr/README.md +++ b/plugins/inputs/solr/README.md @@ -16,6 +16,10 @@ Tested from 3.5 to 7.* ## ## specify a list of one or more Solr cores (default - all) # cores = ["main"] + ## + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" ``` ### Example output of gathered metrics: diff --git a/plugins/inputs/solr/solr.go b/plugins/inputs/solr/solr.go index 9b5ce9299..ce44fa086 100644 --- a/plugins/inputs/solr/solr.go +++ b/plugins/inputs/solr/solr.go @@ -28,12 +28,18 @@ const sampleConfig = ` ## specify a list of one or more Solr cores (default - all) # cores = ["main"] + + ## Optional HTTP Basic Auth Credentials + # username = "username" + # password = "pa$$word" ` // Solr is a plugin to read stats from one or many Solr servers type Solr struct { Local bool Servers []string + Username string + Password string HTTPTimeout internal.Duration Cores []string client *http.Client @@ -220,7 +226,7 @@ func addAdminCoresStatusToAcc(acc telegraf.Accumulator, adminCoreStatus *AdminCo func addCoreMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { var coreMetrics map[string]Core if len(mBeansData.SolrMbeans) < 2 { - return fmt.Errorf("no core metric data to unmarshall") + return fmt.Errorf("no core metric data to unmarshal") } if err := json.Unmarshal(mBeansData.SolrMbeans[1], &coreMetrics); err != nil { return err @@ -251,7 +257,7 @@ func addQueryHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansDa var queryMetrics map[string]QueryHandler if len(mBeansData.SolrMbeans) < 4 { - return fmt.Errorf("no query handler metric data to unmarshall") + return fmt.Errorf("no query handler metric data to unmarshal") } if err := json.Unmarshal(mBeansData.SolrMbeans[3], &queryMetrics); err != nil { @@ -326,7 +332,7 @@ func addUpdateHandlerMetricsToAcc(acc telegraf.Accumulator, core string, mBeansD var updateMetrics map[string]UpdateHandler if len(mBeansData.SolrMbeans) < 6 { - return 
fmt.Errorf("no update handler metric data to unmarshall") + return fmt.Errorf("no update handler metric data to unmarshal") } if err := json.Unmarshal(mBeansData.SolrMbeans[5], &updateMetrics); err != nil { return err @@ -404,7 +410,7 @@ func getInt(unk interface{}) int64 { // Add cache metrics section to accumulator func addCacheMetricsToAcc(acc telegraf.Accumulator, core string, mBeansData *MBeansData, time time.Time) error { if len(mBeansData.SolrMbeans) < 8 { - return fmt.Errorf("no cache metric data to unmarshall") + return fmt.Errorf("no cache metric data to unmarshal") } var cacheMetrics map[string]Cache if err := json.Unmarshal(mBeansData.SolrMbeans[7], &cacheMetrics); err != nil { @@ -471,7 +477,18 @@ func (s *Solr) createHTTPClient() *http.Client { } func (s *Solr) gatherData(url string, v interface{}) error { - r, err := s.client.Get(url) + req, reqErr := http.NewRequest(http.MethodGet, url, nil) + if reqErr != nil { + return reqErr + } + + if s.Username != "" { + req.SetBasicAuth(s.Username, s.Password) + } + + req.Header.Set("User-Agent", internal.ProductToken()) + + r, err := s.client.Do(req) if err != nil { return err } diff --git a/plugins/inputs/sqlserver/README.md b/plugins/inputs/sqlserver/README.md index 766bb95e0..320fee275 100644 --- a/plugins/inputs/sqlserver/README.md +++ b/plugins/inputs/sqlserver/README.md @@ -1,7 +1,7 @@ # SQL Server Input Plugin The `sqlserver` plugin provides metrics for your SQL Server instance. It -currently works with SQL Server versions 2008+. Recorded metrics are +currently works with SQL Server 2008 SP3 and newer. Recorded metrics are lightweight and use Dynamic Management Views supplied by SQL Server. ### Additional Setup: @@ -18,9 +18,21 @@ GRANT VIEW ANY DEFINITION TO [telegraf]; GO ``` +For Azure SQL Database, you require the View Database State permission and can create a user with a password directly in the database. +```sql +CREATE USER [telegraf] WITH PASSWORD = N'mystrongpassword'; +GO +GRANT VIEW DATABASE STATE TO [telegraf]; +GO +``` + ### Configuration: ```toml +[agent] + ## Default data collection interval for all inputs, can be changed as per collection interval needs + interval = "10s" + # Read metrics from Microsoft SQL Server [[inputs.sqlserver]] ## Specify instances to monitor with a list of connection strings. @@ -28,7 +40,8 @@ GO ## By default, the host is localhost, listening on default port, TCP 1433. ## for Windows, the user is the currently running AD user (SSO). ## See https://github.com/denisenkom/go-mssqldb for detailed connection - ## parameters. + ## parameters, in particular, tls connections can be created like so: + ## "encrypt=true;certificate=;hostNameInCertificate=" # servers = [ # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", # ] @@ -36,29 +49,46 @@ GO ## Optional parameter, setting this to 2 will use a new version ## of the collection queries that break compatibility with the original ## dashboards. 
+ ## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB query_version = 2 ## If you are using AzureDB, setting this to true will gather resource utilization metrics # azuredb = false - ## If you would like to exclude some of the metrics queries, list them here - ## Possible choices: + ## Possible queries + ## Version 2: ## - PerformanceCounters ## - WaitStatsCategorized ## - DatabaseIO - ## - DatabaseProperties + ## - ServerProperties + ## - MemoryClerk + ## - Schedulers + ## - SqlRequests + ## - VolumeSpace + ## - Cpu + ## Version 1: + ## - PerformanceCounters + ## - WaitStatsCategorized ## - CPUHistory + ## - DatabaseIO ## - DatabaseSize ## - DatabaseStats + ## - DatabaseProperties ## - MemoryClerk ## - VolumeSpace - exclude_query = [ 'DatabaseIO' ] + ## - PerformanceMetrics + + ## A list of queries to include. If not specified, all the above listed queries are used. + # include_query = [] + + ## A list of queries to explicitly ignore. + exclude_query = [ 'Schedulers' , 'SqlRequests' ] ``` ### Metrics: To provide backwards compatibility, this plugin support two versions of metrics queries. -**Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries are written in such a way as to only gather SQL specific metrics (no disk space or overall CPU related metrics) and they only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. +**Note**: Version 2 queries are not backwards compatible with the old queries. Any dashboards or queries based on the old query format will not work with the new format. The version 2 queries only report raw metrics, no math has been done to calculate deltas. To graph this data you must calculate deltas in your dashboarding software. #### Version 1 (deprecated in 1.6): The original metrics queries provide: @@ -79,7 +109,6 @@ If you are using the original queries all stats have the following tags: #### Version 2: The new (version 2) metrics provide: -- *AzureDB*: AzureDB resource utilization from `sys.dm_db_resource_stats` - *Database IO*: IO stats from `sys.dm_io_virtual_file_stats` - *Memory Clerk*: Memory clerk breakdown from `sys.dm_os_memory_clerks`, most clerks have been given a friendly name. - *Performance Counters*: A select list of performance counters from `sys.dm_os_performance_counters`. Some of the important metrics included: @@ -89,8 +118,34 @@ The new (version 2) metrics provide: - *Memory*: PLE, Page reads/sec, Page writes/sec, + more - *TempDB*: Free space, Version store usage, Active temp tables, temp table creation rate, + more - *Resource Governor*: CPU Usage, Requests/sec, Queued Requests, and Blocked tasks per workload group + more -- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version -- *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the sasme categories used in Query Store. +- *Server properties*: Number of databases in all possible states (online, offline, suspect, etc.), cpu count, physical memory, SQL Server service uptime, and SQL Server version. In the case of Azure SQL relevent properties such as Tier, #Vcores, Memory etc. 
+- *Wait stats*: Wait time in ms, number of waiting tasks, resource wait time, signal wait time, max wait time in ms, wait type, and wait category. The waits are categorized using the same categories used in Query Store. +- *Schedulers* - This captures sys.dm_os_schedulers. +- *SqlRequests* - This captures a snapshot of dm_exec_requests and + dm_exec_sessions that gives you running requests as well as wait types and + blocking sessions. +- *VolumeSpace* - uses sys.dm_os_volume_stats to get total, used and occupied space on every disk that contains a data or log file. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). It is pointless to run this with high frequency (ie: every 10s), but it won't cause any problem. +- *Cpu* - uses the buffer ring (sys.dm_os_ring_buffers) to get CPU data, the table is updated once per minute. (Note that even if enabled it won't get any data from Azure SQL Database or SQL Managed Instance). + + In order to allow tracking on a per statement basis this query produces a + unique tag for each query. Depending on the database workload, this may + result in a high cardinality series. Reference the FAQ for tips on + [managing series cardinality][cardinality]. +- *Azure Managed Instances* + - Stats from `sys.server_resource_stats`: + - cpu_count + - server_memory + - sku + - engine_edition + - hardware_type + - total_storage_mb + - available_storage_mb + - uptime + - Resource governance stats from sys.dm_instance_resource_governance +- *Azure SQL Database* + - Stats from sys.dm_db_wait_stats + - Resource governance stats from sys.dm_user_db_resource_governance + - Stats from sys.dm_db_resource_stats The following metrics can be used directly, with no delta calculations: - SQLServer:Buffer Manager\Buffer cache hit ratio @@ -130,4 +185,6 @@ The following metrics can be used directly, with no delta calculations: Version 2 queries have the following tags: - `sql_instance`: Physical host and instance name (hostname:instance) +- database_name: For Azure SQLDB, database_name denotes the name of the Azure SQL Database as server name is a logical construct. +[cardinality]: /docs/FAQ.md#user-content-q-how-can-i-manage-series-cardinality diff --git a/plugins/inputs/sqlserver/sqlserver.go b/plugins/inputs/sqlserver/sqlserver.go index 41a8b7ec7..c69a0fb7c 100644 --- a/plugins/inputs/sqlserver/sqlserver.go +++ b/plugins/inputs/sqlserver/sqlserver.go @@ -5,19 +5,21 @@ import ( "sync" "time" + _ "github.com/denisenkom/go-mssqldb" // go-mssqldb initialization "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/plugins/inputs" - - // go-mssqldb initialization - _ "github.com/zensqlmonitor/go-mssqldb" ) // SQLServer struct type SQLServer struct { - Servers []string `toml:"servers"` - QueryVersion int `toml:"query_version"` - AzureDB bool `toml:"azuredb"` - ExcludeQuery []string `toml:"exclude_query"` + Servers []string `toml:"servers"` + QueryVersion int `toml:"query_version"` + AzureDB bool `toml:"azuredb"` + IncludeQuery []string `toml:"include_query"` + ExcludeQuery []string `toml:"exclude_query"` + queries MapQuery + isInitialized bool } // Query struct @@ -30,45 +32,56 @@ type Query struct { // MapQuery type type MapQuery map[string]Query -var queries MapQuery +const defaultServer = "Server=.;app name=telegraf;log=1;" -// Initialized flag -var isInitialized = false +const sampleConfig = ` +## Specify instances to monitor with a list of connection strings. 
+## All connection parameters are optional. +## By default, the host is localhost, listening on default port, TCP 1433. +## for Windows, the user is the currently running AD user (SSO). +## See https://github.com/denisenkom/go-mssqldb for detailed connection +## parameters, in particular, tls connections can be created like so: +## "encrypt=true;certificate=;hostNameInCertificate=" +# servers = [ +# "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# ] -var defaultServer = "Server=.;app name=telegraf;log=1;" +## Optional parameter, setting this to 2 will use a new version +## of the collection queries that break compatibility with the original +## dashboards. +## Version 2 - is compatible from SQL Server 2012 and later versions and also for SQL Azure DB +query_version = 2 -var sampleConfig = ` - ## Specify instances to monitor with a list of connection strings. - ## All connection parameters are optional. - ## By default, the host is localhost, listening on default port, TCP 1433. - ## for Windows, the user is the currently running AD user (SSO). - ## See https://github.com/denisenkom/go-mssqldb for detailed connection - ## parameters. - # servers = [ - # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", - # ] +## If you are using AzureDB, setting this to true will gather resource utilization metrics +# azuredb = false - ## Optional parameter, setting this to 2 will use a new version - ## of the collection queries that break compatibility with the original - ## dashboards. - query_version = 2 +## Possible queries +## Version 2: +## - PerformanceCounters +## - WaitStatsCategorized +## - DatabaseIO +## - ServerProperties +## - MemoryClerk +## - Schedulers +## - SqlRequests +## - VolumeSpace +## Version 1: +## - PerformanceCounters +## - WaitStatsCategorized +## - CPUHistory +## - DatabaseIO +## - DatabaseSize +## - DatabaseStats +## - DatabaseProperties +## - MemoryClerk +## - VolumeSpace +## - PerformanceMetrics - ## If you are using AzureDB, setting this to true will gather resource utilization metrics - # azuredb = false +## A list of queries to include. If not specified, all the above listed queries are used. +# include_query = [] - ## If you would like to exclude some of the metrics queries, list them here - ## Possible choices: - ## - PerformanceCounters - ## - WaitStatsCategorized - ## - DatabaseIO - ## - DatabaseProperties - ## - CPUHistory - ## - DatabaseSize - ## - DatabaseStats - ## - MemoryClerk - ## - VolumeSpace - ## - PerformanceMetrics - # exclude_query = [ 'DatabaseIO' ] +## A list of queries to explicitly ignore. 
+exclude_query = [ 'Schedulers' , 'SqlRequests'] ` // SampleConfig return the sample configuration @@ -85,12 +98,13 @@ type scanner interface { Scan(dest ...interface{}) error } -func initQueries(s *SQLServer) { - queries = make(MapQuery) - +func initQueries(s *SQLServer) error { + s.queries = make(MapQuery) + queries := s.queries // If this is an AzureDB instance, grab some extra metrics if s.AzureDB { - queries["AzureDB"] = Query{Script: sqlAzureDB, ResultByRow: true} + queries["AzureDBResourceStats"] = Query{Script: sqlAzureDBResourceStats, ResultByRow: false} + queries["AzureDBResourceGovernance"] = Query{Script: sqlAzureDBResourceGovernance, ResultByRow: false} } // Decide if we want to run version 1 or version 2 queries @@ -100,6 +114,10 @@ func initQueries(s *SQLServer) { queries["DatabaseIO"] = Query{Script: sqlDatabaseIOV2, ResultByRow: false} queries["ServerProperties"] = Query{Script: sqlServerPropertiesV2, ResultByRow: false} queries["MemoryClerk"] = Query{Script: sqlMemoryClerkV2, ResultByRow: false} + queries["Schedulers"] = Query{Script: sqlServerSchedulersV2, ResultByRow: false} + queries["SqlRequests"] = Query{Script: sqlServerRequestsV2, ResultByRow: false} + queries["VolumeSpace"] = Query{Script: sqlServerVolumeSpaceV2, ResultByRow: false} + queries["Cpu"] = Query{Script: sqlServerCpuV2, ResultByRow: false} } else { queries["PerformanceCounters"] = Query{Script: sqlPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{Script: sqlWaitStatsCategorized, ResultByRow: false} @@ -113,18 +131,29 @@ func initQueries(s *SQLServer) { queries["PerformanceMetrics"] = Query{Script: sqlPerformanceMetrics, ResultByRow: false} } - for _, query := range s.ExcludeQuery { - delete(queries, query) + filterQueries, err := filter.NewIncludeExcludeFilter(s.IncludeQuery, s.ExcludeQuery) + if err != nil { + return err + } + + for query := range queries { + if !filterQueries.Match(query) { + delete(queries, query) + } } // Set a flag so we know that queries have already been initialized - isInitialized = true + s.isInitialized = true + return nil } // Gather collect data from SQL Server func (s *SQLServer) Gather(acc telegraf.Accumulator) error { - if !isInitialized { - initQueries(s) + if !s.isInitialized { + if err := initQueries(s); err != nil { + acc.AddError(err) + return err + } } if len(s.Servers) == 0 { @@ -134,7 +163,7 @@ func (s *SQLServer) Gather(acc telegraf.Accumulator) error { var wg sync.WaitGroup for _, serv := range s.Servers { - for _, query := range queries { + for _, query := range s.queries { wg.Add(1) go func(serv string, query Query) { defer wg.Done() @@ -153,12 +182,6 @@ func (s *SQLServer) gatherServer(server string, query Query, acc telegraf.Accumu if err != nil { return err } - // verify that a connection can be made before making a query - err = conn.Ping() - if err != nil { - // Handle error - return err - } defer conn.Close() // execute query @@ -244,9 +267,11 @@ func init() { // Thanks Bob Ward (http://aka.ms/bobwardms) // and the folks at Stack Overflow (https://github.com/opserver/Opserver/blob/9c89c7e9936b58ad237b30e6f4cc6cd59c406889/Opserver.Core/Data/SQL/SQLInstance.Memory.cs) // for putting most of the memory clerk definitions online! 
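
As a side note on the `include_query`/`exclude_query` behavior added in `initQueries` above: the plugin builds an include/exclude filter and drops any query name that does not match. The following is a minimal standalone sketch (not part of the patch) using the same `filter.NewIncludeExcludeFilter` helper the diff imports; the query names are the version 2 set from the sample config, and the exclude list mirrors the sample's default. An empty include list is assumed to keep everything, which matches the plugin's documented behavior.

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/filter"
)

func main() {
	// Version 2 query names as listed in the sample config above.
	queries := []string{
		"PerformanceCounters", "WaitStatsCategorized", "DatabaseIO",
		"ServerProperties", "MemoryClerk", "Schedulers",
		"SqlRequests", "VolumeSpace", "Cpu",
	}

	// Empty include list keeps everything; the exclude list then removes
	// explicit entries, mirroring the sample config's default exclusions.
	f, err := filter.NewIncludeExcludeFilter(nil, []string{"Schedulers", "SqlRequests"})
	if err != nil {
		panic(err)
	}

	for _, q := range queries {
		if f.Match(q) {
			fmt.Println("will run:", q)
		}
	}
}
```
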
-const sqlMemoryClerkV2 = `DECLARE @SQL NVARCHAR(MAX) = 'SELECT +const sqlMemoryClerkV2 = `SET DEADLOCK_PRIORITY -10; +DECLARE @SQL NVARCHAR(MAX) = 'SELECT "sqlserver_memory_clerks" As [measurement], REPLACE(@@SERVERNAME,"\",":") AS [sql_instance], +DB_NAME() as [database_name], ISNULL(clerk_names.name,mc.type) AS clerk_type, SUM({pages_kb}) AS size_kb FROM @@ -348,56 +373,233 @@ ELSE EXEC(@SQL) ` -const sqlDatabaseIOV2 = `SELECT -'sqlserver_database_io' As [measurement], -REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], -DB_NAME([vfs].[database_id]) [database_name], -vfs.io_stall_read_ms AS read_latency_ms, -vfs.num_of_reads AS reads, -vfs.num_of_bytes_read AS read_bytes, -vfs.io_stall_write_ms AS write_latency_ms, -vfs.num_of_writes AS writes, -vfs.num_of_bytes_written AS write_bytes, -CASE WHEN vfs.file_id = 2 THEN 'LOG' ELSE 'ROWS' END AS file_type -FROM -[sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs -OPTION( RECOMPILE ); +// Conditional check based on Azure SQL DB OR On-prem SQL Server +// EngineEdition=5 is Azure SQL DB +const sqlDatabaseIOV2 = ` +SET DEADLOCK_PRIORITY -10; +DECLARE @SqlStatement AS nvarchar(max); +IF SERVERPROPERTY('EngineEdition') = 5 +BEGIN + SET @SqlStatement = ' + SELECT + ''sqlserver_database_io'' As [measurement] + ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] + ,DB_NAME() as database_name + ,vfs.database_id -- /*needed as tempdb is different for each Azure SQL DB as grouping has to be by logical server + db_name + database_id*/ + ,vfs.file_id + ,vfs.io_stall_read_ms AS read_latency_ms + ,vfs.num_of_reads AS reads + ,vfs.num_of_bytes_read AS read_bytes + ,vfs.io_stall_write_ms AS write_latency_ms + ,vfs.num_of_writes AS writes + ,vfs.num_of_bytes_written AS write_bytes + ,vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] + ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms] + ,CASE + WHEN (vfs.database_id = 0) THEN ''RBPEX'' + ELSE b.logical_filename + END as logical_filename + ,CASE + WHEN (vfs.database_id = 0) THEN ''RBPEX'' + ELSE b.physical_filename + END as physical_filename + ,CASE WHEN vfs.file_id = 2 THEN ''LOG'' ELSE ''DATA'' END AS file_type + ,ISNULL(size,0)/128 AS current_size_mb + ,ISNULL(FILEPROPERTY(b.logical_filename,''SpaceUsed'')/128,0) as space_used_mb + FROM [sys].[dm_io_virtual_file_stats](NULL,NULL) AS vfs + -- needed to get Tempdb file names on Azure SQL DB so you can join appropriately. 
Without this had a bug where join was only on file_id + LEFT OUTER join + ( + SELECT DB_ID() as database_id, file_id, logical_filename=name COLLATE SQL_Latin1_General_CP1_CI_AS + , physical_filename = physical_name COLLATE SQL_Latin1_General_CP1_CI_AS, size from sys.database_files + where type <> 2 + UNION ALL + SELECT 2 as database_id, file_id, logical_filename = name , physical_filename = physical_name, size + from tempdb.sys.database_files + ) b ON b.database_id = vfs.database_id and b.file_id = vfs.file_id + where vfs.database_id IN (DB_ID(),0,2) + ' + EXEC sp_executesql @SqlStatement + +END +ELSE +BEGIN + + SET @SqlStatement = N' + SELECT + ''sqlserver_database_io'' AS [measurement] + ,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance] + ,DB_NAME(vfs.[database_id]) AS [database_name] + ,COALESCE(mf.[physical_name],''RBPEX'') AS [physical_filename] --RPBEX = Resilient Buffer Pool Extension + ,COALESCE(mf.[name],''RBPEX'') AS [logical_filename] --RPBEX = Resilient Buffer Pool Extension + ,mf.[type_desc] AS [file_type] + ,IIF( RIGHT(vs.[volume_mount_point],1) = ''\'' /*Tag value cannot end with \ */ + ,LEFT(vs.[volume_mount_point],LEN(vs.[volume_mount_point])-1) + ,vs.[volume_mount_point] + ) AS [volume_mount_point] + ,vfs.[io_stall_read_ms] AS [read_latency_ms] + ,vfs.[num_of_reads] AS [reads] + ,vfs.[num_of_bytes_read] AS [read_bytes] + ,vfs.[io_stall_write_ms] AS [write_latency_ms] + ,vfs.[num_of_writes] AS [writes] + ,vfs.[num_of_bytes_written] AS [write_bytes] + ' + + + CASE + WHEN LEFT(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar) ,2) = '11' + /*SQL Server 2012 (ver 11.x) does not have [io_stall_queued_read_ms] and [io_stall_queued_write_ms]*/ + THEN '' + ELSE N',vfs.io_stall_queued_read_ms AS [rg_read_stall_ms] ,vfs.io_stall_queued_write_ms AS [rg_write_stall_ms]' + END + + + N'FROM sys.dm_io_virtual_file_stats(NULL, NULL) AS vfs + INNER JOIN sys.master_files AS mf WITH (NOLOCK) + ON vfs.[database_id] = mf.[database_id] AND vfs.[file_id] = mf.[file_id] + CROSS APPLY sys.dm_os_volume_stats(vfs.[database_id], vfs.[file_id]) AS vs + ' + EXEC sp_executesql @SqlStatement + +END + ` -const sqlServerPropertiesV2 = `DECLARE @sys_info TABLE ( +// Conditional check based on Azure SQL DB, Azure SQL Managed instance OR On-prem SQL Server +// EngineEdition=5 is Azure SQL DB, EngineEdition=8 is Managed instance + +const sqlServerPropertiesV2 = `SET DEADLOCK_PRIORITY -10; +DECLARE @sys_info TABLE ( cpu_count INT, - server_memory INT, + server_memory BIGINT, + sku NVARCHAR(64), + engine_edition SMALLINT, + hardware_type VARCHAR(16), + total_storage_mb BIGINT, + available_storage_mb BIGINT, uptime INT ) -IF OBJECT_ID('master.sys.dm_os_sys_info') IS NOT NULL -BEGIN - INSERT INTO @sys_info ( cpu_count, server_memory, uptime ) - EXEC('SELECT cpu_count, (select total_physical_memory_kb from sys.dm_os_sys_memory) AS physical_memory_kb, DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) FROM sys.dm_os_sys_info') -END +IF SERVERPROPERTY('EngineEdition') = 8 -- Managed Instance + INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) + SELECT TOP(1) + virtual_core_count AS cpu_count, + (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory, + sku, + cast(SERVERPROPERTY('EngineEdition') as smallint) AS engine_edition, + hardware_generation AS hardware_type, + reserved_storage_mb AS total_storage_mb, + (reserved_storage_mb - storage_space_used_mb) AS available_storage_mb, + (select 
DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) from sys.dm_os_sys_info) as uptime + FROM sys.server_resource_stats + ORDER BY start_time DESC -SELECT -'sqlserver_server_properties' As [measurement], -REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], -SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online, -SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring, -SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering, -SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending, -SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect, -SUM( CASE WHEN state = 10 THEN 1 ELSE 0 END ) AS db_offline, -MAX( sinfo.cpu_count ) AS cpu_count, -MAX( sinfo.server_memory ) AS server_memory, -MAX( sinfo.uptime ) AS uptime, -SERVERPROPERTY('ProductVersion') AS sql_version -FROM sys.databases -CROSS APPLY ( - SELECT * - FROM @sys_info -) AS sinfo -OPTION( RECOMPILE ); +IF SERVERPROPERTY('EngineEdition') = 5 -- Azure SQL DB + INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) + SELECT TOP(1) + (SELECT count(*) FROM sys.dm_os_schedulers WHERE status = 'VISIBLE ONLINE') AS cpu_count, + (SELECT process_memory_limit_mb FROM sys.dm_os_job_object) AS server_memory, + slo.edition as sku, + cast(SERVERPROPERTY('EngineEdition') as smallint) AS engine_edition, + slo.service_objective AS hardware_type, + cast(DATABASEPROPERTYEX(DB_NAME(),'MaxSizeInBytes') as bigint)/(1024*1024) AS total_storage_mb, + NULL AS available_storage_mb, -- Can we find out storage? + NULL as uptime + FROM sys.databases d + -- sys.databases.database_id may not match current DB_ID on Azure SQL DB + CROSS JOIN sys.database_service_objectives slo + WHERE d.name = DB_NAME() AND slo.database_id = DB_ID() + +ELSE +BEGIN + INSERT INTO @sys_info ( cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime ) + SELECT cpu_count, + (SELECT total_physical_memory_kb FROM sys.dm_os_sys_memory) AS server_memory, + CAST(SERVERPROPERTY('Edition') AS NVARCHAR(64)) as sku, + CAST(SERVERPROPERTY('EngineEdition') as smallint) as engine_edition, + CASE virtual_machine_type_desc + WHEN 'NONE' THEN 'PHYSICAL Machine' + ELSE virtual_machine_type_desc + END AS hardware_type, + NULL, + NULL, + DATEDIFF(MINUTE,sqlserver_start_time,GETDATE()) + FROM sys.dm_os_sys_info +END +SELECT 'sqlserver_server_properties' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], + s.cpu_count, + s.server_memory, + s.sku, + s.engine_edition, + s.hardware_type, + s.total_storage_mb, + s.available_storage_mb, + s.uptime, + SERVERPROPERTY('ProductVersion') AS sql_version, + db_online, + db_restoring, + db_recovering, + db_recoveryPending, + db_suspect, + db_offline +FROM ( + SELECT SUM( CASE WHEN state = 0 THEN 1 ELSE 0 END ) AS db_online, + SUM( CASE WHEN state = 1 THEN 1 ELSE 0 END ) AS db_restoring, + SUM( CASE WHEN state = 2 THEN 1 ELSE 0 END ) AS db_recovering, + SUM( CASE WHEN state = 3 THEN 1 ELSE 0 END ) AS db_recoveryPending, + SUM( CASE WHEN state = 4 THEN 1 ELSE 0 END ) AS db_suspect, + SUM( CASE WHEN state = 6 or state = 10 THEN 1 ELSE 0 END ) AS db_offline + FROM sys.databases + ) AS dbs + CROSS APPLY ( + SELECT cpu_count, server_memory, sku, engine_edition, hardware_type, total_storage_mb, available_storage_mb, uptime + FROM @sys_info + ) AS s ` -const sqlPerformanceCountersV2 string = ` +//Recommend disabling this by default, but is useful to detect single CPU 
spikes/bottlenecks +const sqlServerSchedulersV2 string = ` + + + + +SET DEADLOCK_PRIORITY - 10; +DECLARE @SqlStatement AS nvarchar(max); +SET @SqlStatement = N' +SELECT + ''sqlserver_schedulers'' AS [measurement] + ,REPLACE(@@SERVERNAME, ''\'', '':'') AS [sql_instance] + ,DB_NAME() AS [database_name] + ,cast(s.[scheduler_id] AS VARCHAR(4)) AS [scheduler_id] + ,cast(s.[cpu_id] AS VARCHAR(4)) AS [cpu_id] + ,s.[is_online] + ,s.[is_idle] + ,s.[preemptive_switches_count] + ,s.[context_switches_count] + ,s.[current_tasks_count] + ,s.[runnable_tasks_count] + ,s.[current_workers_count] + ,s.[active_workers_count] + ,s.[work_queue_count] + ,s.[pending_disk_io_count] + ,s.[load_factor] + ,s.[yield_count] + ' + + + CASE + WHEN CAST(LEFT(CAST(SERVERPROPERTY('ProductVersion') AS nvarchar) ,2) AS int) >= 13 + /*Only from SQL Server 2016+ (ver 13.x) [total_cpu_usage_ms] and [total_scheduler_delay_ms]*/ + THEN N',s.[total_cpu_usage_ms], s.[total_scheduler_delay_ms]' + ELSE '' + END + + +N' +FROM sys.dm_os_schedulers AS s +' +EXEC sp_executesql @SqlStatement +` + +const sqlPerformanceCountersV2 string = `SET DEADLOCK_PRIORITY -10; DECLARE @PCounters TABLE ( object_name nvarchar(128), @@ -407,115 +609,151 @@ DECLARE @PCounters TABLE cntr_type INT, Primary Key(object_name, counter_name, instance_name) ); -INSERT INTO @PCounters -SELECT DISTINCT - RTrim(spi.object_name) object_name, - RTrim(spi.counter_name) counter_name, - RTrim(spi.instance_name) instance_name, - CAST(spi.cntr_value AS BIGINT) AS cntr_value, - spi.cntr_type -FROM sys.dm_os_performance_counters AS spi -WHERE ( - counter_name IN ( - 'SQL Compilations/sec', - 'SQL Re-Compilations/sec', - 'User Connections', - 'Batch Requests/sec', - 'Logouts/sec', - 'Logins/sec', - 'Processes blocked', - 'Latch Waits/sec', - 'Full Scans/sec', - 'Index Searches/sec', - 'Page Splits/sec', - 'Page Lookups/sec', - 'Page Reads/sec', - 'Page Writes/sec', - 'Readahead Pages/sec', - 'Lazy Writes/sec', - 'Checkpoint Pages/sec', - 'Page life expectancy', - 'Log File(s) Size (KB)', - 'Log File(s) Used Size (KB)', - 'Data File(s) Size (KB)', - 'Transactions/sec', - 'Write Transactions/sec', - 'Active Temp Tables', - 'Temp Tables Creation Rate', - 'Temp Tables For Destruction', - 'Free Space in tempdb (KB)', - 'Version Store Size (KB)', - 'Memory Grants Pending', - 'Free list stalls/sec', - 'Buffer cache hit ratio', - 'Buffer cache hit ratio base', - 'Backup/Restore Throughput/sec', - 'Total Server Memory (KB)', - 'Target Server Memory (KB)' - ) - ) OR ( - instance_name IN ('_Total','Column store object pool') - AND counter_name IN ( - 'Log Flushes/sec', - 'Log Flush Wait Time', - 'Lock Timeouts/sec', - 'Number of Deadlocks/sec', - 'Lock Waits/sec', - 'Latch Waits/sec', - 'Memory broker clerk size', - 'Log Bytes Flushed/sec', - 'Bytes Sent to Replica/sec', - 'Log Send Queue', - 'Bytes Sent to Transport/sec', - 'Sends to Replica/sec', - 'Bytes Sent to Transport/sec', - 'Sends to Transport/sec', - 'Bytes Received from Replica/sec', - 'Receives from Replica/sec', - 'Flow Control Time (ms/sec)', - 'Flow Control/sec', - 'Resent Messages/sec', - 'Redone Bytes/sec', - 'XTP Memory Used (KB)' - ) OR ( - counter_name IN ( - 'Log Bytes Received/sec', - 'Log Apply Pending Queue', - 'Redone Bytes/sec', - 'Recovery Queue', - 'Log Apply Ready Queue' - ) - AND instance_name = '_Total' - ) - ) OR ( - counter_name IN ('Transaction Delay') - ) OR ( - counter_name IN ( - 'CPU usage %', - 'CPU usage % base', - 'Queued requests', - 'Requests completed/sec', - 'Blocked tasks' - ) - ) OR ( - 
counter_name IN ( - 'Active memory grant amount (KB)', - 'Disk Read Bytes/sec', - 'Disk Read IO Throttled/sec', - 'Disk Read IO/sec', - 'Disk Write Bytes/sec', - 'Disk Write IO Throttled/sec', - 'Disk Write IO/sec', - 'Used memory (KB)' - ) - ) OR ( - object_name LIKE '%User Settable%' - OR object_name LIKE '%SQL Errors%' - ) DECLARE @SQL NVARCHAR(MAX) -SET @SQL = REPLACE(' -SELECT +SET @SQL = N'SELECT DISTINCT + RTrim(spi.object_name) object_name, + RTrim(spi.counter_name) counter_name,' + + + CASE + WHEN CAST(SERVERPROPERTY('EngineEdition') AS int) IN (5,8) --- needed to get actual DB Name for SQL DB/ Managed instance + THEN N'CASE WHEN ( + RTRIM(spi.object_name) LIKE ''%:Databases'' + OR RTRIM(spi.object_name) LIKE ''%:Database Replica'' + OR RTRIM(spi.object_name) LIKE ''%:Catalog Metadata'' + OR RTRIM(spi.object_name) LIKE ''%:Query Store'' + OR RTRIM(spi.object_name) LIKE ''%:Columnstore'' + OR RTRIM(spi.object_name) LIKE ''%:Advanced Analytics'') + AND TRY_CONVERT(uniqueidentifier, spi.instance_name) + IS NOT NULL -- for cloud only + THEN ISNULL(d.name,RTRIM(spi.instance_name)) -- Elastic Pools counters exist for all databases but sys.databases only has current DB value + WHEN RTRIM(object_name) LIKE ''%:Availability Replica'' + AND TRY_CONVERT(uniqueidentifier, spi.instance_name) IS NOT NULL -- for cloud only + THEN ISNULL(d.name,RTRIM(spi.instance_name)) + RTRIM(SUBSTRING(spi.instance_name, 37, LEN(spi.instance_name))) + ELSE RTRIM(spi.instance_name) + END AS instance_name,' + ELSE 'RTRIM(spi.instance_name) as instance_name, ' + END + + + 'CAST(spi.cntr_value AS BIGINT) AS cntr_value, + spi.cntr_type + FROM sys.dm_os_performance_counters AS spi ' ++ +CASE + WHEN CAST(SERVERPROPERTY('EngineEdition') AS int) IN (5,8) --- Join is ONLY for managed instance and SQL DB, not for on-prem + THEN CAST(N'LEFT JOIN sys.databases AS d + ON LEFT(spi.instance_name, 36) -- some instance_name values have an additional identifier appended after the GUID + = CASE WHEN -- in SQL DB standalone, physical_database_name for master is the GUID of the user database + d.name = ''master'' AND TRY_CONVERT(uniqueidentifier, d.physical_database_name) IS NOT NULL + THEN d.name + ELSE d.physical_database_name + END ' as NVARCHAR(MAX)) + ELSE N' ' +END + +SET @SQL = @SQL + CAST(N' WHERE ( + counter_name IN ( + ''SQL Compilations/sec'', + ''SQL Re-Compilations/sec'', + ''User Connections'', + ''Batch Requests/sec'', + ''Logouts/sec'', + ''Logins/sec'', + ''Processes blocked'', + ''Latch Waits/sec'', + ''Full Scans/sec'', + ''Index Searches/sec'', + ''Page Splits/sec'', + ''Page lookups/sec'', + ''Page reads/sec'', + ''Page writes/sec'', + ''Readahead pages/sec'', + ''Lazy writes/sec'', + ''Checkpoint pages/sec'', + ''Page life expectancy'', + ''Log File(s) Size (KB)'', + ''Log File(s) Used Size (KB)'', + ''Data File(s) Size (KB)'', + ''Transactions/sec'', + ''Write Transactions/sec'', + ''Active Temp Tables'', + ''Temp Tables Creation Rate'', + ''Temp Tables For Destruction'', + ''Free Space in tempdb (KB)'', + ''Version Store Size (KB)'', + ''Memory Grants Pending'', + ''Memory Grants Outstanding'', + ''Free list stalls/sec'', + ''Buffer cache hit ratio'', + ''Buffer cache hit ratio base'', + ''Backup/Restore Throughput/sec'', + ''Total Server Memory (KB)'', + ''Target Server Memory (KB)'', + ''Log Flushes/sec'', + ''Log Flush Wait Time'', + ''Memory broker clerk size'', + ''Log Bytes Flushed/sec'', + ''Bytes Sent to Replica/sec'', + ''Log Send Queue'', + ''Bytes Sent to Transport/sec'', + ''Sends to 
Replica/sec'', + ''Bytes Sent to Transport/sec'', + ''Sends to Transport/sec'', + ''Bytes Received from Replica/sec'', + ''Receives from Replica/sec'', + ''Flow Control Time (ms/sec)'', + ''Flow Control/sec'', + ''Resent Messages/sec'', + ''Redone Bytes/sec'', + ''XTP Memory Used (KB)'', + ''Transaction Delay'', + ''Log Bytes Received/sec'', + ''Log Apply Pending Queue'', + ''Redone Bytes/sec'', + ''Recovery Queue'', + ''Log Apply Ready Queue'', + ''CPU usage %'', + ''CPU usage % base'', + ''Queued requests'', + ''Requests completed/sec'', + ''Blocked tasks'', + ''Active memory grant amount (KB)'', + ''Disk Read Bytes/sec'', + ''Disk Read IO Throttled/sec'', + ''Disk Read IO/sec'', + ''Disk Write Bytes/sec'', + ''Disk Write IO Throttled/sec'', + ''Disk Write IO/sec'', + ''Used memory (KB)'', + ''Forwarded Records/sec'', + ''Background Writer pages/sec'', + ''Percent Log Used'', + ''Log Send Queue KB'', + ''Redo Queue KB'', + ''Mirrored Write Transactions/sec'', + ''Group Commit Time'', + ''Group Commits/Sec'' + ) + ) OR ( + object_name LIKE ''%User Settable%'' + OR object_name LIKE ''%SQL Errors%'' + ) OR ( + object_name LIKE ''%Batch Resp Statistics%'' + ) OR ( + instance_name IN (''_Total'') + AND counter_name IN ( + ''Lock Timeouts/sec'', + ''Number of Deadlocks/sec'', + ''Lock Waits/sec'', + ''Latch Waits/sec'' + ) + ) +' as NVARCHAR(MAX)) +INSERT INTO @PCounters +EXEC (@SQL) + + +SET @SQL = REPLACE('SELECT "SQLServer:Workload Group Stats" AS object, counter, instance, @@ -523,13 +761,13 @@ CAST(vs.value AS BIGINT) AS value, 1 FROM ( - SELECT + SELECT rgwg.name AS instance, rgwg.total_request_count AS "Request Count", rgwg.total_queued_request_count AS "Queued Request Count", rgwg.total_cpu_limit_violation_count AS "CPU Limit Violation Count", rgwg.total_cpu_usage_ms AS "CPU Usage (time)", - ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN 'rgwg.total_cpu_usage_preemptive_ms AS "Premptive CPU Usage (time)",' ELSE '' END + ' + ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN 'rgwg.total_cpu_usage_preemptive_ms AS "Preemptive CPU Usage (time)",' ELSE '' END + ' rgwg.total_lock_wait_count AS "Lock Wait Count", rgwg.total_lock_wait_time_ms AS "Lock Wait Time", rgwg.total_reduced_memgrant_count AS "Reduced Memory Grant Count" @@ -538,7 +776,7 @@ FROM ON rgwg.pool_id = rgrp.pool_id ) AS rg UNPIVOT ( - value FOR counter IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN '[Premptive CPU Usage (time)], ' ELSE '' END + '[Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] ) + value FOR counter IN ( [Request Count], [Queued Request Count], [CPU Limit Violation Count], [CPU Usage (time)], ' + CASE WHEN SERVERPROPERTY('ProductMajorVersion') > 10 THEN '[Preemptive CPU Usage (time)], ' ELSE '' END + '[Lock Wait Count], [Lock Wait Time], [Reduced Memory Grant Count] ) ) AS vs' ,'"','''') @@ -547,6 +785,7 @@ EXEC( @SQL ) SELECT 'sqlserver_performance' AS [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], pc.object_name AS [object], pc.counter_name AS [counter], CASE pc.instance_name WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.instance_name,'') END AS [instance], @@ -564,9 +803,14 @@ WHERE pc.counter_name NOT LIKE '% base' OPTION(RECOMPILE); ` -const sqlWaitStatsCategorizedV2 string = `SELECT -'sqlserver_waitstats' AS [measurement], +// Conditional check based on Azure SQL DB v/s the rest aka (Azure SQL Managed 
instance OR On-prem SQL Server) +// EngineEdition=5 is Azure SQL DB +const sqlWaitStatsCategorizedV2 string = `SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') != 5 +SELECT + 'sqlserver_waitstats' AS [measurement], REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], +DB_NAME() as [database_name], ws.wait_type, wait_time_ms, wait_time_ms - signal_wait_time_ms AS [resource_wait_ms], @@ -616,6 +860,7 @@ LEFT OUTER JOIN ( VALUES ('CMEMPARTITIONED','Memory'), ('CMEMTHREAD','Memory'), ('CXPACKET','Parallelism'), +('CXCONSUMER','Parallelism'), ('DBMIRROR_DBM_EVENT','Mirroring'), ('DBMIRROR_DBM_MUTEX','Mirroring'), ('DBMIRROR_EVENTS_QUEUE','Mirroring'), @@ -1090,17 +1335,15 @@ ws.wait_type NOT IN ( N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE', N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', - N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', - N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', N'PARALLEL_REDO_WORKER_WAIT_WORK', N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', - N'PREEMPTIVE_OS_PIPEOPS', N'PREEMPTIVE_OS_AUTHENTICATIONOPS', - N'PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', - N'PREEMPTIVE_OS_FILEOPS', N'PREEMPTIVE_OS_DEVICEOPS', N'PREEMPTIVE_OS_QUERYREGISTRY', - N'PREEMPTIVE_OS_WRITEFILE', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', @@ -1112,45 +1355,283 @@ ws.wait_type NOT IN ( N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', - N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', N'SQLTRACE_WAIT_ENTRIES', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', - N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT') + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT') AND waiting_tasks_count > 0 -ORDER BY -waiting_tasks_count DESC -OPTION (RECOMPILE); +AND wait_time_ms > 100; + +ELSE + SELECT + 'sqlserver_azuredb_waitstats' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name'], + dbws.wait_type, + dbws.wait_time_ms, + dbws.wait_time_ms - signal_wait_time_ms AS [resource_wait_ms], + dbws.signal_wait_time_ms, + dbws.max_wait_time_ms, + dbws.waiting_tasks_count + FROM + sys.dm_db_wait_stats AS dbws WITH (NOLOCK) + WHERE + dbws.wait_type NOT IN ( + N'BROKER_EVENTHANDLER', N'BROKER_RECEIVE_WAITFOR', N'BROKER_TASK_STOP', + 
N'BROKER_TO_FLUSH', N'BROKER_TRANSMITTER', N'CHECKPOINT_QUEUE', + N'CHKPT', N'CLR_AUTO_EVENT', N'CLR_MANUAL_EVENT', N'CLR_SEMAPHORE', + N'DBMIRROR_DBM_EVENT', N'DBMIRROR_EVENTS_QUEUE', N'DBMIRROR_WORKER_QUEUE', + N'DBMIRRORING_CMD', N'DIRTY_PAGE_POLL', N'DISPATCHER_QUEUE_SEMAPHORE', + N'EXECSYNC', N'FSAGENT', N'FT_IFTS_SCHEDULER_IDLE_WAIT', N'FT_IFTSHC_MUTEX', + N'HADR_CLUSAPI_CALL', N'HADR_FILESTREAM_IOMGR_IOCOMPLETION', N'HADR_LOGCAPTURE_WAIT', + N'HADR_NOTIFICATION_DEQUEUE', N'HADR_TIMER_TASK', N'HADR_WORK_QUEUE', + N'KSOURCE_WAKEUP', N'LAZYWRITER_SLEEP', N'LOGMGR_QUEUE', + N'MEMORY_ALLOCATION_EXT', N'ONDEMAND_TASK_QUEUE', + N'PARALLEL_REDO_WORKER_WAIT_WORK', + N'PREEMPTIVE_HADR_LEASE_MECHANISM', N'PREEMPTIVE_SP_SERVER_DIAGNOSTICS', + N'PREEMPTIVE_OS_LIBRARYOPS', N'PREEMPTIVE_OS_COMOPS', N'PREEMPTIVE_OS_CRYPTOPS', + N'PREEMPTIVE_OS_PIPEOPS','PREEMPTIVE_OS_GENERICOPS', N'PREEMPTIVE_OS_VERIFYTRUST', + N'PREEMPTIVE_OS_DEVICEOPS', + N'PREEMPTIVE_XE_CALLBACKEXECUTE', N'PREEMPTIVE_XE_DISPATCHER', + N'PREEMPTIVE_XE_GETTARGETSTATE', N'PREEMPTIVE_XE_SESSIONCOMMIT', + N'PREEMPTIVE_XE_TARGETINIT', N'PREEMPTIVE_XE_TARGETFINALIZE', + N'PWAIT_ALL_COMPONENTS_INITIALIZED', N'PWAIT_DIRECTLOGCONSUMER_GETNEXT', + N'QDS_PERSIST_TASK_MAIN_LOOP_SLEEP', + N'QDS_ASYNC_QUEUE', + N'QDS_CLEANUP_STALE_QUERIES_TASK_MAIN_LOOP_SLEEP', N'REQUEST_FOR_DEADLOCK_SEARCH', + N'RESOURCE_QUEUE', N'SERVER_IDLE_CHECK', N'SLEEP_BPOOL_FLUSH', N'SLEEP_DBSTARTUP', + N'SLEEP_DCOMSTARTUP', N'SLEEP_MASTERDBREADY', N'SLEEP_MASTERMDREADY', + N'SLEEP_MASTERUPGRADED', N'SLEEP_MSDBSTARTUP', N'SLEEP_SYSTEMTASK', N'SLEEP_TASK', + N'SLEEP_TEMPDBSTARTUP', N'SNI_HTTP_ACCEPT', N'SP_SERVER_DIAGNOSTICS_SLEEP', + N'SQLTRACE_BUFFER_FLUSH', N'SQLTRACE_INCREMENTAL_FLUSH_SLEEP', + N'SQLTRACE_WAIT_ENTRIES', + N'WAIT_FOR_RESULTS', N'WAITFOR', N'WAITFOR_TASKSHUTDOWN', N'WAIT_XTP_HOST_WAIT', + N'WAIT_XTP_OFFLINE_CKPT_NEW_LOG', N'WAIT_XTP_CKPT_CLOSE', + N'XE_BUFFERMGR_ALLPROCESSED_EVENT', N'XE_DISPATCHER_JOIN', + N'XE_DISPATCHER_WAIT', N'XE_LIVE_TARGET_TVF', N'XE_TIMER_EVENT', + N'SOS_WORK_DISPATCHER','RESERVED_MEMORY_ALLOCATION_EXT') + AND waiting_tasks_count > 0 + AND wait_time_ms > 100; ` -const sqlAzureDB string = `IF OBJECT_ID('sys.dm_db_resource_stats') IS NOT NULL +// Only executed if AzureDB flag is set +const sqlAzureDBResourceStats string = `SET DEADLOCK_PRIORITY -10; +IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB? 
BEGIN - SELECT TOP(1) - 'sqlserver_azurestats' AS [measurement], - REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], - avg_cpu_percent, - avg_data_io_percent, - avg_log_write_percent, - avg_memory_usage_percent, - xtp_storage_percent, - max_worker_percent, - max_session_percent, - dtu_limit, - avg_login_rate_percent, - end_time - FROM - sys.dm_db_resource_stats WITH (NOLOCK) - ORDER BY - end_time DESC - OPTION (RECOMPILE) + SELECT TOP(1) + 'sqlserver_azure_db_resource_stats' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], + cast(avg_cpu_percent as float) as avg_cpu_percent, + cast(avg_data_io_percent as float) as avg_data_io_percent, + cast(avg_log_write_percent as float) as avg_log_write_percent, + cast(avg_memory_usage_percent as float) as avg_memory_usage_percent, + cast(xtp_storage_percent as float) as xtp_storage_percent, + cast(max_worker_percent as float) as max_worker_percent, + cast(max_session_percent as float) as max_session_percent, + dtu_limit, + cast(avg_login_rate_percent as float) as avg_login_rate_percent , + end_time, + cast(avg_instance_memory_percent as float) as avg_instance_memory_percent , + cast(avg_instance_cpu_percent as float) as avg_instance_cpu_percent + FROM + sys.dm_db_resource_stats WITH (NOLOCK) + ORDER BY + end_time DESC END +` + +//Only executed if AzureDB Flag is set +const sqlAzureDBResourceGovernance string = ` +IF SERVERPROPERTY('EngineEdition') = 5 -- Is this Azure SQL DB? +SELECT + 'sqlserver_db_resource_governance' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], + slo_name, + dtu_limit, + max_cpu, + cap_cpu, + instance_cap_cpu, + max_db_memory, + max_db_max_size_in_mb, + db_file_growth_in_mb, + log_size_in_mb, + instance_max_worker_threads, + primary_group_max_workers, + instance_max_log_rate, + primary_min_log_rate, + primary_max_log_rate, + primary_group_min_io, + primary_group_max_io, + primary_group_min_cpu, + primary_group_max_cpu, + primary_pool_max_workers, + pool_max_io, + checkpoint_rate_mbps, + checkpoint_rate_io, + volume_local_iops, + volume_managed_xstore_iops, + volume_external_xstore_iops, + volume_type_local_iops, + volume_type_managed_xstore_iops, + volume_type_external_xstore_iops, + volume_pfs_iops, + volume_type_pfs_iops + FROM + sys.dm_user_db_resource_governance WITH (NOLOCK); ELSE BEGIN - RAISERROR('This does not seem to be an AzureDB instance. Set "azureDB = false" in your telegraf configuration.',16,1) -END` + IF SERVERPROPERTY('EngineEdition') = 8 -- Is this Azure SQL Managed Instance? 
+ SELECT + 'sqlserver_instance_resource_governance' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + instance_cap_cpu, + instance_max_log_rate, + instance_max_worker_threads, + tempdb_log_file_number, + volume_local_iops, + volume_external_xstore_iops, + volume_managed_xstore_iops, + volume_type_local_iops as voltype_local_iops, + volume_type_managed_xstore_iops as voltype_man_xtore_iops, + volume_type_external_xstore_iops as voltype_ext_xtore_iops, + volume_external_xstore_iops as vol_ext_xtore_iops + from + sys.dm_instance_resource_governance + END; +` + +const sqlServerRequestsV2 string = ` +SET NOCOUNT ON; +SELECT blocking_session_id into #blockingSessions FROM sys.dm_exec_requests WHERE blocking_session_id != 0 +create index ix_blockingSessions_1 on #blockingSessions (blocking_session_id) +SELECT + 'sqlserver_requests' AS [measurement], + REPLACE(@@SERVERNAME,'\',':') AS [sql_instance], + DB_NAME() as [database_name], + r.session_id + , r.request_id + , DB_NAME(s.database_id) as session_db_name + , r.status + , r.cpu_time as cpu_time_ms + , r.total_elapsed_time as total_elapsed_time_ms + , r.logical_reads + , r.writes + , r.command + , wait_time as wait_time_ms + , wait_type + , wait_resource + , blocking_session_id + , s.program_name + , s.host_name + , s.nt_user_name + , r.open_transaction_count AS open_transaction + , LEFT (CASE COALESCE(r.transaction_isolation_level, s.transaction_isolation_level) + WHEN 0 THEN '0-Read Committed' + WHEN 1 THEN '1-Read Uncommitted (NOLOCK)' + WHEN 2 THEN '2-Read Committed' + WHEN 3 THEN '3-Repeatable Read' + WHEN 4 THEN '4-Serializable' + WHEN 5 THEN '5-Snapshot' + ELSE CONVERT (varchar(30), r.transaction_isolation_level) + '-UNKNOWN' + END, 30) AS transaction_isolation_level + ,r.granted_query_memory as granted_query_memory_pages + , r.percent_complete + , (SUBSTRING(qt.text, r.statement_start_offset / 2 + 1, + (CASE WHEN r.statement_end_offset = -1 + THEN LEN(CONVERT(NVARCHAR(MAX), qt.text)) * 2 + ELSE r.statement_end_offset + END - r.statement_start_offset) / 2) + ) AS statement_text + , qt.objectid + , QUOTENAME(OBJECT_SCHEMA_NAME(qt.objectid,qt.dbid)) + '.' 
+ QUOTENAME(OBJECT_NAME(qt.objectid,qt.dbid)) as stmt_object_name + , DB_NAME(qt.dbid) stmt_db_name + ,CONVERT(varchar(20),[query_hash],1) as [query_hash] + ,CONVERT(varchar(20),[query_plan_hash],1) as [query_plan_hash] + FROM sys.dm_exec_requests r + LEFT OUTER JOIN sys.dm_exec_sessions s ON (s.session_id = r.session_id) + OUTER APPLY sys.dm_exec_sql_text(sql_handle) AS qt + + WHERE 1=1 + AND (r.session_id IS NOT NULL AND (s.is_user_process = 1 OR r.status COLLATE Latin1_General_BIN NOT IN ('background', 'sleeping'))) + OR (s.session_id IN (SELECT blocking_session_id FROM #blockingSessions)) + OPTION(MAXDOP 1) + +` + +const sqlServerVolumeSpaceV2 string = ` +/* Only for on-prem version of SQL Server +Gets data about disk space, only if the disk is used by SQL Server +EngineEdition: +1 = Personal or Desktop Engine +2 = Standard +3 = Enterprise +4 = Express +5 = SQL Database +6 = SQL Data Warehouse +8 = Managed Instance +*/ +IF SERVERPROPERTY('EngineEdition') NOT IN (5,8) + BEGIN + SELECT DISTINCT + 'sqlserver_volume_space' AS [measurement] + ,SERVERPROPERTY('machinename') AS [server_name] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,IIF( RIGHT(vs.[volume_mount_point],1) = '\' /*Tag value cannot end with \ */ + ,LEFT(vs.[volume_mount_point],LEN(vs.[volume_mount_point])-1) + ,vs.[volume_mount_point] + ) AS [volume_mount_point] + ,vs.[total_bytes] AS [total_space_bytes] + ,vs.[available_bytes] AS [available_space_bytes] + ,vs.[total_bytes] - vs.[available_bytes] AS [used_space_bytes] + FROM + sys.master_files as mf + CROSS APPLY sys.dm_os_volume_stats(mf.database_id, mf.file_id) as vs + END +` + +const sqlServerCpuV2 string = ` +/*The ring buffer has a new value every minute*/ +IF SERVERPROPERTY('EngineEdition') NOT IN (5,8) /*No azure DB and managed instance*/ +BEGIN +SELECT + 'sqlserver_cpu' AS [measurement] + ,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance] + ,[SQLProcessUtilization] AS [sqlserver_process_cpu] + ,[SystemIdle] AS [system_idle_cpu] + ,100 - [SystemIdle] - [SQLProcessUtilization] AS [other_process_cpu] +FROM ( + SELECT TOP 1 + [record_id] + /*,dateadd(ms, (y.[timestamp] - (SELECT CAST([ms_ticks] AS BIGINT) FROM sys.dm_os_sys_info)), GETDATE()) AS [EventTime] --use for check/debug purpose*/ + ,[SQLProcessUtilization] + ,[SystemIdle] + FROM ( + SELECT record.value('(./Record/@id)[1]', 'int') AS [record_id] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS [SystemIdle] + ,record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS [SQLProcessUtilization] + ,[TIMESTAMP] + FROM ( + SELECT [TIMESTAMP] + ,convert(XML, [record]) AS [record] + FROM sys.dm_os_ring_buffers + WHERE [ring_buffer_type] = N'RING_BUFFER_SCHEDULER_MONITOR' + AND [record] LIKE '%%' + ) AS x + ) AS y + ORDER BY record_id DESC +) as z + +END +` // Queries V1 -const sqlPerformanceMetrics string = `SET NOCOUNT ON; +const sqlPerformanceMetrics string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET ARITHABORT ON; SET QUOTED_IDENTIFIER ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED @@ -1243,7 +1724,8 @@ PIVOT(SUM(cntr_value) FOR counter_name IN (' + @ColumnName + ')) AS PVTTable EXEC sp_executesql @DynamicPivotQuery; ` -const sqlMemoryClerk string = `SET NOCOUNT ON; +const sqlMemoryClerk string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; DECLARE @sqlVers numeric(4,2) @@ -1356,7 +1838,8 @@ PIVOT ) as T; ` -const sqlDatabaseSize string = `SET NOCOUNT ON; +const sqlDatabaseSize 
string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED IF OBJECT_ID('tempdb..#baseline') IS NOT NULL @@ -1449,7 +1932,8 @@ PIVOT(SUM(database_max_size_8k_pages) FOR database_name IN (' + @ColumnName + ') EXEC sp_executesql @DynamicPivotQuery; ` -const sqlDatabaseStats string = `SET NOCOUNT ON; +const sqlDatabaseStats string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; IF OBJECT_ID('tempdb..#baseline') IS NOT NULL @@ -1583,7 +2067,8 @@ PIVOT(SUM(AvgBytesPerWrite) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTab EXEC sp_executesql @DynamicPivotQuery; ` -const sqlDatabaseIO string = `SET NOCOUNT ON; +const sqlDatabaseIO string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; DECLARE @secondsBetween tinyint = 5; DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108); @@ -1697,7 +2182,7 @@ SELECT database_name, num_of_writes_persec FROM #baselinewritten WHERE datafile_type = ''ROWS'' ) as V -PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTabl +PIVOT(SUM(num_of_writes_persec) FOR database_name IN (' + @ColumnName + ')) AS PVTTable UNION ALL SELECT measurement = ''Log (reads/sec)'', servername = REPLACE(@@SERVERNAME, ''\'', '':''), type = ''Database IO'' , ' + @ColumnName + ', Total = ' + @ColumnName2 + ' FROM @@ -1720,7 +2205,8 @@ PIVOT(SUM(num_of_reads_persec) FOR database_name IN (' + @ColumnName + ')) AS PV EXEC sp_executesql @DynamicPivotQuery; ` -const sqlDatabaseProperties string = `SET NOCOUNT ON; +const sqlDatabaseProperties string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET ARITHABORT ON; SET QUOTED_IDENTIFIER ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED @@ -1935,7 +2421,8 @@ PIVOT(SUM(Value) FOR DatabaseName IN (' + @ColumnName + ')) AS PVTTable EXEC sp_executesql @DynamicPivotQuery; ` -const sqlCPUHistory string = `SET NOCOUNT ON; +const sqlCPUHistory string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET ARITHABORT ON; SET QUOTED_IDENTIFIER ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; @@ -1971,7 +2458,8 @@ ORDER BY timestamp_ms Desc ) as T; ` -const sqlPerformanceCounters string = `SET NOCOUNT ON; +const sqlPerformanceCounters string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters CREATE TABLE #PCounters @@ -1990,7 +2478,7 @@ SELECT DISTINCT RTrim(spi.object_name) object_name , spi.cntr_value , spi.cntr_type FROM sys.dm_os_performance_counters spi -WHERE spi.object_name NOT LIKE 'SQLServer:Backup Device%' +WHERE spi.object_name NOT LIKE '%Backup Device%' AND NOT EXISTS (SELECT 1 FROM sys.databases WHERE Name = spi.instance_name); WAITFOR DELAY '00:00:01'; @@ -2012,7 +2500,7 @@ SELECT DISTINCT RTrim(spi.object_name) object_name , spi.cntr_value , spi.cntr_type FROM sys.dm_os_performance_counters spi -WHERE spi.object_name NOT LIKE 'SQLServer:Backup Device%' +WHERE spi.object_name NOT LIKE '%Backup Device%' AND NOT EXISTS (SELECT 1 FROM sys.databases WHERE Name = spi.instance_name); SELECT @@ -2034,10 +2522,10 @@ SELECT -- value , value = CAST(CASE cc.cntr_type When 65792 Then cc.cntr_value -- Count - When 537003264 Then IsNull(Cast(cc.cntr_value as Money) / NullIf(cbc.cntr_value, 0), 0) -- Ratio + When 537003264 Then IsNull(Cast(cc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value, 0), 0) -- Ratio When 
272696576 Then cc.cntr_value - pc.cntr_value -- Per Second - When 1073874176 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as Money) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg - When 272696320 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as Money) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg/sec + When 1073874176 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg + When 272696320 Then IsNull(Cast(cc.cntr_value - pc.cntr_value as decimal(19,4)) / NullIf(cbc.cntr_value - pbc.cntr_value, 0), 0) -- Avg/sec When 1073939712 Then cc.cntr_value - pc.cntr_value -- Base Else cc.cntr_value End as bigint) --, currentvalue= CAST(cc.cntr_value as bigint) @@ -2048,7 +2536,7 @@ INNER JOIN #PCounters pc On cc.object_name = pc.object_name And cc.cntr_type = pc.cntr_type LEFT JOIN #CCounters cbc On cc.object_name = cbc.object_name And (Case When cc.counter_name Like '%(ms)' Then Replace(cc.counter_name, ' (ms)',' Base') - When cc.object_name = 'SQLServer:FileTable' Then Replace(cc.counter_name, 'Avg ','') + ' base' + When cc.object_name like '%FileTable' Then Replace(cc.counter_name, 'Avg ','') + ' base' When cc.counter_name = 'Worktables From Cache Ratio' Then 'Worktables From Cache Base' When cc.counter_name = 'Avg. Length of Batched Writes' Then 'Avg. Length of Batched Writes BS' Else cc.counter_name + ' base' @@ -2059,7 +2547,7 @@ LEFT JOIN #CCounters cbc On cc.object_name = cbc.object_name LEFT JOIN #PCounters pbc On pc.object_name = pbc.object_name And pc.instance_name = pbc.instance_name And (Case When pc.counter_name Like '%(ms)' Then Replace(pc.counter_name, ' (ms)',' Base') - When pc.object_name = 'SQLServer:FileTable' Then Replace(pc.counter_name, 'Avg ','') + ' base' + When pc.object_name like '%FileTable' Then Replace(pc.counter_name, 'Avg ','') + ' base' When pc.counter_name = 'Worktables From Cache Ratio' Then 'Worktables From Cache Base' When pc.counter_name = 'Avg. Length of Batched Writes' Then 'Avg. 
Length of Batched Writes BS' Else pc.counter_name + ' base' @@ -2070,7 +2558,8 @@ IF OBJECT_ID('tempdb..#CCounters') IS NOT NULL DROP TABLE #CCounters; IF OBJECT_ID('tempdb..#PCounters') IS NOT NULL DROP TABLE #PCounters; ` -const sqlWaitStatsCategorized string = `SET NOCOUNT ON; +const sqlWaitStatsCategorized string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED DECLARE @secondsBetween tinyint = 5 DECLARE @delayInterval char(8) = CONVERT(Char(8), DATEADD(SECOND, @secondsBetween, '00:00:00'), 108); @@ -2117,7 +2606,7 @@ VALUES (N'QDS_SHUTDOWN_QUEUE'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETION'), (N'DIRTY_PAGE_POLL'), (N'DISPATCHER_QUEUE_SEMAPHORE'), (N'EXECSYNC'), (N'FSAGENT'), (N'FT_IFTS_SCHEDULER_IDLE_WAIT'), (N'FT_IFTSHC_MUTEX'), - (N'HADR_CLUSAPI_CALL'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETIO(N'), + (N'HADR_CLUSAPI_CALL'), (N'HADR_FILESTREAM_IOMGR_IOCOMPLETION'), (N'HADR_LOGCAPTURE_WAIT'), (N'HADR_NOTIFICATION_DEQUEUE'), (N'HADR_TIMER_TASK'), (N'HADR_WORK_QUEUE'), (N'KSOURCE_WAKEUP'), (N'LAZYWRITER_SLEEP'), @@ -2475,7 +2964,8 @@ PIVOT ) as T; ` -const sqlVolumeSpace string = `SET NOCOUNT ON; +const sqlVolumeSpace string = `SET DEADLOCK_PRIORITY -10; +SET NOCOUNT ON; SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; IF OBJECT_ID('tempdb..#volumestats') IS NOT NULL diff --git a/plugins/inputs/sqlserver/sqlserver_test.go b/plugins/inputs/sqlserver/sqlserver_test.go index 063af7595..c92353783 100644 --- a/plugins/inputs/sqlserver/sqlserver_test.go +++ b/plugins/inputs/sqlserver/sqlserver_test.go @@ -6,15 +6,47 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) +func TestSqlServer_QueriesInclusionExclusion(t *testing.T) { + cases := []map[string]interface{}{ + { + "IncludeQuery": []string{}, + "ExcludeQuery": []string{"WaitStatsCategorized", "DatabaseIO", "ServerProperties", "MemoryClerk", "Schedulers", "VolumeSpace", "Cpu"}, + "queries": []string{"PerformanceCounters", "SqlRequests"}, + "queriesTotal": 2, + }, + { + "IncludeQuery": []string{"PerformanceCounters", "SqlRequests"}, + "ExcludeQuery": []string{"SqlRequests", "WaitStatsCategorized", "DatabaseIO", "VolumeSpace", "Cpu"}, + "queries": []string{"PerformanceCounters"}, + "queriesTotal": 1, + }, + } + + for _, test := range cases { + s := SQLServer{ + QueryVersion: 2, + IncludeQuery: test["IncludeQuery"].([]string), + ExcludeQuery: test["ExcludeQuery"].([]string), + } + initQueries(&s) + assert.Equal(t, len(s.queries), test["queriesTotal"].(int)) + for _, query := range test["queries"].([]string) { + assert.Contains(t, s.queries, query) + } + } +} + func TestSqlServer_ParseMetrics(t *testing.T) { var acc testutil.Accumulator - queries = make(MapQuery) + queries := make(MapQuery) queries["PerformanceCounters"] = Query{Script: mockPerformanceCounters, ResultByRow: true} queries["WaitStatsCategorized"] = Query{Script: mockWaitStatsCategorized, ResultByRow: false} queries["CPUHistory"] = Query{Script: mockCPUHistory, ResultByRow: false} @@ -81,6 +113,64 @@ func TestSqlServer_ParseMetrics(t *testing.T) { } } +func TestSqlServer_MultipleInstance(t *testing.T) { + // Invoke Gather() from two separate configurations and + // confirm they don't interfere with each other + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + testServer := "Server=127.0.0.1;Port=1433;User Id=SA;Password=ABCabc01;app name=telegraf;log=1" + s := &SQLServer{ + Servers: []string{testServer}, + 
ExcludeQuery: []string{"MemoryClerk"}, + } + s2 := &SQLServer{ + Servers: []string{testServer}, + ExcludeQuery: []string{"DatabaseSize"}, + } + + var acc, acc2 testutil.Accumulator + err := s.Gather(&acc) + require.NoError(t, err) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, false) + + err = s2.Gather(&acc2) + require.NoError(t, err) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, true) + + // acc includes size metrics, and excludes memory metrics + assert.False(t, acc.HasMeasurement("Memory breakdown (%)")) + assert.True(t, acc.HasMeasurement("Log size (bytes)")) + + // acc2 includes memory metrics, and excludes size metrics + assert.True(t, acc2.HasMeasurement("Memory breakdown (%)")) + assert.False(t, acc2.HasMeasurement("Log size (bytes)")) +} + +func TestSqlServer_MultipleInit(t *testing.T) { + + s := &SQLServer{} + s2 := &SQLServer{ + ExcludeQuery: []string{"DatabaseSize"}, + } + + initQueries(s) + _, ok := s.queries["DatabaseSize"] + // acc includes size metrics + assert.True(t, ok) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, false) + + initQueries(s2) + _, ok = s2.queries["DatabaseSize"] + // acc2 excludes size metrics + assert.False(t, ok) + assert.Equal(t, s.isInitialized, true) + assert.Equal(t, s2.isInitialized, true) +} + const mockPerformanceMetrics = `measurement;servername;type;Point In Time Recovery;Available physical memory (bytes);Average pending disk IO;Average runnable tasks;Average tasks;Buffer pool rate (bytes/sec);Connection memory per connection (bytes);Memory grant pending;Page File Usage (%);Page lookup per batch request;Page split per batch request;Readahead per page read;Signal wait (%);Sql compilation per batch request;Sql recompilation per batch request;Total target memory ratio Performance metrics;WIN8-DEV;Performance metrics;0;6353158144;0;0;7;2773;415061;0;25;229371;130;10;18;188;52;14` diff --git a/plugins/inputs/stackdriver/README.md b/plugins/inputs/stackdriver/README.md new file mode 100644 index 000000000..6469b259b --- /dev/null +++ b/plugins/inputs/stackdriver/README.md @@ -0,0 +1,162 @@ +# Stackdriver Google Cloud Monitoring Input Plugin + +Query data from Google Cloud Monitoring (formerly Stackdriver) using the +[Cloud Monitoring API v3][stackdriver]. + +This plugin accesses APIs which are [chargeable][pricing]; you might incur +costs. + +### Configuration + +```toml +[[inputs.stackdriver]] + ## GCP Project + project = "erudite-bloom-151019" + + ## Include timeseries that start with the given metric type. + metric_type_prefix_include = [ + "compute.googleapis.com/", + ] + + ## Exclude timeseries that start with the given metric type. + # metric_type_prefix_exclude = [] + + ## Most metrics are updated no more than once per minute; it is recommended + ## to override the agent level interval with a value of 1m or greater. + interval = "1m" + + ## Maximum number of API calls to make per second. The quota for accounts + ## varies, it can be viewed on the API dashboard: + ## https://cloud.google.com/monitoring/quotas#quotas_and_limits + # rate_limit = 14 + + ## The delay and window options control the number of points selected on + ## each gather. When set, metrics are gathered between: + ## start: now() - delay - window + ## end: now() - delay + # + ## Collection delay; if set too low metrics may not yet be available. 
+ # delay = "5m" + # + ## If unset, the window will start at 1m and be updated dynamically to span + ## the time between calls (approximately the length of the plugin interval). + # window = "1m" + + ## TTL for cached list of metric types. This is the maximum amount of time + ## it may take to discover new metrics. + # cache_ttl = "1h" + + ## If true, raw bucket counts are collected for distribution value types. + ## For a more lightweight collection, you may wish to disable and use + ## distribution_aggregation_aligners instead. + # gather_raw_distribution_buckets = true + + ## Aggregate functions to be used for metrics whose value type is + ## distribution. These aggregate values are recorded in in addition to raw + ## bucket counts; if they are enabled. + ## + ## For a list of aligner strings see: + ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner + # distribution_aggregation_aligners = [ + # "ALIGN_PERCENTILE_99", + # "ALIGN_PERCENTILE_95", + # "ALIGN_PERCENTILE_50", + # ] + + ## Filters can be added to reduce the number of time series matched. All + ## functions are supported: starts_with, ends_with, has_substring, and + ## one_of. Only the '=' operator is supported. + ## + ## The logical operators when combining filters are defined statically using + ## the following values: + ## filter ::= {AND } + ## resource_labels ::= {OR } + ## metric_labels ::= {OR } + ## + ## For more details, see https://cloud.google.com/monitoring/api/v3/filters + # + ## Resource labels refine the time series selection with the following expression: + ## resource.labels. = + # [[inputs.stackdriver.filter.resource_labels]] + # key = "instance_name" + # value = 'starts_with("localhost")' + # + ## Metric labels refine the time series selection with the following expression: + ## metric.labels. = + # [[inputs.stackdriver.filter.metric_labels]] + # key = "device_name" + # value = 'one_of("sda", "sdb")' +``` + +#### Authentication + +It is recommended to use a service account to authenticate with the +Stackdriver Monitoring API. [Getting Started with Authentication][auth]. + +### Metrics + +Metrics are created using one of there patterns depending on if the value type +is a scalar value, raw distribution buckets, or aligned bucket values. + +In all cases, the Stackdriver metric type is split on the last component into +the measurement and field: +``` +compute.googleapis.com/instance/disk/read_bytes_count +└────────── measurement ─────────┘ └── field ───┘ +``` + +**Scalar Values:** + +- measurement + - tags: + - resource_labels + - metric_labels + - fields: + - field + + +**Distributions:** + +Distributions are represented by a set of fields along with the bucket values +tagged with the bucket boundary. Buckets are cumulative: each bucket +represents the total number of items less than the `lt` tag. + +- measurement + - tags: + - resource_labels + - metric_labels + - fields: + - field_count + - field_mean + - field_sum_of_squared_deviation + - field_range_min + - field_range_max + ++ measurement + - tags: + - resource_labels + - metric_labels + - lt (less than) + - fields: + - field_bucket + +**Aligned Aggregations:** + +- measurement + - tags: + - resource_labels + - metric_labels + - fields: + - field_alignment_function + +### Troubleshooting + +When Telegraf is ran with `--debug`, detailed information about the performed +queries will be logged. 
+ +### Example Output +``` +``` +[stackdriver]: https://cloud.google.com/monitoring/api/v3/ +[auth]: https://cloud.google.com/docs/authentication/getting-started +[pricing]: https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services diff --git a/plugins/inputs/stackdriver/stackdriver.go b/plugins/inputs/stackdriver/stackdriver.go new file mode 100644 index 000000000..431076743 --- /dev/null +++ b/plugins/inputs/stackdriver/stackdriver.go @@ -0,0 +1,712 @@ +package stackdriver + +import ( + "context" + "fmt" + "math" + "strconv" + "strings" + "sync" + "time" + + monitoring "cloud.google.com/go/monitoring/apiv3" + googlepbduration "github.com/golang/protobuf/ptypes/duration" + googlepbts "github.com/golang/protobuf/ptypes/timestamp" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/limiter" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/inputs" // Imports the Stackdriver Monitoring client package. + "github.com/influxdata/telegraf/selfstat" + "google.golang.org/api/iterator" + distributionpb "google.golang.org/genproto/googleapis/api/distribution" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +const ( + defaultRateLimit = 14 + description = "Gather timeseries from Google Cloud Platform v3 monitoring API" + sampleConfig = ` + ## GCP Project + project = "erudite-bloom-151019" + + ## Include timeseries that start with the given metric type. + metric_type_prefix_include = [ + "compute.googleapis.com/", + ] + + ## Exclude timeseries that start with the given metric type. + # metric_type_prefix_exclude = [] + + ## Many metrics are updated once per minute; it is recommended to override + ## the agent level interval with a value of 1m or greater. + interval = "1m" + + ## Maximum number of API calls to make per second. The quota for accounts + ## varies, it can be viewed on the API dashboard: + ## https://cloud.google.com/monitoring/quotas#quotas_and_limits + # rate_limit = 14 + + ## The delay and window options control the number of points selected on + ## each gather. When set, metrics are gathered between: + ## start: now() - delay - window + ## end: now() - delay + # + ## Collection delay; if set too low metrics may not yet be available. + # delay = "5m" + # + ## If unset, the window will start at 1m and be updated dynamically to span + ## the time between calls (approximately the length of the plugin interval). + # window = "1m" + + ## TTL for cached list of metric types. This is the maximum amount of time + ## it may take to discover new metrics. + # cache_ttl = "1h" + + ## If true, raw bucket counts are collected for distribution value types. + ## For a more lightweight collection, you may wish to disable and use + ## distribution_aggregation_aligners instead. + # gather_raw_distribution_buckets = true + + ## Aggregate functions to be used for metrics whose value type is + ## distribution. These aggregate values are recorded in in addition to raw + ## bucket counts; if they are enabled. + ## + ## For a list of aligner strings see: + ## https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#aligner + # distribution_aggregation_aligners = [ + # "ALIGN_PERCENTILE_99", + # "ALIGN_PERCENTILE_95", + # "ALIGN_PERCENTILE_50", + # ] + + ## Filters can be added to reduce the number of time series matched. 
All + ## functions are supported: starts_with, ends_with, has_substring, and + ## one_of. Only the '=' operator is supported. + ## + ## The logical operators when combining filters are defined statically using + ## the following values: + ## filter ::= {AND } + ## resource_labels ::= {OR } + ## metric_labels ::= {OR } + ## + ## For more details, see https://cloud.google.com/monitoring/api/v3/filters + # + ## Resource labels refine the time series selection with the following expression: + ## resource.labels. = + # [[inputs.stackdriver.filter.resource_labels]] + # key = "instance_name" + # value = 'starts_with("localhost")' + # + ## Metric labels refine the time series selection with the following expression: + ## metric.labels. = + # [[inputs.stackdriver.filter.metric_labels]] + # key = "device_name" + # value = 'one_of("sda", "sdb")' +` +) + +var ( + defaultCacheTTL = internal.Duration{Duration: 1 * time.Hour} + defaultWindow = internal.Duration{Duration: 1 * time.Minute} + defaultDelay = internal.Duration{Duration: 5 * time.Minute} +) + +type ( + // Stackdriver is the Google Stackdriver config info. + Stackdriver struct { + Project string `toml:"project"` + RateLimit int `toml:"rate_limit"` + Window internal.Duration `toml:"window"` + Delay internal.Duration `toml:"delay"` + CacheTTL internal.Duration `toml:"cache_ttl"` + MetricTypePrefixInclude []string `toml:"metric_type_prefix_include"` + MetricTypePrefixExclude []string `toml:"metric_type_prefix_exclude"` + GatherRawDistributionBuckets bool `toml:"gather_raw_distribution_buckets"` + DistributionAggregationAligners []string `toml:"distribution_aggregation_aligners"` + Filter *ListTimeSeriesFilter `toml:"filter"` + + Log telegraf.Logger + + client metricClient + timeSeriesConfCache *timeSeriesConfCache + prevEnd time.Time + } + + // ListTimeSeriesFilter contains resource labels and metric labels + ListTimeSeriesFilter struct { + ResourceLabels []*Label `json:"resource_labels"` + MetricLabels []*Label `json:"metric_labels"` + } + + // Label contains key and value + Label struct { + Key string `toml:"key"` + Value string `toml:"value"` + } + + // TimeSeriesConfCache caches generated timeseries configurations + timeSeriesConfCache struct { + TTL time.Duration + Generated time.Time + TimeSeriesConfs []*timeSeriesConf + } + + // Internal structure which holds our configuration for a particular GCP time + // series. + timeSeriesConf struct { + // The influx measurement name this time series maps to + measurement string + // The prefix to use before any influx field names that we'll write for + // this time series. (Or, if we only decide to write one field name, this + // field just holds the value of the field name.) + fieldKey string + // The GCP API request that we'll use to fetch data for this time series. 
+ listTimeSeriesRequest *monitoringpb.ListTimeSeriesRequest + } + + // stackdriverMetricClient is a metric client for stackdriver + stackdriverMetricClient struct { + log telegraf.Logger + conn *monitoring.MetricClient + + listMetricDescriptorsCalls selfstat.Stat + listTimeSeriesCalls selfstat.Stat + } + + // metricClient is convenient for testing + metricClient interface { + ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) + ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) + Close() error + } + + lockedSeriesGrouper struct { + sync.Mutex + *metric.SeriesGrouper + } +) + +func (g *lockedSeriesGrouper) Add( + measurement string, + tags map[string]string, + tm time.Time, + field string, + fieldValue interface{}, +) error { + g.Lock() + defer g.Unlock() + return g.SeriesGrouper.Add(measurement, tags, tm, field, fieldValue) +} + +// ListMetricDescriptors implements metricClient interface +func (c *stackdriverMetricClient) ListMetricDescriptors( + ctx context.Context, + req *monitoringpb.ListMetricDescriptorsRequest, +) (<-chan *metricpb.MetricDescriptor, error) { + mdChan := make(chan *metricpb.MetricDescriptor, 1000) + + go func() { + c.log.Debugf("List metric descriptor request filter: %s", req.Filter) + defer close(mdChan) + + // Iterate over metric descriptors and send them to buffered channel + mdResp := c.conn.ListMetricDescriptors(ctx, req) + c.listMetricDescriptorsCalls.Incr(1) + for { + mdDesc, mdErr := mdResp.Next() + if mdErr != nil { + if mdErr != iterator.Done { + c.log.Errorf("Failed iterating metric descriptor responses: %q: %v", req.String(), mdErr) + } + break + } + mdChan <- mdDesc + } + }() + + return mdChan, nil +} + +// ListTimeSeries implements metricClient interface +func (c *stackdriverMetricClient) ListTimeSeries( + ctx context.Context, + req *monitoringpb.ListTimeSeriesRequest, +) (<-chan *monitoringpb.TimeSeries, error) { + tsChan := make(chan *monitoringpb.TimeSeries, 1000) + + go func() { + c.log.Debugf("List time series request filter: %s", req.Filter) + defer close(tsChan) + + // Iterate over timeseries and send them to buffered channel + tsResp := c.conn.ListTimeSeries(ctx, req) + c.listTimeSeriesCalls.Incr(1) + for { + tsDesc, tsErr := tsResp.Next() + if tsErr != nil { + if tsErr != iterator.Done { + c.log.Errorf("Failed iterating time series responses: %q: %v", req.String(), tsErr) + } + break + } + tsChan <- tsDesc + } + }() + + return tsChan, nil +} + +// Close implements metricClient interface +func (s *stackdriverMetricClient) Close() error { + return s.conn.Close() +} + +// Description implements telegraf.Input interface +func (s *Stackdriver) Description() string { + return description +} + +// SampleConfig implements telegraf.Input interface +func (s *Stackdriver) SampleConfig() string { + return sampleConfig +} + +// Gather implements telegraf.Input interface +func (s *Stackdriver) Gather(acc telegraf.Accumulator) error { + ctx := context.Background() + + if s.RateLimit == 0 { + s.RateLimit = defaultRateLimit + } + + err := s.initializeStackdriverClient(ctx) + if err != nil { + return err + } + + start, end := s.updateWindow(s.prevEnd) + s.prevEnd = end + + tsConfs, err := s.generatetimeSeriesConfs(ctx, start, end) + if err != nil { + return err + } + + lmtr := limiter.NewRateLimiter(s.RateLimit, time.Second) + defer lmtr.Stop() + + grouper := &lockedSeriesGrouper{ + SeriesGrouper: 
metric.NewSeriesGrouper(), + } + + var wg sync.WaitGroup + wg.Add(len(tsConfs)) + for _, tsConf := range tsConfs { + <-lmtr.C + go func(tsConf *timeSeriesConf) { + defer wg.Done() + acc.AddError(s.gatherTimeSeries(ctx, grouper, tsConf)) + }(tsConf) + } + wg.Wait() + + for _, metric := range grouper.Metrics() { + acc.AddMetric(metric) + } + + return nil +} + +// Returns the start and end time for the next collection. +func (s *Stackdriver) updateWindow(prevEnd time.Time) (time.Time, time.Time) { + var start time.Time + if s.Window.Duration != 0 { + start = time.Now().Add(-s.Delay.Duration).Add(-s.Window.Duration) + } else if prevEnd.IsZero() { + start = time.Now().Add(-s.Delay.Duration).Add(-defaultWindow.Duration) + } else { + start = prevEnd + } + end := time.Now().Add(-s.Delay.Duration) + return start, end +} + +// Generate filter string for ListTimeSeriesRequest +func (s *Stackdriver) newListTimeSeriesFilter(metricType string) string { + functions := []string{ + "starts_with", + "ends_with", + "has_substring", + "one_of", + } + filterString := fmt.Sprintf(`metric.type = "%s"`, metricType) + if s.Filter == nil { + return filterString + } + + var valueFmt string + if len(s.Filter.ResourceLabels) > 0 { + resourceLabelsFilter := make([]string, len(s.Filter.ResourceLabels)) + for i, resourceLabel := range s.Filter.ResourceLabels { + // check if resource label value contains function + if includeExcludeHelper(resourceLabel.Value, functions, nil) { + valueFmt = `resource.labels.%s = %s` + } else { + valueFmt = `resource.labels.%s = "%s"` + } + resourceLabelsFilter[i] = fmt.Sprintf(valueFmt, resourceLabel.Key, resourceLabel.Value) + } + if len(resourceLabelsFilter) == 1 { + filterString += fmt.Sprintf(" AND %s", resourceLabelsFilter[0]) + } else { + filterString += fmt.Sprintf(" AND (%s)", strings.Join(resourceLabelsFilter, " OR ")) + } + } + + if len(s.Filter.MetricLabels) > 0 { + metricLabelsFilter := make([]string, len(s.Filter.MetricLabels)) + for i, metricLabel := range s.Filter.MetricLabels { + // check if metric label value contains function + if includeExcludeHelper(metricLabel.Value, functions, nil) { + valueFmt = `metric.labels.%s = %s` + } else { + valueFmt = `metric.labels.%s = "%s"` + } + metricLabelsFilter[i] = fmt.Sprintf(valueFmt, metricLabel.Key, metricLabel.Value) + } + if len(metricLabelsFilter) == 1 { + filterString += fmt.Sprintf(" AND %s", metricLabelsFilter[0]) + } else { + filterString += fmt.Sprintf(" AND (%s)", strings.Join(metricLabelsFilter, " OR ")) + } + } + + return filterString +} + +// Create and initialize a timeSeriesConf for a given GCP metric type with +// defaults taken from the gcp_stackdriver plugin configuration. +func (s *Stackdriver) newTimeSeriesConf( + metricType string, startTime, endTime time.Time, +) *timeSeriesConf { + filter := s.newListTimeSeriesFilter(metricType) + interval := &monitoringpb.TimeInterval{ + EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, + StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + } + tsReq := &monitoringpb.ListTimeSeriesRequest{ + Name: monitoring.MetricProjectPath(s.Project), + Filter: filter, + Interval: interval, + } + cfg := &timeSeriesConf{ + measurement: metricType, + fieldKey: "value", + listTimeSeriesRequest: tsReq, + } + + // GCP metric types have at least one slash, but we'll be defensive anyway. 
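// For example, "compute.googleapis.com/instance/disk/read_bytes_count"
// splits into measurement "compute.googleapis.com/instance/disk" and
// field key "read_bytes_count".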
+ slashIdx := strings.LastIndex(metricType, "/") + if slashIdx > 0 { + cfg.measurement = metricType[:slashIdx] + cfg.fieldKey = metricType[slashIdx+1:] + } + + return cfg +} + +// Change this configuration to query an aggregate by specifying an "aligner". +// In GCP monitoring, "aligning" is aggregation performed *within* a time +// series, to distill a pile of data points down to a single data point for +// some given time period (here, we specify 60s as our time period). This is +// especially useful for scraping GCP "distribution" metric types, whose raw +// data amounts to a ~60 bucket histogram, which is fairly hard to query and +// visualize in the TICK stack. +func (t *timeSeriesConf) initForAggregate(alignerStr string) { + // Check if alignerStr is valid + alignerInt, isValid := monitoringpb.Aggregation_Aligner_value[alignerStr] + if !isValid { + alignerStr = monitoringpb.Aggregation_Aligner_name[alignerInt] + } + aligner := monitoringpb.Aggregation_Aligner(alignerInt) + agg := &monitoringpb.Aggregation{ + AlignmentPeriod: &googlepbduration.Duration{Seconds: 60}, + PerSeriesAligner: aligner, + } + t.fieldKey = t.fieldKey + "_" + strings.ToLower(alignerStr) + t.listTimeSeriesRequest.Aggregation = agg +} + +// IsValid checks timeseriesconf cache validity +func (c *timeSeriesConfCache) IsValid() bool { + return c.TimeSeriesConfs != nil && time.Since(c.Generated) < c.TTL +} + +func (s *Stackdriver) initializeStackdriverClient(ctx context.Context) error { + if s.client == nil { + client, err := monitoring.NewMetricClient(ctx) + if err != nil { + return fmt.Errorf("failed to create stackdriver monitoring client: %v", err) + } + + tags := map[string]string{ + "project_id": s.Project, + } + listMetricDescriptorsCalls := selfstat.Register( + "stackdriver", "list_metric_descriptors_calls", tags) + listTimeSeriesCalls := selfstat.Register( + "stackdriver", "list_timeseries_calls", tags) + + s.client = &stackdriverMetricClient{ + log: s.Log, + conn: client, + listMetricDescriptorsCalls: listMetricDescriptorsCalls, + listTimeSeriesCalls: listTimeSeriesCalls, + } + } + + return nil +} + +func includeExcludeHelper(key string, includes []string, excludes []string) bool { + if len(includes) > 0 { + for _, includeStr := range includes { + if strings.HasPrefix(key, includeStr) { + return true + } + } + return false + } + if len(excludes) > 0 { + for _, excludeStr := range excludes { + if strings.HasPrefix(key, excludeStr) { + return false + } + } + return true + } + return true +} + +// Test whether a particular GCP metric type should be scraped by this plugin +// by checking the plugin name against the configuration's +// "includeMetricTypePrefixes" and "excludeMetricTypePrefixes" +func (s *Stackdriver) includeMetricType(metricType string) bool { + k := metricType + inc := s.MetricTypePrefixInclude + exc := s.MetricTypePrefixExclude + + return includeExcludeHelper(k, inc, exc) +} + +// Generates filter for list metric descriptors request +func (s *Stackdriver) newListMetricDescriptorsFilters() []string { + if len(s.MetricTypePrefixInclude) == 0 { + return nil + } + + metricTypeFilters := make([]string, len(s.MetricTypePrefixInclude)) + for i, metricTypePrefix := range s.MetricTypePrefixInclude { + metricTypeFilters[i] = fmt.Sprintf(`metric.type = starts_with(%q)`, metricTypePrefix) + } + return metricTypeFilters +} + +// Generate a list of timeSeriesConfig structs by making a ListMetricDescriptors +// API request and filtering the result against our configuration. 
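// For example, with metric_type_prefix_include = ["compute.googleapis.com/"]
// the descriptor request carries the filter
//   metric.type = starts_with("compute.googleapis.com/")
// whereas with no include prefixes a single unfiltered request is made and
// each returned descriptor is checked against the exclude prefixes instead.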
+func (s *Stackdriver) generatetimeSeriesConfs( + ctx context.Context, startTime, endTime time.Time, +) ([]*timeSeriesConf, error) { + if s.timeSeriesConfCache != nil && s.timeSeriesConfCache.IsValid() { + // Update interval for timeseries requests in timeseries cache + interval := &monitoringpb.TimeInterval{ + EndTime: &googlepbts.Timestamp{Seconds: endTime.Unix()}, + StartTime: &googlepbts.Timestamp{Seconds: startTime.Unix()}, + } + for _, timeSeriesConf := range s.timeSeriesConfCache.TimeSeriesConfs { + timeSeriesConf.listTimeSeriesRequest.Interval = interval + } + return s.timeSeriesConfCache.TimeSeriesConfs, nil + } + + ret := []*timeSeriesConf{} + req := &monitoringpb.ListMetricDescriptorsRequest{ + Name: monitoring.MetricProjectPath(s.Project), + } + + filters := s.newListMetricDescriptorsFilters() + if len(filters) == 0 { + filters = []string{""} + } + + for _, filter := range filters { + // Add filter for list metric descriptors if + // includeMetricTypePrefixes is specified, + // this is more efficient than iterating over + // all metric descriptors + req.Filter = filter + mdRespChan, err := s.client.ListMetricDescriptors(ctx, req) + if err != nil { + return nil, err + } + + for metricDescriptor := range mdRespChan { + metricType := metricDescriptor.Type + valueType := metricDescriptor.ValueType + + if filter == "" && !s.includeMetricType(metricType) { + continue + } + + if valueType == metricpb.MetricDescriptor_DISTRIBUTION { + if s.GatherRawDistributionBuckets { + tsConf := s.newTimeSeriesConf(metricType, startTime, endTime) + ret = append(ret, tsConf) + } + for _, alignerStr := range s.DistributionAggregationAligners { + tsConf := s.newTimeSeriesConf(metricType, startTime, endTime) + tsConf.initForAggregate(alignerStr) + ret = append(ret, tsConf) + } + } else { + ret = append(ret, s.newTimeSeriesConf(metricType, startTime, endTime)) + } + } + } + + s.timeSeriesConfCache = &timeSeriesConfCache{ + TimeSeriesConfs: ret, + Generated: time.Now(), + TTL: s.CacheTTL.Duration, + } + + return ret, nil +} + +// Do the work to gather an individual time series. Runs inside a +// timeseries-specific goroutine. 
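// Resource type, resource labels and metric labels become tags on every point;
// scalar points are written under the configured field key, while distribution
// points are expanded into the *_count, *_mean, *_range_*, and *_bucket fields
// by addDistribution below.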
+func (s *Stackdriver) gatherTimeSeries( + ctx context.Context, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, +) error { + tsReq := tsConf.listTimeSeriesRequest + + tsRespChan, err := s.client.ListTimeSeries(ctx, tsReq) + if err != nil { + return err + } + + for tsDesc := range tsRespChan { + tags := map[string]string{ + "resource_type": tsDesc.Resource.Type, + } + for k, v := range tsDesc.Resource.Labels { + tags[k] = v + } + for k, v := range tsDesc.Metric.Labels { + tags[k] = v + } + + for _, p := range tsDesc.Points { + ts := time.Unix(p.Interval.EndTime.Seconds, 0) + + if tsDesc.ValueType == metricpb.MetricDescriptor_DISTRIBUTION { + dist := p.Value.GetDistributionValue() + s.addDistribution(dist, tags, ts, grouper, tsConf) + } else { + var value interface{} + + // Types that are valid to be assigned to Value + // See: https://godoc.org/google.golang.org/genproto/googleapis/monitoring/v3#TypedValue + switch tsDesc.ValueType { + case metricpb.MetricDescriptor_BOOL: + value = p.Value.GetBoolValue() + case metricpb.MetricDescriptor_INT64: + value = p.Value.GetInt64Value() + case metricpb.MetricDescriptor_DOUBLE: + value = p.Value.GetDoubleValue() + case metricpb.MetricDescriptor_STRING: + value = p.Value.GetStringValue() + } + + grouper.Add(tsConf.measurement, tags, ts, tsConf.fieldKey, value) + } + } + } + + return nil +} + +// AddDistribution adds metrics from a distribution value type. +func (s *Stackdriver) addDistribution( + metric *distributionpb.Distribution, + tags map[string]string, ts time.Time, grouper *lockedSeriesGrouper, tsConf *timeSeriesConf, +) { + field := tsConf.fieldKey + name := tsConf.measurement + + grouper.Add(name, tags, ts, field+"_count", metric.Count) + grouper.Add(name, tags, ts, field+"_mean", metric.Mean) + grouper.Add(name, tags, ts, field+"_sum_of_squared_deviation", metric.SumOfSquaredDeviation) + + if metric.Range != nil { + grouper.Add(name, tags, ts, field+"_range_min", metric.Range.Min) + grouper.Add(name, tags, ts, field+"_range_max", metric.Range.Max) + } + + linearBuckets := metric.BucketOptions.GetLinearBuckets() + exponentialBuckets := metric.BucketOptions.GetExponentialBuckets() + explicitBuckets := metric.BucketOptions.GetExplicitBuckets() + + var numBuckets int32 + if linearBuckets != nil { + numBuckets = linearBuckets.NumFiniteBuckets + 2 + } else if exponentialBuckets != nil { + numBuckets = exponentialBuckets.NumFiniteBuckets + 2 + } else { + numBuckets = int32(len(explicitBuckets.Bounds)) + 1 + } + + var i int32 + var count int64 + for i = 0; i < numBuckets; i++ { + // The last bucket is the overflow bucket, and includes all values + // greater than the previous bound. + if i == numBuckets-1 { + tags["lt"] = "+Inf" + } else { + var upperBound float64 + if linearBuckets != nil { + upperBound = linearBuckets.Offset + (linearBuckets.Width * float64(i)) + } else if exponentialBuckets != nil { + width := math.Pow(exponentialBuckets.GrowthFactor, float64(i)) + upperBound = exponentialBuckets.Scale * width + } else if explicitBuckets != nil { + upperBound = explicitBuckets.Bounds[i] + } + tags["lt"] = strconv.FormatFloat(upperBound, 'f', -1, 64) + } + + // Add to the cumulative count; trailing buckets with value 0 are + // omitted from the response. 
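// For example, linear buckets with NumFiniteBuckets=2, Width=1, Offset=1
// and BucketCounts [0, 1, 3, 0] emit cumulative values 0, 1, 4, 4 tagged
// lt=1, lt=2, lt=3 and lt=+Inf respectively.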
+ if i < int32(len(metric.BucketCounts)) { + count += metric.BucketCounts[i] + } + grouper.Add(name, tags, ts, field+"_bucket", count) + } +} + +func init() { + f := func() telegraf.Input { + return &Stackdriver{ + CacheTTL: defaultCacheTTL, + RateLimit: defaultRateLimit, + Delay: defaultDelay, + GatherRawDistributionBuckets: true, + DistributionAggregationAligners: []string{}, + } + } + + inputs.Add("stackdriver", f) +} diff --git a/plugins/inputs/stackdriver/stackdriver_test.go b/plugins/inputs/stackdriver/stackdriver_test.go new file mode 100644 index 000000000..348cd497b --- /dev/null +++ b/plugins/inputs/stackdriver/stackdriver_test.go @@ -0,0 +1,1127 @@ +package stackdriver + +import ( + "context" + "testing" + "time" + + "github.com/golang/protobuf/ptypes/timestamp" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "google.golang.org/genproto/googleapis/api/distribution" + metricpb "google.golang.org/genproto/googleapis/api/metric" + "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +type Call struct { + name string + args []interface{} +} + +type MockStackdriverClient struct { + ListMetricDescriptorsF func(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) + ListTimeSeriesF func(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) + CloseF func() error + + calls []*Call +} + +func (m *MockStackdriverClient) ListMetricDescriptors( + ctx context.Context, + req *monitoringpb.ListMetricDescriptorsRequest, +) (<-chan *metricpb.MetricDescriptor, error) { + call := &Call{name: "ListMetricDescriptors", args: []interface{}{ctx, req}} + m.calls = append(m.calls, call) + return m.ListMetricDescriptorsF(ctx, req) +} + +func (m *MockStackdriverClient) ListTimeSeries( + ctx context.Context, + req *monitoringpb.ListTimeSeriesRequest, +) (<-chan *monitoringpb.TimeSeries, error) { + call := &Call{name: "ListTimeSeries", args: []interface{}{ctx, req}} + m.calls = append(m.calls, call) + return m.ListTimeSeriesF(ctx, req) +} + +func (m *MockStackdriverClient) Close() error { + call := &Call{name: "Close", args: []interface{}{}} + m.calls = append(m.calls, call) + return m.CloseF() +} + +func TestInitAndRegister(t *testing.T) { + expected := &Stackdriver{ + CacheTTL: defaultCacheTTL, + RateLimit: defaultRateLimit, + Delay: defaultDelay, + GatherRawDistributionBuckets: true, + DistributionAggregationAligners: []string{}, + } + require.Equal(t, expected, inputs.Inputs["stackdriver"]()) +} + +func createTimeSeries( + point *monitoringpb.Point, valueType metricpb.MetricDescriptor_ValueType, +) *monitoringpb.TimeSeries { + return &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{Labels: make(map[string]string)}, + Resource: &monitoredres.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": "test", + }, + }, + Points: []*monitoringpb.Point{point}, + ValueType: valueType, + } +} + +func TestGather(t *testing.T) { + now := time.Now().Round(time.Second) + tests := []struct { + name string + descriptor *metricpb.MetricDescriptor + timeseries *monitoringpb.TimeSeries + expected []telegraf.Metric + }{ + { + name: "double", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + timeseries: 
createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage": 42.0, + }, + now), + }, + }, + { + name: "int64", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_INT64, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: 42, + }, + }, + }, + metricpb.MetricDescriptor_INT64, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage": 42, + }, + now), + }, + }, + { + name: "bool", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_BOOL, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_BoolValue{ + BoolValue: true, + }, + }, + }, + metricpb.MetricDescriptor_BOOL, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage": true, + }, + now), + }, + }, + { + name: "string", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_STRING, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_StringValue{ + StringValue: "foo", + }, + }, + }, + metricpb.MetricDescriptor_STRING, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage": "foo", + }, + now), + }, + }, + { + name: "metric labels", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + timeseries: &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Labels: map[string]string{ + "resource_type": "instance", + }, + }, + Resource: &monitoredres.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": "test", + }, + }, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + }, + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "instance", + "project_id": "test", + }, + map[string]interface{}{ + "usage": 42.0, + }, + now), + }, + }, + { + name: "linear buckets", + descriptor: &metricpb.MetricDescriptor{ + Type: 
"telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distribution.Distribution{ + Count: 2, + Mean: 2.0, + SumOfSquaredDeviation: 1.0, + Range: &distribution.Distribution_Range{ + Min: 0.0, + Max: 3.0, + }, + BucketCounts: []int64{0, 1, 3, 0}, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_LinearBuckets{ + LinearBuckets: &distribution.Distribution_BucketOptions_Linear{ + NumFiniteBuckets: 2, + Width: 1, + Offset: 1, + }, + }, + }, + }, + }, + }, + }, + metricpb.MetricDescriptor_DISTRIBUTION, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_count": int64(2), + "usage_range_min": 0.0, + "usage_range_max": 3.0, + "usage_mean": 2.0, + "usage_sum_of_squared_deviation": 1.0, + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "1", + }, + map[string]interface{}{ + "usage_bucket": int64(0), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "2", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "3", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "+Inf", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + }, + }, + { + name: "exponential buckets", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distribution.Distribution{ + Count: 2, + Mean: 2.0, + SumOfSquaredDeviation: 1.0, + Range: &distribution.Distribution_Range{ + Min: 0.0, + Max: 3.0, + }, + BucketCounts: []int64{0, 1, 3, 0}, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_ExponentialBuckets{ + ExponentialBuckets: &distribution.Distribution_BucketOptions_Exponential{ + NumFiniteBuckets: 2, + GrowthFactor: 2, + Scale: 1, + }, + }, + }, + }, + }, + }, + }, + metricpb.MetricDescriptor_DISTRIBUTION, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_count": int64(2), + "usage_range_min": 0.0, + "usage_range_max": 3.0, + "usage_mean": 2.0, + "usage_sum_of_squared_deviation": 1.0, + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "1", + }, + map[string]interface{}{ + "usage_bucket": int64(0), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + 
"resource_type": "global", + "project_id": "test", + "lt": "2", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "4", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "+Inf", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + }, + }, + { + name: "explicit buckets", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distribution.Distribution{ + Count: 4, + Mean: 2.0, + SumOfSquaredDeviation: 1.0, + Range: &distribution.Distribution_Range{ + Min: 0.0, + Max: 3.0, + }, + BucketCounts: []int64{0, 1, 3}, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{ + Bounds: []float64{1.0, 2.0}, + }, + }, + }, + }, + }, + }, + }, + metricpb.MetricDescriptor_DISTRIBUTION, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_count": int64(4), + "usage_range_min": 0.0, + "usage_range_max": 3.0, + "usage_mean": 2.0, + "usage_sum_of_squared_deviation": 1.0, + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "1", + }, + map[string]interface{}{ + "usage_bucket": int64(0), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "2", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "+Inf", + }, + map[string]interface{}{ + "usage_bucket": int64(4), + }, + now), + }, + }, + { + name: "implicit buckets are zero", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distribution.Distribution{ + Count: 2, + Mean: 2.0, + SumOfSquaredDeviation: 1.0, + Range: &distribution.Distribution_Range{ + Min: 0.0, + Max: 3.0, + }, + BucketCounts: []int64{0, 1}, + BucketOptions: &distribution.Distribution_BucketOptions{ + Options: &distribution.Distribution_BucketOptions_LinearBuckets{ + LinearBuckets: &distribution.Distribution_BucketOptions_Linear{ + NumFiniteBuckets: 2, + Width: 1, + Offset: 1, + }, + }, + }, + }, + }, + }, + }, + metricpb.MetricDescriptor_DISTRIBUTION, + ), + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_count": int64(2), + 
"usage_range_min": 0.0, + "usage_range_max": 3.0, + "usage_mean": 2.0, + "usage_sum_of_squared_deviation": 1.0, + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "1", + }, + map[string]interface{}{ + "usage_bucket": int64(0), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "2", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "3", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + "lt": "+Inf", + }, + map[string]interface{}{ + "usage_bucket": int64(1), + }, + now), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + s := &Stackdriver{ + Log: testutil.Logger{}, + Project: "test", + RateLimit: 10, + GatherRawDistributionBuckets: true, + client: &MockStackdriverClient{ + ListMetricDescriptorsF: func(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) { + ch := make(chan *metricpb.MetricDescriptor, 1) + ch <- tt.descriptor + close(ch) + return ch, nil + }, + ListTimeSeriesF: func(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) { + ch := make(chan *monitoringpb.TimeSeries, 1) + ch <- tt.timeseries + close(ch) + return ch, nil + }, + CloseF: func() error { + return nil + }, + }, + } + + err := s.Gather(&acc) + require.NoError(t, err) + + actual := []telegraf.Metric{} + for _, m := range acc.Metrics { + actual = append(actual, testutil.FromTestMetric(m)) + } + + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} + +func TestGatherAlign(t *testing.T) { + now := time.Now().Round(time.Second) + tests := []struct { + name string + descriptor *metricpb.MetricDescriptor + timeseries []*monitoringpb.TimeSeries + expected []telegraf.Metric + }{ + { + name: "align", + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + timeseries: []*monitoringpb.TimeSeries{ + createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ), + createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ), + createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("telegraf/cpu", + map[string]string{ + "resource_type": "global", + "project_id": "test", + }, + map[string]interface{}{ + "usage_align_percentile_99": 42.0, + "usage_align_percentile_95": 42.0, + 
"usage_align_percentile_50": 42.0, + }, + now), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + listCall := 0 + var acc testutil.Accumulator + client := &MockStackdriverClient{ + ListMetricDescriptorsF: func(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) { + ch := make(chan *metricpb.MetricDescriptor, 1) + ch <- tt.descriptor + close(ch) + return ch, nil + }, + ListTimeSeriesF: func(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) { + ch := make(chan *monitoringpb.TimeSeries, 1) + ch <- tt.timeseries[listCall] + listCall++ + close(ch) + return ch, nil + }, + CloseF: func() error { + return nil + }, + } + + s := &Stackdriver{ + Log: testutil.Logger{}, + Project: "test", + RateLimit: 10, + GatherRawDistributionBuckets: false, + DistributionAggregationAligners: []string{ + "ALIGN_PERCENTILE_99", + "ALIGN_PERCENTILE_95", + "ALIGN_PERCENTILE_50", + }, + client: client, + } + + err := s.Gather(&acc) + require.NoError(t, err) + + actual := []telegraf.Metric{} + for _, m := range acc.Metrics { + actual = append(actual, testutil.FromTestMetric(m)) + } + + testutil.RequireMetricsEqual(t, tt.expected, actual) + + }) + } +} + +func TestListMetricDescriptorFilter(t *testing.T) { + type call struct { + name string + filter string + } + now := time.Now().Round(time.Second) + tests := []struct { + name string + stackdriver *Stackdriver + descriptor *metricpb.MetricDescriptor + calls []call + }{ + { + name: "simple", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage"`, + }, + }, + }, + { + name: "single resource labels string", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + ResourceLabels: []*Label{ + { + Key: "instance_name", + Value: `localhost`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND resource.labels.instance_name = "localhost"`, + }, + }, + }, + { + name: "single resource labels function", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + ResourceLabels: []*Label{ + { + Key: "instance_name", + Value: `starts_with("localhost")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND resource.labels.instance_name = starts_with("localhost")`, + }, + }, + }, + { + name: "multiple resource labels", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: 
[]string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + ResourceLabels: []*Label{ + { + Key: "instance_name", + Value: `localhost`, + }, + { + Key: "zone", + Value: `starts_with("us-")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND (resource.labels.instance_name = "localhost" OR resource.labels.zone = starts_with("us-"))`, + }, + }, + }, + { + name: "single metric label string", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + MetricLabels: []*Label{ + { + Key: "resource_type", + Value: `instance`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND metric.labels.resource_type = "instance"`, + }, + }, + }, + { + name: "single metric label function", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + MetricLabels: []*Label{ + { + Key: "resource_id", + Value: `starts_with("abc-")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND metric.labels.resource_id = starts_with("abc-")`, + }, + }, + }, + { + name: "multiple metric labels", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + MetricLabels: []*Label{ + { + Key: "resource_type", + Value: "instance", + }, + { + Key: "resource_id", + Value: `starts_with("abc-")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND (metric.labels.resource_type = "instance" OR metric.labels.resource_id = starts_with("abc-"))`, + }, + }, + }, + { + name: "all labels filters", + stackdriver: &Stackdriver{ + Project: "test", + MetricTypePrefixInclude: []string{"telegraf/cpu/usage"}, + Filter: &ListTimeSeriesFilter{ + ResourceLabels: []*Label{ + { + Key: "instance_name", + Value: `localhost`, + }, + { + Key: "zone", + Value: `starts_with("us-")`, + }, + }, + MetricLabels: []*Label{ + { + Key: "resource_type", + Value: "instance", + }, + { + Key: "resource_id", + Value: `starts_with("abc-")`, + }, + }, + }, + RateLimit: 1, + }, + descriptor: &metricpb.MetricDescriptor{ + Type: "telegraf/cpu/usage", + ValueType: metricpb.MetricDescriptor_DOUBLE, + }, + calls: []call{ + { + name: "ListMetricDescriptors", + filter: `metric.type = starts_with("telegraf/cpu/usage")`, + }, { + 
name: "ListTimeSeries", + filter: `metric.type = "telegraf/cpu/usage" AND (resource.labels.instance_name = "localhost" OR resource.labels.zone = starts_with("us-")) AND (metric.labels.resource_type = "instance" OR metric.labels.resource_id = starts_with("abc-"))`, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator + client := &MockStackdriverClient{ + ListMetricDescriptorsF: func(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (<-chan *metricpb.MetricDescriptor, error) { + ch := make(chan *metricpb.MetricDescriptor, 1) + ch <- tt.descriptor + close(ch) + return ch, nil + }, + ListTimeSeriesF: func(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) (<-chan *monitoringpb.TimeSeries, error) { + ch := make(chan *monitoringpb.TimeSeries, 1) + ch <- createTimeSeries( + &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + EndTime: ×tamp.Timestamp{ + Seconds: now.Unix(), + }, + }, + Value: &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 42.0, + }, + }, + }, + metricpb.MetricDescriptor_DOUBLE, + ) + close(ch) + return ch, nil + }, + CloseF: func() error { + return nil + }, + } + + s := tt.stackdriver + s.client = client + + err := s.Gather(&acc) + require.NoError(t, err) + + require.Equal(t, len(client.calls), len(tt.calls)) + for i, expected := range tt.calls { + actual := client.calls[i] + require.Equal(t, expected.name, actual.name) + + switch req := actual.args[1].(type) { + case *monitoringpb.ListMetricDescriptorsRequest: + require.Equal(t, expected.filter, req.Filter) + case *monitoringpb.ListTimeSeriesRequest: + require.Equal(t, expected.filter, req.Filter) + default: + panic("unknown request type") + } + } + }) + } +} + +func TestNewListTimeSeriesFilter(t *testing.T) { +} + +func TestTimeSeriesConfCacheIsValid(t *testing.T) { +} diff --git a/plugins/inputs/statsd/README.md b/plugins/inputs/statsd/README.md index 648fa72ac..79f759817 100644 --- a/plugins/inputs/statsd/README.md +++ b/plugins/inputs/statsd/README.md @@ -10,7 +10,7 @@ ## MaxTCPConnection - applicable when protocol is set to tcp (default=250) max_tcp_connections = 250 - + ## Enable TCP keep alive probes (default=false) tcp_keep_alive = false @@ -34,18 +34,24 @@ ## Reset timings & histograms every interval (default=true) delete_timings = true - ## Percentiles to calculate for timing & histogram stats - percentiles = [90] + ## Percentiles to calculate for timing & histogram stats. + percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] ## separator to use between elements of a statsd metric metric_separator = "_" ## Parses tags in the datadog statsd format ## http://docs.datadoghq.com/guides/dogstatsd/ + ## deprecated in 1.10; use datadog_extensions option instead parse_data_dog_tags = false + ## Parses extensions to statsd in the datadog statsd format + ## currently supports metrics and datadog tags. + ## http://docs.datadoghq.com/guides/dogstatsd/ + datadog_extensions = false + ## Statsd data translation templates, more info can be read here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite + ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # templates = [ # "cpu.* measurement*" # ] @@ -58,6 +64,10 @@ ## calculation of percentiles. Raising this limit increases the accuracy ## of percentiles but also increases the memory usage and cpu time. 
percentile_limit = 1000 + + ## Maximum socket buffer size in bytes, once the buffer fills up, metrics + ## will start dropping. Defaults to the OS default. + # read_buffer_size = 65535 ``` ### Description @@ -181,6 +191,7 @@ the accuracy of percentiles but also increases the memory usage and cpu time. - **templates** []string: Templates for transforming statsd buckets into influx measurements and tags. - **parse_data_dog_tags** boolean: Enable parsing of tags in DataDog's dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) +- **datadog_extensions** boolean: Enable parsing of DataDog's extensions to dogstatsd format (http://docs.datadoghq.com/guides/dogstatsd/) ### Statsd bucket -> InfluxDB line-protocol Templates @@ -223,5 +234,5 @@ mem.cached.localhost:256|g => mem_cached,host=localhost 256 ``` -There are many more options available, -[More details can be found here](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite) +Consult the [Template Patterns](/docs/TEMPLATE_PATTERN.md) documentation for +additional details. diff --git a/plugins/inputs/statsd/datadog.go b/plugins/inputs/statsd/datadog.go new file mode 100644 index 000000000..377db66e6 --- /dev/null +++ b/plugins/inputs/statsd/datadog.go @@ -0,0 +1,180 @@ +package statsd + +// this is adapted from datadog's apache licensed version at +// https://github.com/DataDog/datadog-agent/blob/fcfc74f106ab1bd6991dfc6a7061c558d934158a/pkg/dogstatsd/parser.go#L173 + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +const ( + priorityNormal = "normal" + priorityLow = "low" + + eventInfo = "info" + eventWarning = "warning" + eventError = "error" + eventSuccess = "success" +) + +var uncommenter = strings.NewReplacer("\\n", "\n") + +func (s *Statsd) parseEventMessage(now time.Time, message string, defaultHostname string) error { + // _e{title.length,text.length}:title|text + // [ + // |d:date_happened + // |p:priority + // |h:hostname + // |t:alert_type + // |s:source_type_nam + // |#tag1,tag2 + // ] + // + // + // tag is key:value + messageRaw := strings.SplitN(message, ":", 2) + if len(messageRaw) < 2 || len(messageRaw[0]) < 7 || len(messageRaw[1]) < 3 { + return fmt.Errorf("Invalid message format") + } + header := messageRaw[0] + message = messageRaw[1] + + rawLen := strings.SplitN(header[3:], ",", 2) + if len(rawLen) != 2 { + return fmt.Errorf("Invalid message format") + } + + titleLen, err := strconv.ParseInt(rawLen[0], 10, 64) + if err != nil { + return fmt.Errorf("Invalid message format, could not parse title.length: '%s'", rawLen[0]) + } + if len(rawLen[1]) < 1 { + return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0]) + } + textLen, err := strconv.ParseInt(rawLen[1][:len(rawLen[1])-1], 10, 64) + if err != nil { + return fmt.Errorf("Invalid message format, could not parse text.length: '%s'", rawLen[0]) + } + if titleLen+textLen+1 > int64(len(message)) { + return fmt.Errorf("Invalid message format, title.length and text.length exceed total message length") + } + + rawTitle := message[:titleLen] + rawText := message[titleLen+1 : titleLen+1+textLen] + message = message[titleLen+1+textLen:] + + if len(rawTitle) == 0 || len(rawText) == 0 { + return fmt.Errorf("Invalid event message format: empty 'title' or 'text' field") + } + + name := rawTitle + tags := make(map[string]string, strings.Count(message, ",")+2) // allocate for the approximate number of tags + fields := make(map[string]interface{}, 9) + fields["alert_type"] = eventInfo // default 
event type + fields["text"] = uncommenter.Replace(string(rawText)) + if defaultHostname != "" { + tags["source"] = defaultHostname + } + fields["priority"] = priorityNormal + ts := now + if len(message) < 2 { + s.acc.AddFields(name, fields, tags, ts) + return nil + } + + rawMetadataFields := strings.Split(message[1:], "|") + for i := range rawMetadataFields { + if len(rawMetadataFields[i]) < 2 { + return errors.New("too short metadata field") + } + switch rawMetadataFields[i][:2] { + case "d:": + ts, err := strconv.ParseInt(rawMetadataFields[i][2:], 10, 64) + if err != nil { + continue + } + fields["ts"] = ts + case "p:": + switch rawMetadataFields[i][2:] { + case priorityLow: + fields["priority"] = priorityLow + case priorityNormal: // we already used this as a default + default: + continue + } + case "h:": + tags["source"] = rawMetadataFields[i][2:] + case "t:": + switch rawMetadataFields[i][2:] { + case eventError, eventWarning, eventSuccess, eventInfo: + fields["alert_type"] = rawMetadataFields[i][2:] // already set for info + default: + continue + } + case "k:": + tags["aggregation_key"] = rawMetadataFields[i][2:] + case "s:": + fields["source_type_name"] = rawMetadataFields[i][2:] + default: + if rawMetadataFields[i][0] == '#' { + parseDataDogTags(tags, rawMetadataFields[i][1:]) + } else { + return fmt.Errorf("unknown metadata type: '%s'", rawMetadataFields[i]) + } + } + } + // Use source tag because host is reserved tag key in Telegraf. + // In datadog the host tag and `h:` are interchangable, so we have to chech for the host tag. + if host, ok := tags["host"]; ok { + delete(tags, "host") + tags["source"] = host + } + s.acc.AddFields(name, fields, tags, ts) + return nil +} + +func parseDataDogTags(tags map[string]string, message string) { + if len(message) == 0 { + return + } + + start, i := 0, 0 + var k string + var inVal bool // check if we are parsing the value part of the tag + for i = range message { + if message[i] == ',' { + if k == "" { + k = message[start:i] + tags[k] = "true" // this is because influx doesn't support empty tags + start = i + 1 + continue + } + v := message[start:i] + if v == "" { + v = "true" + } + tags[k] = v + start = i + 1 + k, inVal = "", false // reset state vars + } else if message[i] == ':' && !inVal { + k = message[start:i] + start = i + 1 + inVal = true + } + } + if k == "" && start < i+1 { + tags[message[start:i+1]] = "true" + } + // grab the last value + if k != "" { + if start < i+1 { + tags[k] = message[start : i+1] + return + } + tags[k] = "true" + } +} diff --git a/plugins/inputs/statsd/datadog_test.go b/plugins/inputs/statsd/datadog_test.go new file mode 100644 index 000000000..9800d9e67 --- /dev/null +++ b/plugins/inputs/statsd/datadog_test.go @@ -0,0 +1,484 @@ +package statsd + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestEventGather(t *testing.T) { + now := time.Now() + type expected struct { + title string + tags map[string]string + fields map[string]interface{} + } + tests := []struct { + name string + message string + hostname string + now time.Time + err bool + expected expected + }{{ + name: "basic", + message: "_e{10,9}:test title|test text", + hostname: "default-hostname", + now: now, + err: false, + expected: expected{ + title: "test title", + tags: map[string]string{"source": "default-hostname"}, + fields: map[string]interface{}{ + "priority": priorityNormal, + "alert_type": "info", + "text": "test text", + }, + }, + }, + { + name: "escape 
some stuff", + message: "_e{10,24}:test title|test\\line1\\nline2\\nline3", + hostname: "default-hostname", + now: now.Add(1), + err: false, + expected: expected{ + title: "test title", + tags: map[string]string{"source": "default-hostname"}, + fields: map[string]interface{}{ + "priority": priorityNormal, + "alert_type": "info", + "text": "test\\line1\nline2\nline3", + }, + }, + }, + { + name: "custom time", + message: "_e{10,9}:test title|test text|d:21", + hostname: "default-hostname", + now: now.Add(2), + err: false, + expected: expected{ + title: "test title", + tags: map[string]string{"source": "default-hostname"}, + fields: map[string]interface{}{ + "priority": priorityNormal, + "alert_type": "info", + "text": "test text", + "ts": int64(21), + }, + }, + }, + } + acc := &testutil.Accumulator{} + s := NewTestStatsd() + require.NoError(t, s.Start(acc)) + defer s.Stop() + + for i := range tests { + t.Run(tests[i].name, func(t *testing.T) { + err := s.parseEventMessage(tests[i].now, tests[i].message, tests[i].hostname) + if tests[i].err { + require.NotNil(t, err) + } else { + require.Nil(t, err) + } + require.Equal(t, uint64(i+1), acc.NMetrics()) + + require.Nil(t, err) + require.Equal(t, tests[i].expected.title, acc.Metrics[i].Measurement) + require.Equal(t, tests[i].expected.tags, acc.Metrics[i].Tags) + require.Equal(t, tests[i].expected.fields, acc.Metrics[i].Fields) + }) + } +} + +// These tests adapted from tests in +// https://github.com/DataDog/datadog-agent/blob/master/pkg/dogstatsd/parser_test.go +// to ensure compatibility with the datadog-agent parser + +func TestEvents(t *testing.T) { + now := time.Now() + type args struct { + now time.Time + message string + hostname string + } + type expected struct { + title string + text interface{} + now time.Time + ts interface{} + priority string + source string + alertType interface{} + aggregationKey string + sourceTypeName interface{} + checkTags map[string]string + } + + tests := []struct { + name string + args args + expected expected + }{ + { + name: "event minimal", + args: args{ + now: now, + message: "_e{10,9}:test title|test text", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now, + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "", + }, + }, + { + name: "event multilines text", + args: args{ + now: now.Add(1), + message: "_e{10,24}:test title|test\\line1\\nline2\\nline3", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test\\line1\nline2\nline3", + now: now.Add(1), + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "", + }, + }, + { + name: "event pipe in title", + args: args{ + now: now.Add(2), + message: "_e{10,24}:test|title|test\\line1\\nline2\\nline3", + hostname: "default-hostname", + }, + expected: expected{ + title: "test|title", + text: "test\\line1\nline2\nline3", + now: now.Add(2), + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "", + }, + }, + { + name: "event metadata timestamp", + args: args{ + now: now.Add(3), + message: "_e{10,9}:test title|test text|d:21", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(3), + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "", + ts: int64(21), + }, + }, + { + name: "event metadata priority", + args: 
args{ + now: now.Add(4), + message: "_e{10,9}:test title|test text|p:low", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(4), + priority: priorityLow, + source: "default-hostname", + alertType: eventInfo, + }, + }, + { + name: "event metadata hostname", + args: args{ + now: now.Add(5), + message: "_e{10,9}:test title|test text|h:localhost", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(5), + priority: priorityNormal, + source: "localhost", + alertType: eventInfo, + }, + }, + { + name: "event metadata hostname in tag", + args: args{ + now: now.Add(6), + message: "_e{10,9}:test title|test text|#host:localhost", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(6), + priority: priorityNormal, + source: "localhost", + alertType: eventInfo, + }, + }, + { + name: "event metadata empty host tag", + args: args{ + now: now.Add(7), + message: "_e{10,9}:test title|test text|#host:,other:tag", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(7), + priority: priorityNormal, + source: "true", + alertType: eventInfo, + checkTags: map[string]string{"other": "tag", "source": "true"}, + }, + }, + { + name: "event metadata alert type", + args: args{ + now: now.Add(8), + message: "_e{10,9}:test title|test text|t:warning", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(8), + priority: priorityNormal, + source: "default-hostname", + alertType: eventWarning, + }, + }, + { + name: "event metadata aggregation key", + args: args{ + now: now.Add(9), + message: "_e{10,9}:test title|test text|k:some aggregation key", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(9), + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "some aggregation key", + }, + }, + { + name: "event metadata aggregation key", + args: args{ + now: now.Add(10), + message: "_e{10,9}:test title|test text|k:some aggregation key", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(10), + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + aggregationKey: "some aggregation key", + }, + }, + { + name: "event metadata source type", + args: args{ + now: now.Add(11), + message: "_e{10,9}:test title|test text|s:this is the source", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(11), + priority: priorityNormal, + source: "default-hostname", + sourceTypeName: "this is the source", + alertType: eventInfo, + }, + }, + { + name: "event metadata source type", + args: args{ + now: now.Add(11), + message: "_e{10,9}:test title|test text|s:this is the source", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(11), + priority: priorityNormal, + source: "default-hostname", + sourceTypeName: "this is the source", + alertType: eventInfo, + }, + }, + { + name: "event metadata source tags", + args: args{ + now: now.Add(11), + message: "_e{10,9}:test title|test text|#tag1,tag2:test", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test 
text", + now: now.Add(11), + priority: priorityNormal, + source: "default-hostname", + alertType: eventInfo, + checkTags: map[string]string{"tag1": "true", "tag2": "test", "source": "default-hostname"}, + }, + }, + { + name: "event metadata multiple", + args: args{ + now: now.Add(11), + message: "_e{10,9}:test title|test text|t:warning|d:12345|p:low|h:some.host|k:aggKey|s:source test|#tag1,tag2:test", + hostname: "default-hostname", + }, + expected: expected{ + title: "test title", + text: "test text", + now: now.Add(11), + priority: priorityLow, + source: "some.host", + ts: int64(12345), + alertType: eventWarning, + aggregationKey: "aggKey", + sourceTypeName: "source test", + checkTags: map[string]string{"aggregation_key": "aggKey", "tag1": "true", "tag2": "test", "source": "some.host"}, + }, + }, + } + s := NewTestStatsd() + acc := &testutil.Accumulator{} + require.NoError(t, s.Start(acc)) + defer s.Stop() + for i := range tests { + t.Run(tests[i].name, func(t *testing.T) { + acc.ClearMetrics() + err := s.parseEventMessage(tests[i].args.now, tests[i].args.message, tests[i].args.hostname) + require.Nil(t, err) + m := acc.Metrics[0] + require.Equal(t, tests[i].expected.title, m.Measurement) + require.Equal(t, tests[i].expected.text, m.Fields["text"]) + require.Equal(t, tests[i].expected.now, m.Time) + require.Equal(t, tests[i].expected.ts, m.Fields["ts"]) + require.Equal(t, tests[i].expected.priority, m.Fields["priority"]) + require.Equal(t, tests[i].expected.source, m.Tags["source"]) + require.Equal(t, tests[i].expected.alertType, m.Fields["alert_type"]) + require.Equal(t, tests[i].expected.aggregationKey, m.Tags["aggregation_key"]) + require.Equal(t, tests[i].expected.sourceTypeName, m.Fields["source_type_name"]) + if tests[i].expected.checkTags != nil { + require.Equal(t, tests[i].expected.checkTags, m.Tags) + } + }) + } +} + +func TestEventError(t *testing.T) { + now := time.Now() + s := NewTestStatsd() + acc := &testutil.Accumulator{} + require.NoError(t, s.Start(acc)) + defer s.Stop() + + // missing length header + err := s.parseEventMessage(now, "_e:title|text", "default-hostname") + require.Error(t, err) + + // greater length than packet + err = s.parseEventMessage(now, "_e{10,10}:title|text", "default-hostname") + require.Error(t, err) + + // zero length + err = s.parseEventMessage(now, "_e{0,0}:a|a", "default-hostname") + require.Error(t, err) + + // missing title or text length + err = s.parseEventMessage(now, "_e{5555:title|text", "default-hostname") + require.Error(t, err) + + // missing wrong len format + err = s.parseEventMessage(now, "_e{a,1}:title|text", "default-hostname") + require.Error(t, err) + + err = s.parseEventMessage(now, "_e{1,a}:title|text", "default-hostname") + require.Error(t, err) + + // missing title or text length + err = s.parseEventMessage(now, "_e{5,}:title|text", "default-hostname") + require.Error(t, err) + + err = s.parseEventMessage(now, "_e{100,:title|text", "default-hostname") + require.Error(t, err) + + err = s.parseEventMessage(now, "_e,100:title|text", "default-hostname") + require.Error(t, err) + + err = s.parseEventMessage(now, "_e{,4}:title|text", "default-hostname") + require.Error(t, err) + + err = s.parseEventMessage(now, "_e{}:title|text", "default-hostname") + require.Error(t, err) + + err = s.parseEventMessage(now, "_e{,}:title|text", "default-hostname") + require.Error(t, err) + + // not enough information + err = s.parseEventMessage(now, "_e|text", "default-hostname") + require.Error(t, err) + + err = s.parseEventMessage(now, 
"_e:|text", "default-hostname") + require.Error(t, err) + + // invalid timestamp + err = s.parseEventMessage(now, "_e{5,4}:title|text|d:abc", "default-hostname") + require.NoError(t, err) + + // invalid priority + err = s.parseEventMessage(now, "_e{5,4}:title|text|p:urgent", "default-hostname") + require.NoError(t, err) + + // invalid priority + err = s.parseEventMessage(now, "_e{5,4}:title|text|p:urgent", "default-hostname") + require.NoError(t, err) + + // invalid alert type + err = s.parseEventMessage(now, "_e{5,4}:title|text|t:test", "default-hostname") + require.NoError(t, err) + + // unknown metadata + err = s.parseEventMessage(now, "_e{5,4}:title|text|x:1234", "default-hostname") + require.Error(t, err) +} diff --git a/plugins/inputs/statsd/running_stats.go b/plugins/inputs/statsd/running_stats.go index 2395ab143..e33749b2c 100644 --- a/plugins/inputs/statsd/running_stats.go +++ b/plugins/inputs/statsd/running_stats.go @@ -49,7 +49,7 @@ func (rs *RunningStats) AddValue(v float64) { } // These are used for the running mean and variance - rs.n += 1 + rs.n++ rs.ex += v - rs.k rs.ex2 += (v - rs.k) * (v - rs.k) @@ -99,7 +99,7 @@ func (rs *RunningStats) Count() int64 { return rs.n } -func (rs *RunningStats) Percentile(n int) float64 { +func (rs *RunningStats) Percentile(n float64) float64 { if n > 100 { n = 100 } @@ -109,16 +109,16 @@ func (rs *RunningStats) Percentile(n int) float64 { rs.sorted = true } - i := int(float64(len(rs.perc)) * float64(n) / float64(100)) + i := float64(len(rs.perc)) * n / float64(100) return rs.perc[clamp(i, 0, len(rs.perc)-1)] } -func clamp(i int, min int, max int) int { - if i < min { +func clamp(i float64, min int, max int) int { + if i < float64(min) { return min } - if i > max { + if i > float64(max) { return max } - return i + return int(i) } diff --git a/plugins/inputs/statsd/running_stats_test.go b/plugins/inputs/statsd/running_stats_test.go index 4571f76d7..a52209c56 100644 --- a/plugins/inputs/statsd/running_stats_test.go +++ b/plugins/inputs/statsd/running_stats_test.go @@ -26,6 +26,9 @@ func TestRunningStats_Single(t *testing.T) { if rs.Percentile(100) != 10.1 { t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100)) } + if rs.Percentile(99.95) != 10.1 { + t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(99.95)) + } if rs.Percentile(90) != 10.1 { t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90)) } @@ -67,6 +70,9 @@ func TestRunningStats_Duplicate(t *testing.T) { if rs.Percentile(100) != 10.1 { t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(100)) } + if rs.Percentile(99.95) != 10.1 { + t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(99.95)) + } if rs.Percentile(90) != 10.1 { t.Errorf("Expected %v, got %v", 10.1, rs.Percentile(90)) } @@ -108,12 +114,21 @@ func TestRunningStats(t *testing.T) { if rs.Percentile(100) != 45 { t.Errorf("Expected %v, got %v", 45, rs.Percentile(100)) } + if rs.Percentile(99.98) != 45 { + t.Errorf("Expected %v, got %v", 45, rs.Percentile(99.98)) + } if rs.Percentile(90) != 32 { t.Errorf("Expected %v, got %v", 32, rs.Percentile(90)) } + if rs.Percentile(50.1) != 11 { + t.Errorf("Expected %v, got %v", 11, rs.Percentile(50.1)) + } if rs.Percentile(50) != 11 { t.Errorf("Expected %v, got %v", 11, rs.Percentile(50)) } + if rs.Percentile(49.9) != 10 { + t.Errorf("Expected %v, got %v", 10, rs.Percentile(49.9)) + } if rs.Percentile(0) != 5 { t.Errorf("Expected %v, got %v", 5, rs.Percentile(0)) } diff --git a/plugins/inputs/statsd/statsd.go b/plugins/inputs/statsd/statsd.go index 3e5a73aa3..9c5780d00 100644 --- 
a/plugins/inputs/statsd/statsd.go +++ b/plugins/inputs/statsd/statsd.go @@ -5,7 +5,6 @@ import ( "bytes" "errors" "fmt" - "log" "net" "sort" "strconv" @@ -13,16 +12,15 @@ import ( "sync" "time" - "github.com/influxdata/telegraf/plugins/parsers/graphite" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/parsers/graphite" "github.com/influxdata/telegraf/selfstat" ) const ( - // UDP packet limit, see + // UDP_MAX_PACKET_SIZE is the UDP packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure UDP_MAX_PACKET_SIZE int = 64 * 1024 @@ -33,15 +31,11 @@ const ( defaultSeparator = "_" defaultAllowPendingMessage = 10000 MaxTCPConnections = 250 + + parserGoRoutines = 5 ) -var dropwarn = "E! Error: statsd message queue full. " + - "We have dropped %d messages so far. " + - "You may want to increase allowed_pending_messages in the config\n" - -var malformedwarn = "E! Statsd over TCP has received %d malformed packets" + - " thus far." - +// Statsd allows the importing of statsd and dogstatsd data. type Statsd struct { // Protocol used on listener - udp or tcp Protocol string `toml:"protocol"` @@ -55,7 +49,7 @@ type Statsd struct { // Percentiles specifies the percentiles that will be calculated for timing // and histogram stats. - Percentiles []int + Percentiles []internal.Number PercentileLimit int DeleteGauges bool @@ -68,7 +62,12 @@ type Statsd struct { MetricSeparator string // This flag enables parsing of tags in the dogstatsd extension to the // statsd protocol (http://docs.datadoghq.com/guides/dogstatsd/) - ParseDataDogTags bool + ParseDataDogTags bool // deprecated in 1.10; use datadog_extensions + + // Parses extensions to statsd in the datadog statsd format + // currently supports metrics and datadog tags. 
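+ // For example, when enabled the plugin accepts dogstatsd metric lines with tags:
+ //   users.online:1|c|@0.5|#country:china,environment:production
+ // as well as dogstatsd event lines:
+ //   _e{10,9}:test title|test text|t:warning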
+ // http://docs.datadoghq.com/guides/dogstatsd/ + DataDogExtensions bool `toml:"datadog_extensions"` // UDPPacketSize is deprecated, it's only here for legacy support // we now always create 1 max size buffer and then copy only what we need @@ -76,6 +75,8 @@ type Statsd struct { // see https://github.com/influxdata/telegraf/pull/992 UDPPacketSize int `toml:"udp_packet_size"` + ReadBufferSize int `toml:"read_buffer_size"` + sync.Mutex // Lock for preventing a data race during resource cleanup cleanup sync.Mutex @@ -90,7 +91,7 @@ type Statsd struct { malformed int // Channel for all incoming statsd packets - in chan *bytes.Buffer + in chan input done chan struct{} // Cache gauges, counters & sets so they can be aggregated as they arrive @@ -123,13 +124,25 @@ type Statsd struct { MaxConnections selfstat.Stat CurrentConnections selfstat.Stat TotalConnections selfstat.Stat - PacketsRecv selfstat.Stat - BytesRecv selfstat.Stat + TCPPacketsRecv selfstat.Stat + TCPBytesRecv selfstat.Stat + UDPPacketsRecv selfstat.Stat + UDPPacketsDrop selfstat.Stat + UDPBytesRecv selfstat.Stat + ParseTimeNS selfstat.Stat + + Log telegraf.Logger // A pool of byte slices to handle parsing bufPool sync.Pool } +type input struct { + *bytes.Buffer + time.Time + Addr string +} + // One statsd metric, form is :||@ type metric struct { name string @@ -204,7 +217,7 @@ const sampleConfig = ` delete_timings = true ## Percentiles to calculate for timing & histogram stats - percentiles = [90] + percentiles = [50.0, 90.0, 99.0, 99.9, 99.95, 100.0] ## separator to use between elements of a statsd metric metric_separator = "_" @@ -213,8 +226,11 @@ const sampleConfig = ` ## http://docs.datadoghq.com/guides/dogstatsd/ parse_data_dog_tags = false + ## Parses datadog extensions to the statsd format + datadog_extensions = false + ## Statsd data translation templates, more info can be read here: - ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite + ## https://github.com/influxdata/telegraf/blob/master/docs/TEMPLATE_PATTERN.md # templates = [ # "cpu.* measurement*" # ] @@ -238,12 +254,12 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { defer s.Unlock() now := time.Now() - for _, metric := range s.timings { + for _, m := range s.timings { // Defining a template to parse field names for timers allows us to split // out multiple fields per timer. In this case we prefix each stat with the // field name and store these all in a single measurement. 
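 // For example, a timing bucket emits "mean", "stddev", "sum", "upper", "lower",
 // "count" and one "<percentile>_percentile" field per configured percentile; when
 // a template captures a field name, each of these is prefixed with that field
 // name and an underscore (e.g. "timing_90_percentile").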
fields := make(map[string]interface{}) - for fieldName, stats := range metric.fields { + for fieldName, stats := range m.fields { var prefix string if fieldName != defaultFieldName { prefix = fieldName + "_" @@ -255,46 +271,52 @@ func (s *Statsd) Gather(acc telegraf.Accumulator) error { fields[prefix+"lower"] = stats.Lower() fields[prefix+"count"] = stats.Count() for _, percentile := range s.Percentiles { - name := fmt.Sprintf("%s%v_percentile", prefix, percentile) - fields[name] = stats.Percentile(percentile) + name := fmt.Sprintf("%s%v_percentile", prefix, percentile.Value) + fields[name] = stats.Percentile(percentile.Value) } } - acc.AddFields(metric.name, fields, metric.tags, now) + acc.AddFields(m.name, fields, m.tags, now) } if s.DeleteTimings { s.timings = make(map[string]cachedtimings) } - for _, metric := range s.gauges { - acc.AddGauge(metric.name, metric.fields, metric.tags, now) + for _, m := range s.gauges { + acc.AddGauge(m.name, m.fields, m.tags, now) } if s.DeleteGauges { s.gauges = make(map[string]cachedgauge) } - for _, metric := range s.counters { - acc.AddCounter(metric.name, metric.fields, metric.tags, now) + for _, m := range s.counters { + acc.AddCounter(m.name, m.fields, m.tags, now) } if s.DeleteCounters { s.counters = make(map[string]cachedcounter) } - for _, metric := range s.sets { + for _, m := range s.sets { fields := make(map[string]interface{}) - for field, set := range metric.fields { + for field, set := range m.fields { fields[field] = int64(len(set)) } - acc.AddFields(metric.name, fields, metric.tags, now) + acc.AddFields(m.name, fields, m.tags, now) } if s.DeleteSets { s.sets = make(map[string]cachedset) } - return nil } -func (s *Statsd) Start(_ telegraf.Accumulator) error { +func (s *Statsd) Start(ac telegraf.Accumulator) error { + if s.ParseDataDogTags { + s.DataDogExtensions = true + s.Log.Warn("'parse_data_dog_tags' config option is deprecated, please use 'datadog_extensions' instead") + } + + s.acc = ac + // Make data structures s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) @@ -311,10 +333,14 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { s.MaxConnections.Set(int64(s.MaxTCPConnections)) s.CurrentConnections = selfstat.Register("statsd", "tcp_current_connections", tags) s.TotalConnections = selfstat.Register("statsd", "tcp_total_connections", tags) - s.PacketsRecv = selfstat.Register("statsd", "tcp_packets_received", tags) - s.BytesRecv = selfstat.Register("statsd", "tcp_bytes_received", tags) + s.TCPPacketsRecv = selfstat.Register("statsd", "tcp_packets_received", tags) + s.TCPBytesRecv = selfstat.Register("statsd", "tcp_bytes_received", tags) + s.UDPPacketsRecv = selfstat.Register("statsd", "udp_packets_received", tags) + s.UDPPacketsDrop = selfstat.Register("statsd", "udp_packets_dropped", tags) + s.UDPBytesRecv = selfstat.Register("statsd", "udp_bytes_received", tags) + s.ParseTimeNS = selfstat.Register("statsd", "parse_time_ns", tags) - s.in = make(chan *bytes.Buffer, s.AllowedPendingMessages) + s.in = make(chan input, s.AllowedPendingMessages) s.done = make(chan struct{}) s.accept = make(chan bool, s.MaxTCPConnections) s.conns = make(map[string]*net.TCPConn) @@ -328,46 +354,73 @@ func (s *Statsd) Start(_ telegraf.Accumulator) error { } if s.ConvertNames { - log.Printf("I! 
WARNING statsd: convert_names config option is deprecated," + - " please use metric_separator instead") + s.Log.Warn("'convert_names' config option is deprecated, please use 'metric_separator' instead") } if s.MetricSeparator == "" { s.MetricSeparator = defaultSeparator } - s.wg.Add(2) - // Start the UDP listener if s.isUDP() { - go s.udpListen() + address, err := net.ResolveUDPAddr(s.Protocol, s.ServiceAddress) + if err != nil { + return err + } + + conn, err := net.ListenUDP(s.Protocol, address) + if err != nil { + return err + } + + s.Log.Infof("UDP listening on %q", conn.LocalAddr().String()) + s.UDPlistener = conn + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.udpListen(conn) + }() } else { - go s.tcpListen() + address, err := net.ResolveTCPAddr("tcp", s.ServiceAddress) + if err != nil { + return err + } + listener, err := net.ListenTCP("tcp", address) + if err != nil { + return err + } + + s.Log.Infof("TCP listening on %q", listener.Addr().String()) + s.TCPlistener = listener + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.tcpListen(listener) + }() } - // Start the line parser - go s.parser() - log.Printf("I! Started the statsd service on %s\n", s.ServiceAddress) + + for i := 1; i <= parserGoRoutines; i++ { + // Start the line parser + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.parser() + }() + } + s.Log.Infof("Started the statsd service on %q", s.ServiceAddress) return nil } // tcpListen() starts listening for udp packets on the configured port. -func (s *Statsd) tcpListen() error { - defer s.wg.Done() - // Start listener - var err error - address, _ := net.ResolveTCPAddr("tcp", s.ServiceAddress) - s.TCPlistener, err = net.ListenTCP("tcp", address) - if err != nil { - log.Fatalf("ERROR: ListenTCP - %s", err) - return err - } - log.Println("I! TCP Statsd listening on: ", s.TCPlistener.Addr().String()) +func (s *Statsd) tcpListen(listener *net.TCPListener) error { for { select { case <-s.done: return nil default: // Accept connection: - conn, err := s.TCPlistener.AcceptTCP() + conn, err := listener.AcceptTCP() if err != nil { return err } @@ -401,15 +454,10 @@ func (s *Statsd) tcpListen() error { } // udpListen starts listening for udp packets on the configured port. -func (s *Statsd) udpListen() error { - defer s.wg.Done() - var err error - address, _ := net.ResolveUDPAddr(s.Protocol, s.ServiceAddress) - s.UDPlistener, err = net.ListenUDP(s.Protocol, address) - if err != nil { - log.Fatalf("ERROR: ListenUDP - %s", err) +func (s *Statsd) udpListen(conn *net.UDPConn) error { + if s.ReadBufferSize > 0 { + s.UDPlistener.SetReadBuffer(s.ReadBufferSize) } - log.Println("I! Statsd UDP listener listening on: ", s.UDPlistener.LocalAddr().String()) buf := make([]byte, UDP_MAX_PACKET_SIZE) for { @@ -417,21 +465,31 @@ func (s *Statsd) udpListen() error { case <-s.done: return nil default: - n, _, err := s.UDPlistener.ReadFromUDP(buf) - if err != nil && !strings.Contains(err.Error(), "closed network") { - log.Printf("E! 
Error READ: %s\n", err.Error()) - continue + n, addr, err := conn.ReadFromUDP(buf) + if err != nil { + if !strings.Contains(err.Error(), "closed network") { + s.Log.Errorf("Error reading: %s", err.Error()) + continue + } + return err } + s.UDPPacketsRecv.Incr(1) + s.UDPBytesRecv.Incr(int64(n)) b := s.bufPool.Get().(*bytes.Buffer) b.Reset() b.Write(buf[:n]) - select { - case s.in <- b: + case s.in <- input{ + Buffer: b, + Time: time.Now(), + Addr: addr.IP.String()}: default: + s.UDPPacketsDrop.Incr(1) s.drops++ if s.drops == 1 || s.AllowedPendingMessages == 0 || s.drops%s.AllowedPendingMessages == 0 { - log.Printf(dropwarn, s.drops) + s.Log.Errorf("Statsd message queue full. "+ + "We have dropped %d messages so far. "+ + "You may want to increase allowed_pending_messages in the config", s.drops) } } } @@ -442,20 +500,26 @@ func (s *Statsd) udpListen() error { // packet into statsd strings and then calls parseStatsdLine, which parses a // single statsd metric into a struct. func (s *Statsd) parser() error { - defer s.wg.Done() for { select { case <-s.done: return nil - case buf := <-s.in: - lines := strings.Split(buf.String(), "\n") - s.bufPool.Put(buf) + case in := <-s.in: + start := time.Now() + lines := strings.Split(in.Buffer.String(), "\n") + s.bufPool.Put(in.Buffer) for _, line := range lines { line = strings.TrimSpace(line) - if line != "" { + switch { + case line == "": + case s.DataDogExtensions && strings.HasPrefix(line, "_e"): + s.parseEventMessage(in.Time, line, in.Addr) + default: s.parseStatsdLine(line) } } + elapsed := time.Since(start) + s.ParseTimeNS.Set(elapsed.Nanoseconds()) } } } @@ -467,7 +531,7 @@ func (s *Statsd) parseStatsdLine(line string) error { defer s.Unlock() lineTags := make(map[string]string) - if s.ParseDataDogTags { + if s.DataDogExtensions { recombinedSegments := make([]string, 0) // datadog tags look like this: // users.online:1|c|@0.5|#country:china,environment:production @@ -478,24 +542,7 @@ func (s *Statsd) parseStatsdLine(line string) error { for _, segment := range pipesplit { if len(segment) > 0 && segment[0] == '#' { // we have ourselves a tag; they are comma separated - tagstr := segment[1:] - tags := strings.Split(tagstr, ",") - for _, tag := range tags { - ts := strings.SplitN(tag, ":", 2) - var k, v string - switch len(ts) { - case 1: - // just a tag - k = ts[0] - v = "" - case 2: - k = ts[0] - v = ts[1] - } - if k != "" { - lineTags[k] = v - } - } + parseDataDogTags(lineTags, segment[1:]) } else { recombinedSegments = append(recombinedSegments, segment) } @@ -506,8 +553,8 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate splitting the line on ":" bits := strings.Split(line, ":") if len(bits) < 2 { - log.Printf("E! Error: splitting ':', Unable to parse metric: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Splitting ':', unable to parse metric: %s", line) + return errors.New("error Parsing statsd line") } // Extract bucket name from individual metric bits @@ -522,22 +569,22 @@ func (s *Statsd) parseStatsdLine(line string) error { // Validate splitting the bit on "|" pipesplit := strings.Split(bit, "|") if len(pipesplit) < 2 { - log.Printf("E! Error: splitting '|', Unable to parse metric: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Splitting '|', unable to parse metric: %s", line) + return errors.New("error parsing statsd line") } else if len(pipesplit) > 2 { sr := pipesplit[2] - errmsg := "E! 
Error: parsing sample rate, %s, it must be in format like: " + - "@0.1, @0.5, etc. Ignoring sample rate for line: %s\n" + if strings.Contains(sr, "@") && len(sr) > 1 { samplerate, err := strconv.ParseFloat(sr[1:], 64) if err != nil { - log.Printf(errmsg, err.Error(), line) + s.Log.Errorf("Parsing sample rate: %s", err.Error()) } else { // sample rate successfully parsed m.samplerate = samplerate } } else { - log.Printf(errmsg, "", line) + s.Log.Debugf("Sample rate must be in format like: "+ + "@0.1, @0.5, etc. Ignoring sample rate for line: %s", line) } } @@ -546,15 +593,15 @@ func (s *Statsd) parseStatsdLine(line string) error { case "g", "c", "s", "ms", "h": m.mtype = pipesplit[1] default: - log.Printf("E! Error: Statsd Metric type %s unsupported", pipesplit[1]) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Metric type %q unsupported", pipesplit[1]) + return errors.New("error parsing statsd line") } // Parse the value if strings.HasPrefix(pipesplit[0], "-") || strings.HasPrefix(pipesplit[0], "+") { if m.mtype != "g" && m.mtype != "c" { - log.Printf("E! Error: +- values are only supported for gauges & counters: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("+- values are only supported for gauges & counters, unable to parse metric: %s", line) + return errors.New("error parsing statsd line") } m.additive = true } @@ -563,8 +610,8 @@ func (s *Statsd) parseStatsdLine(line string) error { case "g", "ms", "h": v, err := strconv.ParseFloat(pipesplit[0], 64) if err != nil { - log.Printf("E! Error: parsing value to float64: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Parsing value to float64, unable to parse metric: %s", line) + return errors.New("error parsing statsd line") } m.floatvalue = v case "c": @@ -573,8 +620,8 @@ func (s *Statsd) parseStatsdLine(line string) error { if err != nil { v2, err2 := strconv.ParseFloat(pipesplit[0], 64) if err2 != nil { - log.Printf("E! Error: parsing value to int64: %s\n", line) - return errors.New("Error Parsing statsd line") + s.Log.Errorf("Parsing value to int64, unable to parse metric: %s", line) + return errors.New("error parsing statsd line") } v = int64(v2) } @@ -601,7 +648,6 @@ func (s *Statsd) parseStatsdLine(line string) error { case "h": m.tags["metric_type"] = "histogram" } - if len(lineTags) > 0 { for k, v := range lineTags { m.tags[k] = v @@ -787,6 +833,11 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { s.CurrentConnections.Incr(-1) }() + var remoteIP string + if addr, ok := conn.RemoteAddr().(*net.TCPAddr); ok { + remoteIP = addr.IP.String() + } + var n int scanner := bufio.NewScanner(conn) for { @@ -801,8 +852,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { if n == 0 { continue } - s.BytesRecv.Incr(int64(n)) - s.PacketsRecv.Incr(1) + s.TCPBytesRecv.Incr(int64(n)) + s.TCPPacketsRecv.Incr(1) b := s.bufPool.Get().(*bytes.Buffer) b.Reset() @@ -810,11 +861,13 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { b.WriteByte('\n') select { - case s.in <- b: + case s.in <- input{Buffer: b, Time: time.Now(), Addr: remoteIP}: default: s.drops++ if s.drops == 1 || s.drops%s.AllowedPendingMessages == 0 { - log.Printf(dropwarn, s.drops) + s.Log.Errorf("Statsd message queue full. "+ + "We have dropped %d messages so far. 
"+ + "You may want to increase allowed_pending_messages in the config", s.drops) } } } @@ -824,9 +877,8 @@ func (s *Statsd) handler(conn *net.TCPConn, id string) { // refuser refuses a TCP connection func (s *Statsd) refuser(conn *net.TCPConn) { conn.Close() - log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr()) - log.Printf("I! WARNING: Maximum TCP Connections reached, you may want to" + - " adjust max_tcp_connections") + s.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) + s.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") } // forget a TCP connection @@ -845,7 +897,7 @@ func (s *Statsd) remember(id string, conn *net.TCPConn) { func (s *Statsd) Stop() { s.Lock() - log.Println("I! Stopping the statsd service") + s.Log.Infof("Stopping the statsd service") close(s.done) if s.isUDP() { s.UDPlistener.Close() @@ -871,7 +923,7 @@ func (s *Statsd) Stop() { s.Lock() close(s.in) - log.Println("I! Stopped Statsd listener service on ", s.ServiceAddress) + s.Log.Infof("Stopped listener service on %q", s.ServiceAddress) s.Unlock() } diff --git a/plugins/inputs/statsd/statsd_test.go b/plugins/inputs/statsd/statsd_test.go index 3fbc45640..f76681134 100644 --- a/plugins/inputs/statsd/statsd_test.go +++ b/plugins/inputs/statsd/statsd_test.go @@ -1,41 +1,30 @@ package statsd import ( - "bytes" - "errors" "fmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "net" + "sync" "testing" "time" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) const ( - testMsg = "test.tcp.msg:100|c" + testMsg = "test.tcp.msg:100|c" + producerThreads = 10 ) -func newTestTcpListener() (*Statsd, chan *bytes.Buffer) { - in := make(chan *bytes.Buffer, 1500) - listener := &Statsd{ - Protocol: "tcp", - ServiceAddress: "localhost:8125", - AllowedPendingMessages: 10000, - MaxTCPConnections: 250, - in: in, - done: make(chan struct{}), - } - return listener, in -} - func NewTestStatsd() *Statsd { - s := Statsd{} + s := Statsd{Log: testutil.Logger{}} // Make data structures s.done = make(chan struct{}) - s.in = make(chan *bytes.Buffer, s.AllowedPendingMessages) + s.in = make(chan input, s.AllowedPendingMessages) s.gauges = make(map[string]cachedgauge) s.counters = make(map[string]cachedcounter) s.sets = make(map[string]cachedset) @@ -46,9 +35,10 @@ func NewTestStatsd() *Statsd { return &s } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestConcurrentConns(t *testing.T) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "tcp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 10000, @@ -76,9 +66,10 @@ func TestConcurrentConns(t *testing.T) { assert.Zero(t, acc.NFields()) } -// Test that MaxTCPConections is respected when max==1 +// Test that MaxTCPConnections is respected when max==1 func TestConcurrentConns1(t *testing.T) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "tcp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 10000, @@ -104,9 +95,10 @@ func TestConcurrentConns1(t *testing.T) { assert.Zero(t, acc.NFields()) } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestCloseConcurrentConns(t *testing.T) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "tcp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 10000, @@ -128,6 +120,7 @@ func 
TestCloseConcurrentConns(t *testing.T) { // benchmark how long it takes to accept & process 100,000 metrics: func BenchmarkUDP(b *testing.B) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "udp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 250000, @@ -146,18 +139,34 @@ func BenchmarkUDP(b *testing.B) { if err != nil { panic(err) } - for i := 0; i < 250000; i++ { - fmt.Fprintf(conn, testMsg) + + var wg sync.WaitGroup + for i := 1; i <= producerThreads; i++ { + wg.Add(1) + go sendRequests(conn, &wg) } + wg.Wait() + // wait for 250,000 metrics to get added to accumulator - time.Sleep(time.Millisecond) + for len(listener.in) > 0 { + fmt.Printf("Left in buffer: %v \n", len(listener.in)) + time.Sleep(time.Millisecond) + } listener.Stop() } } +func sendRequests(conn net.Conn, wg *sync.WaitGroup) { + defer wg.Done() + for i := 0; i < 25000; i++ { + fmt.Fprintf(conn, testMsg) + } +} + // benchmark how long it takes to accept & process 100,000 metrics: func BenchmarkTCP(b *testing.B) { listener := Statsd{ + Log: testutil.Logger{}, Protocol: "tcp", ServiceAddress: "localhost:8125", AllowedPendingMessages: 250000, @@ -177,11 +186,16 @@ func BenchmarkTCP(b *testing.B) { if err != nil { panic(err) } - for i := 0; i < 250000; i++ { - fmt.Fprintf(conn, testMsg) + var wg sync.WaitGroup + for i := 1; i <= producerThreads; i++ { + wg.Add(1) + go sendRequests(conn, &wg) } + wg.Wait() // wait for 250,000 metrics to get added to accumulator - time.Sleep(time.Millisecond) + for len(listener.in) > 0 { + time.Sleep(time.Millisecond) + } listener.Stop() } } @@ -189,7 +203,7 @@ func BenchmarkTCP(b *testing.B) { // Valid lines should be parsed and their values should be cached func TestParse_ValidLines(t *testing.T) { s := NewTestStatsd() - valid_lines := []string{ + validLines := []string{ "valid:45|c", "valid:45|s", "valid:45|g", @@ -197,7 +211,7 @@ func TestParse_ValidLines(t *testing.T) { "valid.timer:45|h", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -210,7 +224,7 @@ func TestParse_Gauges(t *testing.T) { s := NewTestStatsd() // Test that gauge +- values work - valid_lines := []string{ + validLines := []string{ "plus.minus:100|g", "plus.minus:-10|g", "plus.minus:+30|g", @@ -228,7 +242,7 @@ func TestParse_Gauges(t *testing.T) { "scientific.notation.minus:4.7E-5|g", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -274,7 +288,7 @@ func TestParse_Gauges(t *testing.T) { } for _, test := range validations { - err := test_validate_gauge(test.name, test.value, s.gauges) + err := testValidateGauge(test.name, test.value, s.gauges) if err != nil { t.Error(err.Error()) } @@ -286,7 +300,7 @@ func TestParse_Sets(t *testing.T) { s := NewTestStatsd() // Test that sets work - valid_lines := []string{ + validLines := []string{ "unique.user.ids:100|s", "unique.user.ids:100|s", "unique.user.ids:100|s", @@ -306,7 +320,7 @@ func TestParse_Sets(t *testing.T) { "string.sets:bar|s", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -336,7 +350,7 @@ func TestParse_Sets(t *testing.T) { } for _, test := range validations { - err := test_validate_set(test.name, test.value, 
s.sets) + err := testValidateSet(test.name, test.value, s.sets) if err != nil { t.Error(err.Error()) } @@ -348,7 +362,7 @@ func TestParse_Counters(t *testing.T) { s := NewTestStatsd() // Test that counters work - valid_lines := []string{ + validLines := []string{ "small.inc:1|c", "big.inc:100|c", "big.inc:1|c", @@ -363,7 +377,7 @@ func TestParse_Counters(t *testing.T) { "negative.test:-5|c", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -401,7 +415,7 @@ func TestParse_Counters(t *testing.T) { } for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -411,11 +425,11 @@ func TestParse_Counters(t *testing.T) { // Tests low-level functionality of timings func TestParse_Timings(t *testing.T) { s := NewTestStatsd() - s.Percentiles = []int{90} + s.Percentiles = []internal.Number{{Value: 90.0}} acc := &testutil.Accumulator{} // Test that counters work - valid_lines := []string{ + validLines := []string{ "test.timing:1|ms", "test.timing:11|ms", "test.timing:1|ms", @@ -423,7 +437,7 @@ func TestParse_Timings(t *testing.T) { "test.timing:1|ms", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -464,7 +478,7 @@ func TestParseScientificNotation(t *testing.T) { // Invalid lines should return an error func TestParse_InvalidLines(t *testing.T) { s := NewTestStatsd() - invalid_lines := []string{ + invalidLines := []string{ "i.dont.have.a.pipe:45g", "i.dont.have.a.colon45|c", "invalid.metric.type:45|e", @@ -475,7 +489,7 @@ func TestParse_InvalidLines(t *testing.T) { "invalid.value:d11|c", "invalid.value:1d1|c", } - for _, line := range invalid_lines { + for _, line := range invalidLines { err := s.parseStatsdLine(line) if err == nil { t.Errorf("Parsing line %s should have resulted in an error\n", line) @@ -486,21 +500,21 @@ func TestParse_InvalidLines(t *testing.T) { // Invalid sample rates should be ignored and not applied func TestParse_InvalidSampleRate(t *testing.T) { s := NewTestStatsd() - invalid_lines := []string{ + invalidLines := []string{ "invalid.sample.rate:45|c|0.1", "invalid.sample.rate.2:45|c|@foo", "invalid.sample.rate:45|g|@0.1", "invalid.sample.rate:45|s|@0.1", } - for _, line := range invalid_lines { + for _, line := range invalidLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } } - counter_validations := []struct { + counterValidations := []struct { name string value int64 cache map[string]cachedcounter @@ -517,19 +531,19 @@ func TestParse_InvalidSampleRate(t *testing.T) { }, } - for _, test := range counter_validations { - err := test_validate_counter(test.name, test.value, test.cache) + for _, test := range counterValidations { + err := testValidateCounter(test.name, test.value, test.cache) if err != nil { t.Error(err.Error()) } } - err := test_validate_gauge("invalid_sample_rate", 45, s.gauges) + err := testValidateGauge("invalid_sample_rate", 45, s.gauges) if err != nil { t.Error(err.Error()) } - err = test_validate_set("invalid_sample_rate", 1, s.sets) + err = testValidateSet("invalid_sample_rate", 1, s.sets) if err != nil { t.Error(err.Error()) } @@ -538,12 
+552,12 @@ func TestParse_InvalidSampleRate(t *testing.T) { // Names should be parsed like . -> _ func TestParse_DefaultNameParsing(t *testing.T) { s := NewTestStatsd() - valid_lines := []string{ + validLines := []string{ "valid:1|c", "valid.foo-bar:11|c", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -565,7 +579,7 @@ func TestParse_DefaultNameParsing(t *testing.T) { } for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -607,7 +621,7 @@ func TestParse_Template(t *testing.T) { // Validate counters for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -649,7 +663,7 @@ func TestParse_TemplateFilter(t *testing.T) { // Validate counters for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -687,7 +701,7 @@ func TestParse_TemplateSpecificity(t *testing.T) { // Validate counters for _, test := range validations { - err := test_validate_counter(test.name, test.value, s.counters) + err := testValidateCounter(test.name, test.value, s.counters) if err != nil { t.Error(err.Error()) } @@ -723,7 +737,7 @@ func TestParse_TemplateFields(t *testing.T) { } } - counter_tests := []struct { + counterTests := []struct { name string value int64 field string @@ -745,14 +759,14 @@ func TestParse_TemplateFields(t *testing.T) { }, } // Validate counters - for _, test := range counter_tests { - err := test_validate_counter(test.name, test.value, s.counters, test.field) + for _, test := range counterTests { + err := testValidateCounter(test.name, test.value, s.counters, test.field) if err != nil { t.Error(err.Error()) } } - gauge_tests := []struct { + gaugeTests := []struct { name string value float64 field string @@ -769,14 +783,14 @@ func TestParse_TemplateFields(t *testing.T) { }, } // Validate gauges - for _, test := range gauge_tests { - err := test_validate_gauge(test.name, test.value, s.gauges, test.field) + for _, test := range gaugeTests { + err := testValidateGauge(test.name, test.value, s.gauges, test.field) if err != nil { t.Error(err.Error()) } } - set_tests := []struct { + setTests := []struct { name string value int64 field string @@ -793,8 +807,8 @@ func TestParse_TemplateFields(t *testing.T) { }, } // Validate sets - for _, test := range set_tests { - err := test_validate_set(test.name, test.value, s.sets, test.field) + for _, test := range setTests { + err := testValidateSet(test.name, test.value, s.sets, test.field) if err != nil { t.Error(err.Error()) } @@ -861,83 +875,125 @@ func TestParse_Tags(t *testing.T) { } } -// Test that DataDog tags are parsed func TestParse_DataDogTags(t *testing.T) { - s := NewTestStatsd() - s.ParseDataDogTags = true - - lines := []string{ - "my_counter:1|c|#host:localhost,environment:prod,endpoint:/:tenant?/oauth/ro", - "my_gauge:10.1|g|#live", - "my_set:1|s|#host:localhost", - "my_timer:3|ms|@0.1|#live,host:localhost", - } - - testTags := map[string]map[string]string{ - "my_counter": map[string]string{ - "host": "localhost", - "environment": "prod", - "endpoint": 
"/:tenant?/oauth/ro", + tests := []struct { + name string + line string + expected []telegraf.Metric + }{ + { + name: "counter", + line: "my_counter:1|c|#host:localhost,environment:prod,endpoint:/:tenant?/oauth/ro", + expected: []telegraf.Metric{ + testutil.MustMetric( + "my_counter", + map[string]string{ + "endpoint": "/:tenant?/oauth/ro", + "environment": "prod", + "host": "localhost", + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 1, + }, + time.Now(), + telegraf.Counter, + ), + }, }, - - "my_gauge": map[string]string{ - "live": "", + { + name: "gauge", + line: "my_gauge:10.1|g|#live", + expected: []telegraf.Metric{ + testutil.MustMetric( + "my_gauge", + map[string]string{ + "live": "true", + "metric_type": "gauge", + }, + map[string]interface{}{ + "value": 10.1, + }, + time.Now(), + telegraf.Gauge, + ), + }, }, - - "my_set": map[string]string{ - "host": "localhost", + { + name: "set", + line: "my_set:1|s|#host:localhost", + expected: []telegraf.Metric{ + testutil.MustMetric( + "my_set", + map[string]string{ + "host": "localhost", + "metric_type": "set", + }, + map[string]interface{}{ + "value": 1, + }, + time.Now(), + ), + }, }, - - "my_timer": map[string]string{ - "live": "", - "host": "localhost", + { + name: "timer", + line: "my_timer:3|ms|@0.1|#live,host:localhost", + expected: []telegraf.Metric{ + testutil.MustMetric( + "my_timer", + map[string]string{ + "host": "localhost", + "live": "true", + "metric_type": "timing", + }, + map[string]interface{}{ + "count": 10, + "lower": float64(3), + "mean": float64(3), + "stddev": float64(0), + "sum": float64(30), + "upper": float64(3), + }, + time.Now(), + ), + }, + }, + { + name: "empty tag set", + line: "cpu:42|c|#", + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42, + }, + time.Now(), + telegraf.Counter, + ), + }, }, } - for _, line := range lines { - err := s.parseStatsdLine(line) - if err != nil { - t.Errorf("Parsing line %s should not have resulted in an error\n", line) - } - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var acc testutil.Accumulator - sourceTags := map[string]map[string]string{ - "my_gauge": tagsForItem(s.gauges), - "my_counter": tagsForItem(s.counters), - "my_set": tagsForItem(s.sets), - "my_timer": tagsForItem(s.timings), - } + s := NewTestStatsd() + s.DataDogExtensions = true - for statName, tags := range testTags { - for k, v := range tags { - otherValue := sourceTags[statName][k] - if sourceTags[statName][k] != v { - t.Errorf("Error with %s, tag %s: %s != %s", statName, k, v, otherValue) - } - } - } -} + err := s.parseStatsdLine(tt.line) + require.NoError(t, err) + err = s.Gather(&acc) + require.NoError(t, err) -func tagsForItem(m interface{}) map[string]string { - switch m.(type) { - case map[string]cachedcounter: - for _, v := range m.(map[string]cachedcounter) { - return v.tags - } - case map[string]cachedgauge: - for _, v := range m.(map[string]cachedgauge) { - return v.tags - } - case map[string]cachedset: - for _, v := range m.(map[string]cachedset) { - return v.tags - } - case map[string]cachedtimings: - for _, v := range m.(map[string]cachedtimings) { - return v.tags - } + testutil.RequireMetricsEqual(t, tt.expected, acc.GetTelegrafMetrics(), + testutil.SortMetrics(), testutil.IgnoreTime()) + }) } - return nil } // Test that statsd buckets are parsed to measurement names properly @@ -945,8 +1001,8 @@ func TestParseName(t *testing.T) { s := 
NewTestStatsd() tests := []struct { - in_name string - out_name string + inName string + outName string }{ { "foobar", @@ -963,9 +1019,9 @@ func TestParseName(t *testing.T) { } for _, test := range tests { - name, _, _ := s.parseName(test.in_name) - if name != test.out_name { - t.Errorf("Expected: %s, got %s", test.out_name, name) + name, _, _ := s.parseName(test.inName) + if name != test.outName { + t.Errorf("Expected: %s, got %s", test.outName, name) } } @@ -973,8 +1029,8 @@ func TestParseName(t *testing.T) { s.MetricSeparator = "." tests = []struct { - in_name string - out_name string + inName string + outName string }{ { "foobar", @@ -991,9 +1047,9 @@ func TestParseName(t *testing.T) { } for _, test := range tests { - name, _, _ := s.parseName(test.in_name) - if name != test.out_name { - t.Errorf("Expected: %s, got %s", test.out_name, name) + name, _, _ := s.parseName(test.inName) + if name != test.outName { + t.Errorf("Expected: %s, got %s", test.outName, name) } } } @@ -1004,12 +1060,12 @@ func TestParse_MeasurementsWithSameName(t *testing.T) { s := NewTestStatsd() // Test that counters work - valid_lines := []string{ + validLines := []string{ "test.counter,host=localhost:1|c", "test.counter,host=localhost,region=west:1|c", } - for _, line := range valid_lines { + for _, line := range validLines { err := s.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) @@ -1024,7 +1080,7 @@ func TestParse_MeasurementsWithSameName(t *testing.T) { // Test that measurements with multiple bits, are treated as different outputs // but are equal to their single-measurement representation func TestParse_MeasurementsWithMultipleValues(t *testing.T) { - single_lines := []string{ + singleLines := []string{ "valid.multiple:0|ms|@0.1", "valid.multiple:0|ms|", "valid.multiple:1|ms", @@ -1050,7 +1106,7 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { "valid.multiple.mixed:1|g", } - multiple_lines := []string{ + multipleLines := []string{ "valid.multiple:0|ms|@0.1:0|ms|:1|ms", "valid.multiple.duplicate:1|c:1|c:2|c:1|c", "valid.multiple.duplicate:1|h:1|h:2|h:1|h", @@ -1059,28 +1115,28 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { "valid.multiple.mixed:1|c:1|ms:2|s:1|g", } - s_single := NewTestStatsd() - s_multiple := NewTestStatsd() + sSingle := NewTestStatsd() + sMultiple := NewTestStatsd() - for _, line := range single_lines { - err := s_single.parseStatsdLine(line) + for _, line := range singleLines { + err := sSingle.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } } - for _, line := range multiple_lines { - err := s_multiple.parseStatsdLine(line) + for _, line := range multipleLines { + err := sMultiple.parseStatsdLine(line) if err != nil { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } } - if len(s_single.timings) != 3 { - t.Errorf("Expected 3 measurement, found %d", len(s_single.timings)) + if len(sSingle.timings) != 3 { + t.Errorf("Expected 3 measurement, found %d", len(sSingle.timings)) } - if cachedtiming, ok := s_single.timings["metric_type=timingvalid_multiple"]; !ok { + if cachedtiming, ok := sSingle.timings["metric_type=timingvalid_multiple"]; !ok { t.Errorf("Expected cached measurement with hash 'metric_type=timingvalid_multiple' not found") } else { if cachedtiming.name != "valid_multiple" { @@ -1100,63 +1156,63 @@ func TestParse_MeasurementsWithMultipleValues(t *testing.T) { } } - // test if s_single and 
s_multiple did compute the same stats for valid.multiple.duplicate - if err := test_validate_set("valid_multiple_duplicate", 2, s_single.sets); err != nil { + // test if sSingle and sMultiple did compute the same stats for valid.multiple.duplicate + if err := testValidateSet("valid_multiple_duplicate", 2, sSingle.sets); err != nil { t.Error(err.Error()) } - if err := test_validate_set("valid_multiple_duplicate", 2, s_multiple.sets); err != nil { + if err := testValidateSet("valid_multiple_duplicate", 2, sMultiple.sets); err != nil { t.Error(err.Error()) } - if err := test_validate_counter("valid_multiple_duplicate", 5, s_single.counters); err != nil { + if err := testValidateCounter("valid_multiple_duplicate", 5, sSingle.counters); err != nil { t.Error(err.Error()) } - if err := test_validate_counter("valid_multiple_duplicate", 5, s_multiple.counters); err != nil { + if err := testValidateCounter("valid_multiple_duplicate", 5, sMultiple.counters); err != nil { t.Error(err.Error()) } - if err := test_validate_gauge("valid_multiple_duplicate", 1, s_single.gauges); err != nil { + if err := testValidateGauge("valid_multiple_duplicate", 1, sSingle.gauges); err != nil { t.Error(err.Error()) } - if err := test_validate_gauge("valid_multiple_duplicate", 1, s_multiple.gauges); err != nil { + if err := testValidateGauge("valid_multiple_duplicate", 1, sMultiple.gauges); err != nil { t.Error(err.Error()) } - // test if s_single and s_multiple did compute the same stats for valid.multiple.mixed - if err := test_validate_set("valid_multiple_mixed", 1, s_single.sets); err != nil { + // test if sSingle and sMultiple did compute the same stats for valid.multiple.mixed + if err := testValidateSet("valid_multiple_mixed", 1, sSingle.sets); err != nil { t.Error(err.Error()) } - if err := test_validate_set("valid_multiple_mixed", 1, s_multiple.sets); err != nil { + if err := testValidateSet("valid_multiple_mixed", 1, sMultiple.sets); err != nil { t.Error(err.Error()) } - if err := test_validate_counter("valid_multiple_mixed", 1, s_single.counters); err != nil { + if err := testValidateCounter("valid_multiple_mixed", 1, sSingle.counters); err != nil { t.Error(err.Error()) } - if err := test_validate_counter("valid_multiple_mixed", 1, s_multiple.counters); err != nil { + if err := testValidateCounter("valid_multiple_mixed", 1, sMultiple.counters); err != nil { t.Error(err.Error()) } - if err := test_validate_gauge("valid_multiple_mixed", 1, s_single.gauges); err != nil { + if err := testValidateGauge("valid_multiple_mixed", 1, sSingle.gauges); err != nil { t.Error(err.Error()) } - if err := test_validate_gauge("valid_multiple_mixed", 1, s_multiple.gauges); err != nil { + if err := testValidateGauge("valid_multiple_mixed", 1, sMultiple.gauges); err != nil { t.Error(err.Error()) } } // Tests low-level functionality of timings when multiple fields is enabled // and a measurement template has been defined which can parse field names -func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) { +func TestParse_TimingsMultipleFieldsWithTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{"measurement.field"} - s.Percentiles = []int{90} + s.Percentiles = []internal.Number{{Value: 90.0}} acc := &testutil.Accumulator{} validLines := []string{ @@ -1204,10 +1260,10 @@ func TestParse_Timings_MultipleFieldsWithTemplate(t *testing.T) { // Tests low-level functionality of timings when multiple fields is enabled // but a measurement template hasn't been defined so we can't parse field names // In this case 
the behaviour should be the same as normal behaviour -func TestParse_Timings_MultipleFieldsWithoutTemplate(t *testing.T) { +func TestParse_TimingsMultipleFieldsWithoutTemplate(t *testing.T) { s := NewTestStatsd() s.Templates = []string{} - s.Percentiles = []int{90} + s.Percentiles = []internal.Number{{Value: 90.0}} acc := &testutil.Accumulator{} validLines := []string{ @@ -1420,14 +1476,14 @@ func TestParse_Gauges_Delete(t *testing.T) { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } - err = test_validate_gauge("current_users", 100, s.gauges) + err = testValidateGauge("current_users", 100, s.gauges) if err != nil { t.Error(err.Error()) } s.Gather(fakeacc) - err = test_validate_gauge("current_users", 100, s.gauges) + err = testValidateGauge("current_users", 100, s.gauges) if err == nil { t.Error("current_users_gauge metric should have been deleted") } @@ -1446,14 +1502,14 @@ func TestParse_Sets_Delete(t *testing.T) { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } - err = test_validate_set("unique_user_ids", 1, s.sets) + err = testValidateSet("unique_user_ids", 1, s.sets) if err != nil { t.Error(err.Error()) } s.Gather(fakeacc) - err = test_validate_set("unique_user_ids", 1, s.sets) + err = testValidateSet("unique_user_ids", 1, s.sets) if err == nil { t.Error("unique_user_ids_set metric should have been deleted") } @@ -1472,14 +1528,14 @@ func TestParse_Counters_Delete(t *testing.T) { t.Errorf("Parsing line %s should not have resulted in an error\n", line) } - err = test_validate_counter("total_users", 100, s.counters) + err = testValidateCounter("total_users", 100, s.counters) if err != nil { t.Error(err.Error()) } s.Gather(fakeacc) - err = test_validate_counter("total_users", 100, s.counters) + err = testValidateCounter("total_users", 100, s.counters) if err == nil { t.Error("total_users_counter metric should have been deleted") } @@ -1504,8 +1560,7 @@ func TestParseKeyValue(t *testing.T) { } // Test utility functions - -func test_validate_set( +func testValidateSet( name string, value int64, cache map[string]cachedset, @@ -1527,17 +1582,16 @@ func test_validate_set( } } if !found { - return errors.New(fmt.Sprintf("Test Error: Metric name %s not found\n", name)) + return fmt.Errorf("test Error: Metric name %s not found", name) } if value != int64(len(metric.fields[f])) { - return errors.New(fmt.Sprintf("Measurement: %s, expected %d, actual %d\n", - name, value, len(metric.fields[f]))) + return fmt.Errorf("measurement: %s, expected %d, actual %d", name, value, len(metric.fields[f])) } return nil } -func test_validate_counter( +func testValidateCounter( name string, valueExpected int64, cache map[string]cachedcounter, @@ -1559,17 +1613,16 @@ func test_validate_counter( } } if !found { - return errors.New(fmt.Sprintf("Test Error: Metric name %s not found\n", name)) + return fmt.Errorf("test Error: Metric name %s not found", name) } if valueExpected != valueActual { - return errors.New(fmt.Sprintf("Measurement: %s, expected %d, actual %d\n", - name, valueExpected, valueActual)) + return fmt.Errorf("measurement: %s, expected %d, actual %d", name, valueExpected, valueActual) } return nil } -func test_validate_gauge( +func testValidateGauge( name string, valueExpected float64, cache map[string]cachedgauge, @@ -1591,12 +1644,104 @@ func test_validate_gauge( } } if !found { - return errors.New(fmt.Sprintf("Test Error: Metric name %s not found\n", name)) + return fmt.Errorf("test Error: Metric name %s not found", name) } if valueExpected != 
valueActual { - return errors.New(fmt.Sprintf("Measurement: %s, expected %f, actual %f\n", - name, valueExpected, valueActual)) + return fmt.Errorf("measurement: %s, expected %f, actual %f", name, valueExpected, valueActual) } return nil } + +func TestTCP(t *testing.T) { + statsd := Statsd{ + Log: testutil.Logger{}, + Protocol: "tcp", + ServiceAddress: "localhost:0", + AllowedPendingMessages: 10000, + MaxTCPConnections: 2, + } + var acc testutil.Accumulator + require.NoError(t, statsd.Start(&acc)) + defer statsd.Stop() + + addr := statsd.TCPlistener.Addr().String() + + conn, err := net.Dial("tcp", addr) + require.NoError(t, err) + _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) + require.NoError(t, err) + err = conn.Close() + require.NoError(t, err) + + for { + err = statsd.Gather(&acc) + require.NoError(t, err) + + if len(acc.Metrics) > 0 { + break + } + } + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42, + }, + time.Now(), + telegraf.Counter, + ), + }, + acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), + ) +} + +func TestUdp(t *testing.T) { + statsd := Statsd{ + Log: testutil.Logger{}, + Protocol: "udp", + ServiceAddress: "localhost:8125", + AllowedPendingMessages: 250000, + } + var acc testutil.Accumulator + require.NoError(t, statsd.Start(&acc)) + defer statsd.Stop() + + conn, err := net.Dial("udp", "127.0.0.1:8125") + require.NoError(t, err) + _, err = conn.Write([]byte("cpu.time_idle:42|c\n")) + require.NoError(t, err) + err = conn.Close() + require.NoError(t, err) + + for { + err = statsd.Gather(&acc) + require.NoError(t, err) + + if len(acc.Metrics) > 0 { + break + } + } + + testutil.RequireMetricsEqual(t, + []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{ + "metric_type": "counter", + }, + map[string]interface{}{ + "value": 42, + }, + time.Now(), + telegraf.Counter, + ), + }, + acc.GetTelegrafMetrics(), + testutil.IgnoreTime(), + ) +} diff --git a/plugins/inputs/suricata/README.md b/plugins/inputs/suricata/README.md new file mode 100644 index 000000000..5b4f16c00 --- /dev/null +++ b/plugins/inputs/suricata/README.md @@ -0,0 +1,127 @@ +# Suricata Input Plugin + +This plugin reports internal performance counters of the Suricata IDS/IPS +engine, such as captured traffic volume, memory usage, uptime, flow counters, +and much more. It provides a unix socket that Suricata's JSON stats log can +be written to, and converts the incoming data into Telegraf metrics. + +### Configuration + +```toml +[[inputs.suricata]] + ## Data sink for Suricata stats log. + # This is expected to be a filename of a + # unix socket to be created for listening. + source = "/var/run/suricata-stats.sock" + + # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" + # becomes "detect_alert" when delimiter is "_". + delimiter = "_" +``` + +### Metrics + +Fields in the 'suricata' measurement follow the JSON format used by Suricata's +stats output. +See http://suricata.readthedocs.io/en/latest/performance/statistics.html for +more information. + +All fields are numeric. +- suricata + - tags: + - thread: `Global` for global statistics (if enabled), thread IDs (e.g. 
`W#03-enp0s31f6`) for thread-specific statistics + - fields: + - app_layer_flow_dcerpc_udp + - app_layer_flow_dns_tcp + - app_layer_flow_dns_udp + - app_layer_flow_enip_udp + - app_layer_flow_failed_tcp + - app_layer_flow_failed_udp + - app_layer_flow_http + - app_layer_flow_ssh + - app_layer_flow_tls + - app_layer_tx_dns_tcp + - app_layer_tx_dns_udp + - app_layer_tx_enip_udp + - app_layer_tx_http + - app_layer_tx_smtp + - capture_kernel_drops + - capture_kernel_packets + - decoder_avg_pkt_size + - decoder_bytes + - decoder_ethernet + - decoder_gre + - decoder_icmpv4 + - decoder_icmpv4_ipv4_unknown_ver + - decoder_icmpv6 + - decoder_invalid + - decoder_ipv4 + - decoder_ipv6 + - decoder_max_pkt_size + - decoder_pkts + - decoder_tcp + - decoder_tcp_hlen_too_small + - decoder_tcp_invalid_optlen + - decoder_teredo + - decoder_udp + - decoder_vlan + - detect_alert + - dns_memcap_global + - dns_memuse + - flow_memuse + - flow_mgr_closed_pruned + - flow_mgr_est_pruned + - flow_mgr_flows_checked + - flow_mgr_flows_notimeout + - flow_mgr_flows_removed + - flow_mgr_flows_timeout + - flow_mgr_flows_timeout_inuse + - flow_mgr_new_pruned + - flow_mgr_rows_checked + - flow_mgr_rows_empty + - flow_mgr_rows_maxlen + - flow_mgr_rows_skipped + - flow_spare + - flow_tcp_reuse + - http_memuse + - tcp_memuse + - tcp_pseudo + - tcp_reassembly_gap + - tcp_reassembly_memuse + - tcp_rst + - tcp_sessions + - tcp_syn + - tcp_synack + - ... + + +#### Suricata configuration + +Suricata needs to deliver the 'stats' event type to a given unix socket for +this plugin to pick up. This can be done, for example, by creating an additional +output in the Suricata configuration file: + +```yaml +- eve-log: + enabled: yes + filetype: unix_stream + filename: /tmp/suricata-stats.sock + types: + - stats: + threads: yes +``` + +### Example Output + +```text +suricata,host=myhost,thread=FM#01 flow_mgr_rows_empty=0,flow_mgr_rows_checked=65536,flow_mgr_closed_pruned=0,flow_emerg_mode_over=0,flow_mgr_flows_timeout_inuse=0,flow_mgr_rows_skipped=65535,flow_mgr_bypassed_pruned=0,flow_mgr_flows_removed=0,flow_mgr_est_pruned=0,flow_mgr_flows_notimeout=1,flow_mgr_flows_checked=1,flow_mgr_rows_busy=0,flow_spare=10000,flow_mgr_rows_maxlen=1,flow_mgr_new_pruned=0,flow_emerg_mode_entered=0,flow_tcp_reuse=0,flow_mgr_flows_timeout=0 1568368562545197545 +suricata,host=myhost,thread=W#04-wlp4s0 
decoder_ltnull_pkt_too_small=0,decoder_ipraw_invalid_ip_version=0,defrag_ipv4_reassembled=0,tcp_no_flow=0,app_layer_flow_tls=1,decoder_udp=25,defrag_ipv6_fragments=0,defrag_ipv4_fragments=0,decoder_tcp=59,decoder_vlan=0,decoder_pkts=84,decoder_vlan_qinq=0,decoder_avg_pkt_size=574,flow_memcap=0,defrag_max_frag_hits=0,tcp_ssn_memcap_drop=0,capture_kernel_packets=84,app_layer_flow_dcerpc_udp=0,app_layer_tx_dns_tcp=0,tcp_rst=0,decoder_icmpv4=0,app_layer_tx_tls=0,decoder_ipv4=84,decoder_erspan=0,decoder_ltnull_unsupported_type=0,decoder_invalid=0,app_layer_flow_ssh=0,capture_kernel_drops=0,app_layer_flow_ftp=0,app_layer_tx_http=0,tcp_pseudo_failed=0,defrag_ipv6_reassembled=0,defrag_ipv6_timeouts=0,tcp_pseudo=0,tcp_sessions=1,decoder_ethernet=84,decoder_raw=0,decoder_sctp=0,app_layer_flow_dns_udp=1,decoder_gre=0,app_layer_flow_http=0,app_layer_flow_imap=0,tcp_segment_memcap_drop=0,detect_alert=0,app_layer_flow_failed_tcp=0,decoder_teredo=0,decoder_mpls=0,decoder_ppp=0,decoder_max_pkt_size=1422,decoder_ipv6=0,tcp_reassembly_gap=0,app_layer_flow_dcerpc_tcp=0,decoder_ipv4_in_ipv6=0,tcp_stream_depth_reached=0,app_layer_flow_dns_tcp=0,app_layer_flow_smtp=0,tcp_syn=1,decoder_sll=0,tcp_invalid_checksum=0,app_layer_tx_dns_udp=1,decoder_bytes=48258,defrag_ipv4_timeouts=0,app_layer_flow_msn=0,decoder_pppoe=0,decoder_null=0,app_layer_flow_failed_udp=3,app_layer_tx_smtp=0,decoder_icmpv6=0,decoder_ipv6_in_ipv6=0,tcp_synack=1,app_layer_flow_smb=0,decoder_dce_pkt_too_small=0 1568368562545174807 +suricata,host=myhost,thread=W#01-wlp4s0 tcp_synack=0,app_layer_flow_imap=0,decoder_ipv4_in_ipv6=0,decoder_max_pkt_size=684,decoder_gre=0,defrag_ipv4_timeouts=0,tcp_invalid_checksum=0,decoder_ipv4=53,flow_memcap=0,app_layer_tx_http=0,app_layer_tx_smtp=0,decoder_null=0,tcp_no_flow=0,app_layer_tx_tls=0,app_layer_flow_ssh=0,app_layer_flow_smtp=0,decoder_pppoe=0,decoder_teredo=0,decoder_ipraw_invalid_ip_version=0,decoder_ltnull_pkt_too_small=0,tcp_rst=0,decoder_ppp=0,decoder_ipv6=29,app_layer_flow_dns_udp=3,decoder_vlan=0,app_layer_flow_dcerpc_tcp=0,tcp_syn=0,defrag_ipv4_fragments=0,defrag_ipv6_timeouts=0,decoder_raw=0,defrag_ipv6_reassembled=0,tcp_reassembly_gap=0,tcp_sessions=0,decoder_udp=44,tcp_segment_memcap_drop=0,app_layer_tx_dns_udp=3,app_layer_flow_tls=0,decoder_tcp=37,defrag_ipv4_reassembled=0,app_layer_flow_failed_udp=6,app_layer_flow_ftp=0,decoder_icmpv6=1,tcp_stream_depth_reached=0,capture_kernel_drops=0,decoder_sll=0,decoder_bytes=15883,decoder_ethernet=91,tcp_pseudo=0,app_layer_flow_http=0,decoder_sctp=0,decoder_pkts=91,decoder_avg_pkt_size=174,decoder_erspan=0,app_layer_flow_msn=0,app_layer_flow_smb=0,capture_kernel_packets=91,decoder_icmpv4=0,decoder_ipv6_in_ipv6=0,tcp_ssn_memcap_drop=0,decoder_vlan_qinq=0,decoder_ltnull_unsupported_type=0,decoder_invalid=0,defrag_max_frag_hits=0,tcp_pseudo_failed=0,detect_alert=0,app_layer_tx_dns_tcp=0,app_layer_flow_failed_tcp=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_dns_tcp=0,defrag_ipv6_fragments=0,decoder_mpls=0,decoder_dce_pkt_too_small=0 1568368562545148438 +suricata,host=myhost flow_memuse=7094464,tcp_memuse=3276800,tcp_reassembly_memuse=12332832,dns_memuse=0,dns_memcap_state=0,dns_memcap_global=0,http_memuse=0,http_memcap=0 1568368562545144569 +suricata,host=myhost,thread=W#07-wlp4s0 
app_layer_tx_http=0,app_layer_tx_dns_tcp=0,decoder_vlan=0,decoder_pppoe=0,decoder_sll=0,decoder_tcp=0,flow_memcap=0,app_layer_flow_msn=0,tcp_no_flow=0,tcp_rst=0,tcp_segment_memcap_drop=0,tcp_sessions=0,detect_alert=0,defrag_ipv6_reassembled=0,decoder_ipraw_invalid_ip_version=0,decoder_erspan=0,decoder_icmpv4=0,app_layer_tx_dns_udp=2,decoder_ltnull_pkt_too_small=0,decoder_bytes=1998,decoder_ipv6=1,defrag_ipv4_fragments=0,defrag_ipv6_fragments=0,app_layer_tx_smtp=0,decoder_ltnull_unsupported_type=0,decoder_max_pkt_size=342,app_layer_flow_ftp=0,decoder_ipv6_in_ipv6=0,defrag_ipv4_reassembled=0,defrag_ipv6_timeouts=0,app_layer_flow_dns_tcp=0,decoder_avg_pkt_size=181,defrag_ipv4_timeouts=0,tcp_stream_depth_reached=0,decoder_mpls=0,app_layer_flow_dns_udp=2,tcp_ssn_memcap_drop=0,app_layer_flow_dcerpc_tcp=0,app_layer_flow_failed_udp=2,app_layer_flow_smb=0,app_layer_flow_failed_tcp=0,decoder_invalid=0,decoder_null=0,decoder_gre=0,decoder_ethernet=11,app_layer_flow_ssh=0,defrag_max_frag_hits=0,capture_kernel_drops=0,tcp_pseudo_failed=0,app_layer_flow_smtp=0,decoder_udp=10,decoder_sctp=0,decoder_teredo=0,decoder_icmpv6=1,tcp_pseudo=0,tcp_synack=0,app_layer_tx_tls=0,app_layer_flow_imap=0,capture_kernel_packets=11,decoder_pkts=11,decoder_raw=0,decoder_ppp=0,tcp_syn=0,tcp_invalid_checksum=0,app_layer_flow_tls=0,decoder_ipv4_in_ipv6=0,app_layer_flow_http=0,decoder_dce_pkt_too_small=0,decoder_ipv4=10,decoder_vlan_qinq=0,tcp_reassembly_gap=0,app_layer_flow_dcerpc_udp=0 1568368562545110847 +suricata,host=myhost,thread=W#06-wlp4s0 app_layer_tx_smtp=0,decoder_ipv6_in_ipv6=0,decoder_dce_pkt_too_small=0,tcp_segment_memcap_drop=0,tcp_sessions=1,decoder_ppp=0,tcp_pseudo_failed=0,app_layer_tx_dns_tcp=0,decoder_invalid=0,defrag_ipv4_timeouts=0,app_layer_flow_smb=0,app_layer_flow_ssh=0,decoder_bytes=19407,decoder_null=0,app_layer_flow_tls=1,decoder_avg_pkt_size=473,decoder_pkts=41,decoder_pppoe=0,decoder_tcp=32,defrag_ipv4_reassembled=0,tcp_reassembly_gap=0,decoder_raw=0,flow_memcap=0,defrag_ipv6_timeouts=0,app_layer_flow_smtp=0,app_layer_tx_http=0,decoder_sll=0,decoder_udp=8,decoder_ltnull_pkt_too_small=0,decoder_ltnull_unsupported_type=0,decoder_ipv4_in_ipv6=0,decoder_vlan=0,decoder_max_pkt_size=1422,tcp_no_flow=0,app_layer_flow_failed_tcp=0,app_layer_flow_dns_tcp=0,app_layer_flow_ftp=0,decoder_icmpv4=0,defrag_max_frag_hits=0,tcp_rst=0,app_layer_flow_msn=0,app_layer_flow_failed_udp=2,app_layer_flow_dns_udp=0,app_layer_flow_dcerpc_udp=0,decoder_ipv4=39,decoder_ethernet=41,defrag_ipv6_reassembled=0,tcp_ssn_memcap_drop=0,app_layer_tx_tls=0,decoder_gre=0,decoder_vlan_qinq=0,tcp_pseudo=0,app_layer_flow_imap=0,app_layer_flow_dcerpc_tcp=0,defrag_ipv4_fragments=0,defrag_ipv6_fragments=0,tcp_synack=1,app_layer_flow_http=0,app_layer_tx_dns_udp=0,capture_kernel_packets=41,decoder_ipv6=2,tcp_invalid_checksum=0,tcp_stream_depth_reached=0,decoder_ipraw_invalid_ip_version=0,decoder_icmpv6=1,tcp_syn=1,detect_alert=0,capture_kernel_drops=0,decoder_teredo=0,decoder_erspan=0,decoder_sctp=0,decoder_mpls=0 1568368562545084670 +suricata,host=myhost,thread=W#02-wlp4s0 
decoder_tcp=53,tcp_rst=3,tcp_reassembly_gap=0,defrag_ipv6_timeouts=0,tcp_ssn_memcap_drop=0,app_layer_flow_dcerpc_tcp=0,decoder_max_pkt_size=1422,decoder_ipv6_in_ipv6=0,tcp_no_flow=0,app_layer_flow_ftp=0,app_layer_flow_ssh=0,decoder_pkts=82,decoder_sctp=0,tcp_invalid_checksum=0,app_layer_flow_dns_tcp=0,decoder_ipraw_invalid_ip_version=0,decoder_bytes=26441,decoder_erspan=0,tcp_pseudo_failed=0,tcp_syn=1,app_layer_tx_http=0,app_layer_tx_smtp=0,decoder_teredo=0,decoder_ipv4=80,defrag_ipv4_fragments=0,tcp_stream_depth_reached=0,app_layer_flow_smb=0,capture_kernel_packets=82,decoder_null=0,decoder_ltnull_pkt_too_small=0,decoder_ppp=0,decoder_icmpv6=1,app_layer_flow_dns_udp=2,app_layer_flow_http=0,app_layer_tx_dns_udp=3,decoder_mpls=0,decoder_sll=0,defrag_ipv4_reassembled=0,tcp_segment_memcap_drop=0,app_layer_flow_imap=0,decoder_ltnull_unsupported_type=0,decoder_icmpv4=0,decoder_raw=0,defrag_ipv4_timeouts=0,app_layer_flow_failed_udp=8,decoder_gre=0,capture_kernel_drops=0,defrag_ipv6_reassembled=0,tcp_pseudo=0,app_layer_flow_tls=1,decoder_avg_pkt_size=322,decoder_dce_pkt_too_small=0,decoder_ethernet=82,defrag_ipv6_fragments=0,tcp_sessions=1,tcp_synack=1,app_layer_tx_dns_tcp=0,decoder_vlan=0,flow_memcap=0,decoder_vlan_qinq=0,decoder_udp=28,decoder_invalid=0,detect_alert=0,app_layer_flow_failed_tcp=0,app_layer_tx_tls=0,decoder_pppoe=0,decoder_ipv6=2,decoder_ipv4_in_ipv6=0,defrag_max_frag_hits=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_smtp=0,app_layer_flow_msn=0 1568368562545061864 +suricata,host=myhost,thread=W#08-wlp4s0 decoder_dce_pkt_too_small=0,app_layer_tx_dns_tcp=0,decoder_pkts=58,decoder_ppp=0,decoder_raw=0,decoder_ipv4_in_ipv6=0,decoder_max_pkt_size=1392,tcp_invalid_checksum=0,tcp_syn=0,decoder_ipv4=51,decoder_ipv6_in_ipv6=0,decoder_tcp=0,decoder_ltnull_pkt_too_small=0,flow_memcap=0,decoder_udp=58,tcp_ssn_memcap_drop=0,tcp_pseudo=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_dns_udp=5,app_layer_tx_http=0,capture_kernel_drops=0,decoder_vlan=0,tcp_segment_memcap_drop=0,app_layer_flow_ftp=0,app_layer_flow_imap=0,app_layer_flow_http=0,app_layer_flow_tls=0,decoder_icmpv4=0,decoder_sctp=0,defrag_ipv4_timeouts=0,tcp_reassembly_gap=0,detect_alert=0,decoder_ethernet=58,tcp_pseudo_failed=0,decoder_teredo=0,defrag_ipv4_reassembled=0,tcp_sessions=0,app_layer_flow_msn=0,decoder_ipraw_invalid_ip_version=0,tcp_no_flow=0,app_layer_flow_dns_tcp=0,decoder_null=0,defrag_ipv4_fragments=0,app_layer_flow_dcerpc_tcp=0,app_layer_flow_failed_udp=8,app_layer_tx_tls=0,decoder_bytes=15800,decoder_ipv6=7,tcp_stream_depth_reached=0,decoder_invalid=0,decoder_ltnull_unsupported_type=0,app_layer_tx_dns_udp=6,decoder_pppoe=0,decoder_avg_pkt_size=272,decoder_erspan=0,defrag_ipv6_timeouts=0,app_layer_flow_failed_tcp=0,decoder_gre=0,decoder_sll=0,defrag_max_frag_hits=0,app_layer_flow_ssh=0,capture_kernel_packets=58,decoder_mpls=0,decoder_vlan_qinq=0,tcp_rst=0,app_layer_flow_smb=0,app_layer_tx_smtp=0,decoder_icmpv6=0,defrag_ipv6_fragments=0,defrag_ipv6_reassembled=0,tcp_synack=0,app_layer_flow_smtp=0 1568368562545035575 +suricata,host=myhost,thread=W#05-wlp4s0 
tcp_reassembly_gap=0,capture_kernel_drops=0,decoder_ltnull_unsupported_type=0,tcp_sessions=0,tcp_stream_depth_reached=0,tcp_pseudo_failed=0,app_layer_flow_failed_tcp=0,app_layer_tx_dns_tcp=0,decoder_null=0,decoder_dce_pkt_too_small=0,decoder_udp=7,tcp_rst=3,app_layer_flow_dns_tcp=0,decoder_invalid=0,defrag_ipv4_reassembled=0,tcp_synack=0,app_layer_flow_ftp=0,decoder_bytes=3117,decoder_pppoe=0,app_layer_flow_dcerpc_tcp=0,app_layer_flow_smb=0,decoder_ipv6_in_ipv6=0,decoder_ipraw_invalid_ip_version=0,app_layer_flow_imap=0,app_layer_tx_dns_udp=2,decoder_ppp=0,decoder_ipv4=21,decoder_tcp=14,flow_memcap=0,tcp_syn=0,tcp_invalid_checksum=0,decoder_teredo=0,decoder_ltnull_pkt_too_small=0,defrag_max_frag_hits=0,app_layer_tx_tls=0,decoder_pkts=24,decoder_sll=0,defrag_ipv6_fragments=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_smtp=0,decoder_icmpv6=3,defrag_ipv6_timeouts=0,decoder_ipv6=3,decoder_raw=0,defrag_ipv6_reassembled=0,tcp_no_flow=0,detect_alert=0,app_layer_flow_tls=0,decoder_ethernet=24,decoder_vlan=0,decoder_icmpv4=0,decoder_ipv4_in_ipv6=0,app_layer_flow_failed_udp=1,decoder_mpls=0,decoder_max_pkt_size=653,decoder_sctp=0,defrag_ipv4_timeouts=0,tcp_ssn_memcap_drop=0,app_layer_flow_dns_udp=1,app_layer_tx_smtp=0,capture_kernel_packets=24,decoder_vlan_qinq=0,decoder_gre=0,app_layer_flow_ssh=0,app_layer_flow_msn=0,defrag_ipv4_fragments=0,app_layer_flow_http=0,tcp_segment_memcap_drop=0,tcp_pseudo=0,app_layer_tx_http=0,decoder_erspan=0,decoder_avg_pkt_size=129 1568368562545009684 +suricata,host=myhost,thread=W#03-wlp4s0 app_layer_flow_failed_tcp=0,decoder_teredo=0,decoder_ipv6_in_ipv6=0,tcp_pseudo_failed=0,tcp_stream_depth_reached=0,tcp_syn=0,decoder_gre=0,tcp_segment_memcap_drop=0,tcp_ssn_memcap_drop=0,app_layer_tx_smtp=0,decoder_raw=0,decoder_ltnull_pkt_too_small=0,tcp_sessions=0,tcp_reassembly_gap=0,app_layer_flow_ssh=0,app_layer_flow_imap=0,decoder_ipv4=463,decoder_ethernet=463,capture_kernel_packets=463,decoder_pppoe=0,defrag_ipv4_reassembled=0,app_layer_flow_tls=0,app_layer_flow_dcerpc_udp=0,app_layer_flow_dns_udp=0,decoder_vlan=0,decoder_ipraw_invalid_ip_version=0,decoder_mpls=0,tcp_no_flow=0,decoder_avg_pkt_size=445,decoder_udp=432,flow_memcap=0,app_layer_tx_dns_udp=0,app_layer_flow_msn=0,app_layer_flow_http=0,app_layer_flow_dcerpc_tcp=0,decoder_ipv6=0,decoder_ipv4_in_ipv6=0,defrag_ipv4_timeouts=0,defrag_ipv4_fragments=0,defrag_ipv6_timeouts=0,decoder_sctp=0,defrag_ipv6_fragments=0,app_layer_flow_dns_tcp=0,app_layer_tx_tls=0,defrag_max_frag_hits=0,decoder_bytes=206345,decoder_vlan_qinq=0,decoder_invalid=0,decoder_ppp=0,tcp_rst=0,detect_alert=0,capture_kernel_drops=0,app_layer_flow_failed_udp=4,decoder_null=0,decoder_icmpv4=0,decoder_icmpv6=0,decoder_ltnull_unsupported_type=0,defrag_ipv6_reassembled=0,tcp_invalid_checksum=0,tcp_synack=0,decoder_tcp=31,tcp_pseudo=0,app_layer_flow_smb=0,app_layer_flow_smtp=0,decoder_max_pkt_size=1463,decoder_dce_pkt_too_small=0,app_layer_tx_http=0,decoder_pkts=463,decoder_sll=0,app_layer_flow_ftp=0,app_layer_tx_dns_tcp=0,decoder_erspan=0 1568368562544966078 +``` diff --git a/plugins/inputs/suricata/suricata.go b/plugins/inputs/suricata/suricata.go new file mode 100644 index 000000000..17c0b5715 --- /dev/null +++ b/plugins/inputs/suricata/suricata.go @@ -0,0 +1,229 @@ +package suricata + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "net" + "strings" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const ( + // InBufSize is the input buffer size for JSON received via socket. 
+ // Set to 10MB, as depending on the number of threads the output might be + // large. + InBufSize = 10 * 1024 * 1024 +) + +// Suricata is a Telegraf input plugin for Suricata runtime statistics. +type Suricata struct { + Source string `toml:"source"` + Delimiter string `toml:"delimiter"` + + inputListener *net.UnixListener + cancel context.CancelFunc + + Log telegraf.Logger `toml:"-"` + + wg sync.WaitGroup +} + +// Description returns the plugin description. +func (s *Suricata) Description() string { + return "Suricata stats plugin" +} + +const sampleConfig = ` + ## Data sink for Suricata stats log + # This is expected to be a filename of a + # unix socket to be created for listening. + source = "/var/run/suricata-stats.sock" + + # Delimiter for flattening field keys, e.g. subitem "alert" of "detect" + # becomes "detect_alert" when delimiter is "_". + delimiter = "_" +` + +// SampleConfig returns a sample TOML section to illustrate configuration +// options. +func (s *Suricata) SampleConfig() string { + return sampleConfig +} + +// Start initiates background collection of JSON data from the socket +// provided to Suricata. +func (s *Suricata) Start(acc telegraf.Accumulator) error { + var err error + s.inputListener, err = net.ListenUnix("unix", &net.UnixAddr{ + Name: s.Source, + Net: "unix", + }) + if err != nil { + return err + } + ctx, cancel := context.WithCancel(context.Background()) + s.cancel = cancel + s.inputListener.SetUnlinkOnClose(true) + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.handleServerConnection(ctx, acc) + }() + return nil +} + +// Stop causes the plugin to cease collecting JSON data from the socket provided +// to Suricata. +func (s *Suricata) Stop() { + s.inputListener.Close() + if s.cancel != nil { + s.cancel() + } + s.wg.Wait() +} + +func (s *Suricata) readInput(ctx context.Context, acc telegraf.Accumulator, conn net.Conn) error { + reader := bufio.NewReaderSize(conn, InBufSize) + for { + select { + case <-ctx.Done(): + return nil + default: + line, rerr := reader.ReadBytes('\n') + if rerr != nil { + return rerr + } else if len(line) > 0 { + s.parse(acc, line) + } + } + } +} + +func (s *Suricata) handleServerConnection(ctx context.Context, acc telegraf.Accumulator) { + var err error + for { + select { + case <-ctx.Done(): + return + default: + var conn net.Conn + conn, err = s.inputListener.Accept() + if err != nil { + if !strings.HasSuffix(err.Error(), ": use of closed network connection") { + acc.AddError(err) + } + continue + } + err = s.readInput(ctx, acc, conn) + // we want to handle EOF as an opportunity to wait for a new + // connection -- this could, for example, happen when Suricata is + // restarted while Telegraf is running. 
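+ // Any other error is reported to the accumulator and ends this handler.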
+ if err != io.EOF { + acc.AddError(err) + return + } + } + } +} + +func flexFlatten(outmap map[string]interface{}, field string, v interface{}, delimiter string) error { + switch t := v.(type) { + case map[string]interface{}: + for k, v := range t { + var err error + if field == "" { + err = flexFlatten(outmap, k, v, delimiter) + } else { + err = flexFlatten(outmap, fmt.Sprintf("%s%s%s", field, delimiter, k), v, delimiter) + } + if err != nil { + return err + } + } + case float64: + outmap[field] = v.(float64) + default: + return fmt.Errorf("Unsupported type %T encountered", t) + } + return nil +} + +func (s *Suricata) parse(acc telegraf.Accumulator, sjson []byte) { + // initial parsing + var result map[string]interface{} + err := json.Unmarshal([]byte(sjson), &result) + if err != nil { + acc.AddError(err) + return + } + + // check for presence of relevant stats + if _, ok := result["stats"]; !ok { + s.Log.Debug("Input does not contain necessary 'stats' sub-object") + return + } + + if _, ok := result["stats"].(map[string]interface{}); !ok { + s.Log.Debug("The 'stats' sub-object does not have required structure") + return + } + + fields := make(map[string](map[string]interface{})) + totalmap := make(map[string]interface{}) + for k, v := range result["stats"].(map[string]interface{}) { + if k == "threads" { + if v, ok := v.(map[string]interface{}); ok { + for k, t := range v { + outmap := make(map[string]interface{}) + if threadStruct, ok := t.(map[string]interface{}); ok { + err = flexFlatten(outmap, "", threadStruct, s.Delimiter) + if err != nil { + s.Log.Debug(err) + // we skip this thread as something did not parse correctly + continue + } + fields[k] = outmap + } + } + } else { + s.Log.Debug("The 'threads' sub-object does not have required structure") + } + } else { + err = flexFlatten(totalmap, k, v, s.Delimiter) + if err != nil { + s.Log.Debug(err.Error()) + // we skip this subitem as something did not parse correctly + } + } + } + fields["total"] = totalmap + + for k := range fields { + if k == "Global" { + acc.AddFields("suricata", fields[k], nil) + } else { + acc.AddFields("suricata", fields[k], map[string]string{"thread": k}) + } + } +} + +// Gather measures and submits one full set of telemetry to Telegraf. +// Not used here, submission is completely input-driven. 
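+// It always returns nil; metrics are delivered asynchronously by the connection handler started in Start.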
+func (s *Suricata) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("suricata", func() telegraf.Input { + return &Suricata{ + Source: "/var/run/suricata-stats.sock", + Delimiter: "_", + } + }) +} diff --git a/plugins/inputs/suricata/suricata_test.go b/plugins/inputs/suricata/suricata_test.go new file mode 100644 index 000000000..02f298b97 --- /dev/null +++ b/plugins/inputs/suricata/suricata_test.go @@ -0,0 +1,290 @@ +package suricata + +import ( + "fmt" + "io/ioutil" + "log" + "math/rand" + "net" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var ex2 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"capture":{"kernel_packets":905344474,"kernel_drops":78355440,"kernel_packets_delta":2376742,"kernel_drops_delta":82049}}}` +var ex3 = `{"timestamp":"2017-03-06T07:43:39.000397+0000","event_type":"stats","stats":{"threads": { "W#05-wlp4s0": { "capture":{"kernel_packets":905344474,"kernel_drops":78355440}}}}}` + +func TestSuricataLarge(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + data, err := ioutil.ReadFile("testdata/test1.json") + require.NoError(t, err) + + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) + c.Write([]byte(data)) + c.Write([]byte("\n")) + c.Close() + + acc.Wait(1) +} + +func TestSuricata(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) + c.Write([]byte(ex2)) + c.Write([]byte("\n")) + c.Close() + + acc.Wait(1) + + s = Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + + expected := []telegraf.Metric{ + testutil.MustMetric( + "suricata", + map[string]string{ + "thread": "total", + }, + map[string]interface{}{ + "capture.kernel_packets": float64(905344474), + "capture.kernel_drops": float64(78355440), + "capture.kernel_packets_delta": float64(2376742), + "capture.kernel_drops_delta": float64(82049), + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestThreadStats(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Delimiter: ".", + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + + acc := testutil.Accumulator{} + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) + c.Write([]byte("")) + c.Write([]byte("\n")) + c.Write([]byte("foobard}\n")) + c.Write([]byte(ex3)) + c.Write([]byte("\n")) + c.Close() + acc.Wait(1) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "suricata", + 
map[string]string{ + "thread": "W#05-wlp4s0", + }, + map[string]interface{}{ + "capture.kernel_packets": float64(905344474), + "capture.kernel_drops": float64(78355440), + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), testutil.IgnoreTime()) +} + +func TestSuricataInvalid(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + acc.SetDebug(true) + + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) + c.Write([]byte("sfjiowef")) + c.Write([]byte("\n")) + c.Close() + + acc.WaitError(1) +} + +func TestSuricataInvalidPath(t *testing.T) { + tmpfn := fmt.Sprintf("/t%d/X", rand.Int63()) + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + + acc := testutil.Accumulator{} + require.Error(t, s.Start(&acc)) +} + +func TestSuricataTooLongLine(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) + c.Write([]byte(strings.Repeat("X", 20000000))) + c.Write([]byte("\n")) + c.Close() + + acc.WaitError(1) + +} + +func TestSuricataEmptyJSON(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + c, err := net.Dial("unix", tmpfn) + if err != nil { + log.Println(err) + + } + c.Write([]byte("\n")) + c.Close() + + acc.WaitError(1) +} + +func TestSuricataDisconnectSocket(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + + require.NoError(t, s.Start(&acc)) + defer s.Stop() + + c, err := net.Dial("unix", tmpfn) + require.NoError(t, err) + c.Write([]byte(ex2)) + c.Write([]byte("\n")) + c.Close() + + c, err = net.Dial("unix", tmpfn) + require.NoError(t, err) + c.Write([]byte(ex3)) + c.Write([]byte("\n")) + c.Close() + + acc.Wait(2) +} + +func TestSuricataStartStop(t *testing.T) { + dir, err := ioutil.TempDir("", "test") + require.NoError(t, err) + defer os.RemoveAll(dir) + tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) + + s := Suricata{ + Source: tmpfn, + Log: testutil.Logger{ + Name: "inputs.suricata", + }, + } + acc := testutil.Accumulator{} + require.NoError(t, s.Start(&acc)) + s.Stop() +} diff --git a/plugins/inputs/suricata/suricata_testutil.go b/plugins/inputs/suricata/suricata_testutil.go new file mode 100644 index 000000000..55aa2bb9b --- /dev/null +++ b/plugins/inputs/suricata/suricata_testutil.go @@ -0,0 +1,38 @@ +package suricata + +import ( + "bytes" + "sync" +) + +// A thread-safe 
Buffer wrapper to enable concurrent access to log output. +type buffer struct { + b bytes.Buffer + m sync.Mutex +} + +func (b *buffer) Read(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Read(p) +} +func (b *buffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Write(p) +} +func (b *buffer) String() string { + b.m.Lock() + defer b.m.Unlock() + return b.b.String() +} +func (b *buffer) Reset() { + b.m.Lock() + defer b.m.Unlock() + b.b.Reset() +} +func (b *buffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.b.Bytes() +} diff --git a/plugins/inputs/suricata/testdata/test1.json b/plugins/inputs/suricata/testdata/test1.json new file mode 100644 index 000000000..31208c4d1 --- /dev/null +++ b/plugins/inputs/suricata/testdata/test1.json @@ -0,0 +1 @@ +{ "timestamp": "2019-08-08T16:26:33.000244+0200", "event_type": "stats", "stats": { "uptime": 15, "capture": { "kernel_packets": 135, "kernel_packets_delta": 74, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 141, "pkts_delta": 63, "bytes": 26018, "bytes_delta": 13415, "invalid": 0, "invalid_delta": 0, "ipv4": 132, "ipv4_delta": 58, "ipv6": 4, "ipv6_delta": 2, "ethernet": 141, "ethernet_delta": 63, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 79, "tcp_delta": 35, "udp": 53, "udp_delta": 23, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 4, "icmpv6_delta": 2, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 184, "avg_pkt_size_delta": 23, "max_pkt_size": 1422, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0, "spare": 10000, "spare_delta": 0, "emerg_mode_entered": 0, "emerg_mode_entered_delta": 0, "emerg_mode_over": 0, "emerg_mode_over_delta": 0, "tcp_reuse": 0, "tcp_reuse_delta": 0, "memuse": 7083520, "memuse_delta": 4608 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 1, "sessions_delta": 1, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 1, "syn_delta": 1, "synack": 1, "synack_delta": 1, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0, "memuse": 3276800, "memuse_delta": 0, "reassembly_memuse": 12332832, "reassembly_memuse_delta": 0 }, "detect": { "alert": 2, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 1, "tls_delta": 1, "ssh": 0, "ssh_delta": 0, "imap": 0, 
"imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 5, "dns_udp_delta": 2, "failed_udp": 12, "failed_udp_delta": 6 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 12, "dns_udp_delta": 2 } }, "flow_mgr": { "closed_pruned": 0, "closed_pruned_delta": 0, "new_pruned": 0, "new_pruned_delta": 0, "est_pruned": 0, "est_pruned_delta": 0, "bypassed_pruned": 0, "bypassed_pruned_delta": 0, "flows_checked": 1, "flows_checked_delta": 1, "flows_notimeout": 1, "flows_notimeout_delta": 1, "flows_timeout": 0, "flows_timeout_delta": 0, "flows_timeout_inuse": 0, "flows_timeout_inuse_delta": 0, "flows_removed": 0, "flows_removed_delta": 0, "rows_checked": 65536, "rows_checked_delta": 0, "rows_skipped": 65535, "rows_skipped_delta": -1, "rows_empty": 0, "rows_empty_delta": 0, "rows_busy": 0, "rows_busy_delta": 0, "rows_maxlen": 1, "rows_maxlen_delta": 1 }, "dns": { "memuse": 1402, "memuse_delta": 595, "memcap_state": 0, "memcap_state_delta": 0, "memcap_global": 0, "memcap_global_delta": 0 }, "http": { "memuse": 0, "memuse_delta": 0, "memcap": 0, "memcap_delta": 0 }, "threads": { "W#01-wlp4s0": { "capture": { "kernel_packets": 25, "kernel_packets_delta": 22, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 25, "pkts_delta": 22, "bytes": 7026, "bytes_delta": 6828, "invalid": 0, "invalid_delta": 0, "ipv4": 19, "ipv4_delta": 19, "ipv6": 1, "ipv6_delta": 0, "ethernet": 25, "ethernet_delta": 22, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 17, "tcp_delta": 17, "udp": 2, "udp_delta": 2, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 281, "avg_pkt_size_delta": 215, "max_pkt_size": 1422, "max_pkt_size_delta": 1336, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 1, "sessions_delta": 1, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 1, "syn_delta": 1, "synack": 1, "synack_delta": 1, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, 
"ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 1, "tls_delta": 1, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 1 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#02-wlp4s0": { "capture": { "kernel_packets": 32, "kernel_packets_delta": 21, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 32, "pkts_delta": 19, "bytes": 5378, "bytes_delta": 3085, "invalid": 0, "invalid_delta": 0, "ipv4": 32, "ipv4_delta": 19, "ipv6": 0, "ipv6_delta": 0, "ethernet": 32, "ethernet_delta": 19, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 25, "tcp_delta": 12, "udp": 7, "udp_delta": 7, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 168, "avg_pkt_size_delta": -8, "max_pkt_size": 626, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 2, "failed_udp_delta": 2 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#03-wlp4s0": { "capture": { "kernel_packets": 44, "kernel_packets_delta": 9, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 45, "pkts_delta": 9, "bytes": 9392, "bytes_delta": 1718, "invalid": 
0, "invalid_delta": 0, "ipv4": 45, "ipv4_delta": 9, "ipv6": 0, "ipv6_delta": 0, "ethernet": 45, "ethernet_delta": 9, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 33, "tcp_delta": 2, "udp": 12, "udp_delta": 7, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 208, "avg_pkt_size_delta": -5, "max_pkt_size": 1422, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 1, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 5, "failed_udp_delta": 2 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#04-wlp4s0": { "capture": { "kernel_packets": 4, "kernel_packets_delta": 0, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 10, "pkts_delta": 0, "bytes": 740, "bytes_delta": 0, "invalid": 0, "invalid_delta": 0, "ipv4": 10, "ipv4_delta": 0, "ipv6": 0, "ipv6_delta": 0, "ethernet": 10, "ethernet_delta": 0, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 0, "tcp_delta": 0, "udp": 10, "udp_delta": 0, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 74, "avg_pkt_size_delta": 0, "max_pkt_size": 86, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 
0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 1, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 1, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 4, "dns_udp_delta": 0 } } }, "W#05-wlp4s0": { "capture": { "kernel_packets": 14, "kernel_packets_delta": 11, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 14, "pkts_delta": 4, "bytes": 1723, "bytes_delta": 797, "invalid": 0, "invalid_delta": 0, "ipv4": 13, "ipv4_delta": 3, "ipv6": 1, "ipv6_delta": 1, "ethernet": 14, "ethernet_delta": 4, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 2, "tcp_delta": 2, "udp": 11, "udp_delta": 1, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 1, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 123, "avg_pkt_size_delta": 31, "max_pkt_size": 478, "max_pkt_size_delta": 299, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, 
"invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 1, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 4, "dns_udp_delta": 0 } } }, "W#06-wlp4s0": { "capture": { "kernel_packets": 11, "kernel_packets_delta": 8, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 11, "pkts_delta": 6, "bytes": 1254, "bytes_delta": 696, "invalid": 0, "invalid_delta": 0, "ipv4": 10, "ipv4_delta": 6, "ipv6": 1, "ipv6_delta": 0, "ethernet": 11, "ethernet_delta": 6, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 2, "tcp_delta": 2, "udp": 8, "udp_delta": 4, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 114, "avg_pkt_size_delta": 3, "max_pkt_size": 215, "max_pkt_size_delta": 62, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 2, "dns_udp_delta": 1, 
"failed_udp": 1, "failed_udp_delta": 1 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 3, "dns_udp_delta": 1 } } }, "W#07-wlp4s0": { "capture": { "kernel_packets": 1, "kernel_packets_delta": 0, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 1, "pkts_delta": 0, "bytes": 214, "bytes_delta": 0, "invalid": 0, "invalid_delta": 0, "ipv4": 1, "ipv4_delta": 0, "ipv6": 0, "ipv6_delta": 0, "ethernet": 1, "ethernet_delta": 0, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 0, "tcp_delta": 0, "udp": 1, "udp_delta": 0, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 0, "icmpv6_delta": 0, "ppp": 0, "ppp_delta": 0, "pppoe": 0, "pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 214, "avg_pkt_size_delta": 0, "max_pkt_size": 214, "max_pkt_size_delta": 0, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0, "failed_udp": 1, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 0, "dns_udp_delta": 0 } } }, "W#08-wlp4s0": { "capture": { "kernel_packets": 4, "kernel_packets_delta": 3, "kernel_drops": 0, "kernel_drops_delta": 0 }, "decoder": { "pkts": 3, "pkts_delta": 3, "bytes": 291, "bytes_delta": 291, "invalid": 0, "invalid_delta": 0, "ipv4": 2, "ipv4_delta": 2, "ipv6": 1, "ipv6_delta": 1, "ethernet": 3, "ethernet_delta": 3, "raw": 0, "raw_delta": 0, "null": 0, "null_delta": 0, "sll": 0, "sll_delta": 0, "tcp": 0, "tcp_delta": 0, "udp": 2, "udp_delta": 2, "sctp": 0, "sctp_delta": 0, "icmpv4": 0, "icmpv4_delta": 0, "icmpv6": 1, "icmpv6_delta": 1, "ppp": 0, "ppp_delta": 0, "pppoe": 0, 
"pppoe_delta": 0, "gre": 0, "gre_delta": 0, "vlan": 0, "vlan_delta": 0, "vlan_qinq": 0, "vlan_qinq_delta": 0, "teredo": 0, "teredo_delta": 0, "ipv4_in_ipv6": 0, "ipv4_in_ipv6_delta": 0, "ipv6_in_ipv6": 0, "ipv6_in_ipv6_delta": 0, "mpls": 0, "mpls_delta": 0, "avg_pkt_size": 97, "avg_pkt_size_delta": 97, "max_pkt_size": 134, "max_pkt_size_delta": 134, "erspan": 0, "erspan_delta": 0, "ipraw": { "invalid_ip_version": 0, "invalid_ip_version_delta": 0 }, "ltnull": { "pkt_too_small": 0, "pkt_too_small_delta": 0, "unsupported_type": 0, "unsupported_type_delta": 0 }, "dce": { "pkt_too_small": 0, "pkt_too_small_delta": 0 } }, "flow": { "memcap": 0, "memcap_delta": 0 }, "defrag": { "ipv4": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "ipv6": { "fragments": 0, "fragments_delta": 0, "reassembled": 0, "reassembled_delta": 0, "timeouts": 0, "timeouts_delta": 0 }, "max_frag_hits": 0, "max_frag_hits_delta": 0 }, "tcp": { "sessions": 0, "sessions_delta": 0, "ssn_memcap_drop": 0, "ssn_memcap_drop_delta": 0, "pseudo": 0, "pseudo_delta": 0, "pseudo_failed": 0, "pseudo_failed_delta": 0, "invalid_checksum": 0, "invalid_checksum_delta": 0, "no_flow": 0, "no_flow_delta": 0, "syn": 0, "syn_delta": 0, "synack": 0, "synack_delta": 0, "rst": 0, "rst_delta": 0, "segment_memcap_drop": 0, "segment_memcap_drop_delta": 0, "stream_depth_reached": 0, "stream_depth_reached_delta": 0, "reassembly_gap": 0, "reassembly_gap_delta": 0 }, "detect": { "alert": 0, "alert_delta": 0 }, "app_layer": { "flow": { "http": 0, "http_delta": 0, "ftp": 0, "ftp_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "ssh": 0, "ssh_delta": 0, "imap": 0, "imap_delta": 0, "msn": 0, "msn_delta": 0, "smb": 0, "smb_delta": 0, "dcerpc_tcp": 0, "dcerpc_tcp_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "failed_tcp": 0, "failed_tcp_delta": 0, "dcerpc_udp": 0, "dcerpc_udp_delta": 0, "dns_udp": 1, "dns_udp_delta": 1, "failed_udp": 0, "failed_udp_delta": 0 }, "tx": { "http": 0, "http_delta": 0, "smtp": 0, "smtp_delta": 0, "tls": 0, "tls_delta": 0, "dns_tcp": 0, "dns_tcp_delta": 0, "dns_udp": 1, "dns_udp_delta": 1 } } }, "FM#01": { "flow_mgr": { "closed_pruned": 0, "closed_pruned_delta": 0, "new_pruned": 0, "new_pruned_delta": 0, "est_pruned": 0, "est_pruned_delta": 0, "bypassed_pruned": 0, "bypassed_pruned_delta": 0, "flows_checked": 1, "flows_checked_delta": 1, "flows_notimeout": 1, "flows_notimeout_delta": 1, "flows_timeout": 0, "flows_timeout_delta": 0, "flows_timeout_inuse": 0, "flows_timeout_inuse_delta": 0, "flows_removed": 0, "flows_removed_delta": 0, "rows_checked": 65536, "rows_checked_delta": 0, "rows_skipped": 65535, "rows_skipped_delta": -1, "rows_empty": 0, "rows_empty_delta": 0, "rows_busy": 0, "rows_busy_delta": 0, "rows_maxlen": 1, "rows_maxlen_delta": 1 }, "flow": { "spare": 10000, "spare_delta": 0, "emerg_mode_entered": 0, "emerg_mode_entered_delta": 0, "emerg_mode_over": 0, "emerg_mode_over_delta": 0, "tcp_reuse": 0, "tcp_reuse_delta": 0 } }, "Global": { "tcp": { "memuse": 3276800, "memuse_delta": 0, "reassembly_memuse": 12332832, "reassembly_memuse_delta": 0 }, "dns": { "memuse": 1402, "memuse_delta": 595, "memcap_state": 0, "memcap_state_delta": 0, "memcap_global": 0, "memcap_global_delta": 0 }, "http": { "memuse": 0, "memuse_delta": 0, "memcap": 0, "memcap_delta": 0 }, "flow": { "memuse": 7083520, "memuse_delta": 4608 } } } }} \ No newline at end of file diff --git a/plugins/inputs/system/SWAP_README.md b/plugins/inputs/swap/README.md similarity index 
57% rename from plugins/inputs/system/SWAP_README.md rename to plugins/inputs/swap/README.md index a5444ff2a..983892871 100644 --- a/plugins/inputs/system/SWAP_README.md +++ b/plugins/inputs/swap/README.md @@ -16,12 +16,12 @@ For more information on what swap memory is, read [All about Linux swap space](h - swap - fields: - - free (int) - - total (int) - - used (int) - - used_percent (float) - - in (int) - - out (int) + - free (int, bytes): free swap memory + - total (int, bytes): total swap memory + - used (int, bytes): used swap memory + - used_percent (float, percent): percentage of swap memory used + - in (int, bytes): data swapped in since last boot calculated from page number + - out (int, bytes): data swapped out since last boot calculated from page number ### Example Output: diff --git a/plugins/inputs/system/swap.go b/plugins/inputs/swap/swap.go similarity index 88% rename from plugins/inputs/system/swap.go rename to plugins/inputs/swap/swap.go index f1f7c8e23..eabb40a03 100644 --- a/plugins/inputs/system/swap.go +++ b/plugins/inputs/swap/swap.go @@ -1,14 +1,15 @@ -package system +package swap import ( "fmt" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/influxdata/telegraf/plugins/inputs/system" ) type SwapStats struct { - ps PS + ps system.PS } func (_ *SwapStats) Description() string { @@ -40,7 +41,7 @@ func (s *SwapStats) Gather(acc telegraf.Accumulator) error { } func init() { - ps := newSystemPS() + ps := system.NewSystemPS() inputs.Add("swap", func() telegraf.Input { return &SwapStats{ps: ps} }) diff --git a/plugins/inputs/system/swap_test.go b/plugins/inputs/swap/swap_test.go similarity index 89% rename from plugins/inputs/system/swap_test.go rename to plugins/inputs/swap/swap_test.go index ec9a0fe54..3f97b354e 100644 --- a/plugins/inputs/system/swap_test.go +++ b/plugins/inputs/swap/swap_test.go @@ -1,15 +1,16 @@ -package system +package swap import ( "testing" + "github.com/influxdata/telegraf/plugins/inputs/system" "github.com/influxdata/telegraf/testutil" "github.com/shirou/gopsutil/mem" "github.com/stretchr/testify/require" ) func TestSwapStats(t *testing.T) { - var mps MockPS + var mps system.MockPS var err error defer mps.AssertExpectations(t) var acc testutil.Accumulator diff --git a/plugins/inputs/synproxy/README.md b/plugins/inputs/synproxy/README.md new file mode 100644 index 000000000..4e275886f --- /dev/null +++ b/plugins/inputs/synproxy/README.md @@ -0,0 +1,49 @@ +# Synproxy Input Plugin + +The synproxy plugin gathers the synproxy counters. Synproxy is a Linux netfilter module used for SYN attack mitigation. +The use of synproxy is documented in `man iptables-extensions` under the SYNPROXY section. 
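For background, the kernel exposes these counters in `/proc/net/stat/synproxy` as a header line of counter names followed by one row of hexadecimal values per CPU, and the plugin sums each counter across CPUs. The following standalone Go sketch is illustrative only (not part of this patch) and assumes just that header/row layout, which the plugin's test fixtures further below reproduce:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

func main() {
	// One header line of counter names, then one row of hex values per CPU.
	f, err := os.Open("/proc/net/stat/synproxy")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	if !sc.Scan() {
		panic("empty synproxy stat file")
	}
	names := strings.Fields(sc.Text())

	totals := make(map[string]uint64, len(names))
	for sc.Scan() {
		for i, v := range strings.Fields(sc.Text()) {
			if i >= len(names) {
				break // ignore columns without a header name
			}
			n, err := strconv.ParseUint(v, 16, 32)
			if err != nil {
				panic(err)
			}
			totals[names[i]] += n // sum the per-CPU counters
		}
	}
	fmt.Println(totals)
}
```

The plugin's own implementation in `synproxy_linux.go` below does the same while also tolerating reordered or unknown header columns.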
+ + +### Configuration + +The synproxy plugin does not need any configuration + +```toml +[[inputs.synproxy]] + # no configuration +``` + +### Metrics + +The following synproxy counters are gathered + +- synproxy + - fields: + - cookie_invalid (uint32, packets, counter) - Invalid cookies + - cookie_retrans (uint32, packets, counter) - Cookies retransmitted + - cookie_valid (uint32, packets, counter) - Valid cookies + - entries (uint32, packets, counter) - Entries + - syn_received (uint32, packets, counter) - SYN received + - conn_reopened (uint32, packets, counter) - Connections reopened + +### Sample Queries + +Get the number of packets per 5 minutes for the measurement in the last hour from InfluxDB: +``` +SELECT difference(last("cookie_invalid")) AS "cookie_invalid", difference(last("cookie_retrans")) AS "cookie_retrans", difference(last("cookie_valid")) AS "cookie_valid", difference(last("entries")) AS "entries", difference(last("syn_received")) AS "syn_received", difference(last("conn_reopened")) AS "conn_reopened" FROM synproxy WHERE time > NOW() - 1h GROUP BY time(5m) FILL(null); +``` + +### Troubleshooting + +Execute the following CLI command in Linux to test the synproxy counters: +``` +cat /proc/net/stat/synproxy +``` + +### Example Output + +This section shows example output in Line Protocol format. + +``` +synproxy,host=Filter-GW01,rack=filter-node1 conn_reopened=0i,cookie_invalid=235i,cookie_retrans=0i,cookie_valid=8814i,entries=0i,syn_received=8742i 1549550634000000000 +``` diff --git a/plugins/inputs/synproxy/synproxy.go b/plugins/inputs/synproxy/synproxy.go new file mode 100644 index 000000000..6a5b2b323 --- /dev/null +++ b/plugins/inputs/synproxy/synproxy.go @@ -0,0 +1,40 @@ +package synproxy + +import ( + "os" + "path" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +type Synproxy struct { + Log telegraf.Logger `toml:"-"` + + // Synproxy stats filename (proc filesystem) + statFile string +} + +func (k *Synproxy) Description() string { + return "Get synproxy counter statistics from procfs" +} + +func (k *Synproxy) SampleConfig() string { + return "" +} + +func getHostProc() string { + procPath := "/proc" + if os.Getenv("HOST_PROC") != "" { + procPath = os.Getenv("HOST_PROC") + } + return procPath +} + +func init() { + inputs.Add("synproxy", func() telegraf.Input { + return &Synproxy{ + statFile: path.Join(getHostProc(), "/net/stat/synproxy"), + } + }) +} diff --git a/plugins/inputs/synproxy/synproxy_linux.go b/plugins/inputs/synproxy/synproxy_linux.go new file mode 100644 index 000000000..bcc972938 --- /dev/null +++ b/plugins/inputs/synproxy/synproxy_linux.go @@ -0,0 +1,90 @@ +// +build linux + +package synproxy + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" + + "github.com/influxdata/telegraf" +) + +func (k *Synproxy) Gather(acc telegraf.Accumulator) error { + data, err := k.getSynproxyStat() + if err != nil { + return err + } + + acc.AddCounter("synproxy", data, map[string]string{}) + return nil +} + +func inSlice(haystack []string, needle string) bool { + for _, val := range haystack { + if needle == val { + return true + } + } + return false +} + +func (k *Synproxy) getSynproxyStat() (map[string]interface{}, error) { + var hname []string + counters := []string{"entries", "syn_received", "cookie_invalid", "cookie_valid", "cookie_retrans", "conn_reopened"} + fields := make(map[string]interface{}) + + // Open synproxy file in proc filesystem + file, err := os.Open(k.statFile) + if err != nil { + return nil, err + } 
+ defer file.Close() + + // Initialise expected fields + for _, val := range counters { + fields[val] = uint32(0) + } + + scanner := bufio.NewScanner(file) + // Read header row + if scanner.Scan() { + line := scanner.Text() + // Parse fields separated by whitespace + dataFields := strings.Fields(line) + for _, val := range dataFields { + if !inSlice(counters, val) { + val = "" + } + hname = append(hname, val) + } + } + if len(hname) == 0 { + return nil, fmt.Errorf("invalid data") + } + // Read data rows + for scanner.Scan() { + line := scanner.Text() + // Parse fields separated by whitespace + dataFields := strings.Fields(line) + // If number of data fields do not match number of header fields + if len(dataFields) != len(hname) { + return nil, fmt.Errorf("invalid number of columns in data, expected %d found %d", len(hname), + len(dataFields)) + } + for i, val := range dataFields { + // Convert from hexstring to int32 + x, err := strconv.ParseUint(val, 16, 32) + // If field is not a valid hexstring + if err != nil { + return nil, fmt.Errorf("invalid value '%s' found", val) + } + if hname[i] != "" { + fields[hname[i]] = fields[hname[i]].(uint32) + uint32(x) + } + } + } + return fields, nil +} diff --git a/plugins/inputs/synproxy/synproxy_notlinux.go b/plugins/inputs/synproxy/synproxy_notlinux.go new file mode 100644 index 000000000..71a223644 --- /dev/null +++ b/plugins/inputs/synproxy/synproxy_notlinux.go @@ -0,0 +1,23 @@ +// +build !linux + +package synproxy + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +func (k *Synproxy) Init() error { + k.Log.Warn("Current platform is not supported") + return nil +} + +func (k *Synproxy) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("synproxy", func() telegraf.Input { + return &Synproxy{} + }) +} diff --git a/plugins/inputs/synproxy/synproxy_test.go b/plugins/inputs/synproxy/synproxy_test.go new file mode 100644 index 000000000..83d752ff1 --- /dev/null +++ b/plugins/inputs/synproxy/synproxy_test.go @@ -0,0 +1,169 @@ +// +build linux + +package synproxy + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/influxdata/telegraf/testutil" + + "github.com/stretchr/testify/assert" +) + +func TestSynproxyFileNormal(t *testing.T) { + testSynproxyFileData(t, synproxyFileNormal, synproxyResultNormal) +} + +func TestSynproxyFileOverflow(t *testing.T) { + testSynproxyFileData(t, synproxyFileOverflow, synproxyResultOverflow) +} + +func TestSynproxyFileExtended(t *testing.T) { + testSynproxyFileData(t, synproxyFileExtended, synproxyResultNormal) +} + +func TestSynproxyFileAltered(t *testing.T) { + testSynproxyFileData(t, synproxyFileAltered, synproxyResultNormal) +} + +func TestSynproxyFileHeaderMismatch(t *testing.T) { + tmpfile := makeFakeSynproxyFile([]byte(synproxyFileHeaderMismatch)) + defer os.Remove(tmpfile) + + k := Synproxy{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid number of columns in data") +} + +func TestSynproxyFileInvalidHex(t *testing.T) { + tmpfile := makeFakeSynproxyFile([]byte(synproxyFileInvalidHex)) + defer os.Remove(tmpfile) + + k := Synproxy{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid value") +} + +func TestNoSynproxyFile(t *testing.T) { + tmpfile := makeFakeSynproxyFile([]byte(synproxyFileNormal)) + // Remove file to generate "no 
such file" error + os.Remove(tmpfile) + + k := Synproxy{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.Error(t, err) +} + +// Valid Synproxy file +const synproxyFileNormal = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened +00000000 00007a88 00002af7 00007995 00000000 00000000 +00000000 0000892c 000015e3 00008852 00000000 00000000 +00000000 00007a80 00002ccc 0000796a 00000000 00000000 +00000000 000079f7 00002bf5 0000790a 00000000 00000000 +00000000 00007a08 00002c9a 00007901 00000000 00000000 +00000000 00007cfc 00002b36 000078fd 00000000 00000000 +00000000 000079c2 00002c2b 000078d6 00000000 00000000 +00000000 0000798a 00002ba8 000078a0 00000000 00000000` + +const synproxyFileOverflow = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened +00000000 80000001 e0000000 80000001 00000000 00000000 +00000000 80000003 f0000009 80000003 00000000 00000000` + +const synproxyFileHeaderMismatch = `entries syn_received cookie_invalid cookie_valid cookie_retrans +00000000 00000002 00000000 00000002 00000000 00000000 +00000000 00000004 00000015 00000004 00000000 00000000 +00000000 00000003 00000000 00000003 00000000 00000000 +00000000 00000002 00000000 00000002 00000000 00000000 +00000000 00000003 00000009 00000003 00000000 00000000 +00000000 00000003 00000009 00000003 00000000 00000000 +00000000 00000001 00000000 00000001 00000000 00000000 +00000000 00000003 00000009 00000003 00000000 00000000` + +const synproxyFileInvalidHex = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened +entries 00000002 00000000 00000002 00000000 00000000 +00000000 00000003 00000009 00000003 00000000 00000000` + +const synproxyFileExtended = `entries syn_received cookie_invalid cookie_valid cookie_retrans conn_reopened new_counter +00000000 00007a88 00002af7 00007995 00000000 00000000 00000000 +00000000 0000892c 000015e3 00008852 00000000 00000000 00000000 +00000000 00007a80 00002ccc 0000796a 00000000 00000000 00000000 +00000000 000079f7 00002bf5 0000790a 00000000 00000000 00000000 +00000000 00007a08 00002c9a 00007901 00000000 00000000 00000000 +00000000 00007cfc 00002b36 000078fd 00000000 00000000 00000000 +00000000 000079c2 00002c2b 000078d6 00000000 00000000 00000000 +00000000 0000798a 00002ba8 000078a0 00000000 00000000 00000000` + +const synproxyFileAltered = `entries cookie_invalid cookie_valid syn_received conn_reopened +00000000 00002af7 00007995 00007a88 00000000 +00000000 000015e3 00008852 0000892c 00000000 +00000000 00002ccc 0000796a 00007a80 00000000 +00000000 00002bf5 0000790a 000079f7 00000000 +00000000 00002c9a 00007901 00007a08 00000000 +00000000 00002b36 000078fd 00007cfc 00000000 +00000000 00002c2b 000078d6 000079c2 00000000 +00000000 00002ba8 000078a0 0000798a 00000000` + +var synproxyResultNormal = map[string]interface{}{ + "entries": uint32(0x00000000), + "syn_received": uint32(0x0003e27b), + "cookie_invalid": uint32(0x0001493e), + "cookie_valid": uint32(0x0003d7cf), + "cookie_retrans": uint32(0x00000000), + "conn_reopened": uint32(0x00000000), +} + +var synproxyResultOverflow = map[string]interface{}{ + "entries": uint32(0x00000000), + "syn_received": uint32(0x00000004), + "cookie_invalid": uint32(0xd0000009), + "cookie_valid": uint32(0x00000004), + "cookie_retrans": uint32(0x00000000), + "conn_reopened": uint32(0x00000000), +} + +func testSynproxyFileData(t *testing.T, fileData string, telegrafData map[string]interface{}) { + tmpfile := makeFakeSynproxyFile([]byte(fileData)) + 
defer os.Remove(tmpfile) + + k := Synproxy{ + statFile: tmpfile, + } + + acc := testutil.Accumulator{} + err := k.Gather(&acc) + assert.NoError(t, err) + + acc.AssertContainsFields(t, "synproxy", telegrafData) +} + +func makeFakeSynproxyFile(content []byte) string { + tmpfile, err := ioutil.TempFile("", "synproxy_test") + if err != nil { + panic(err) + } + + if _, err := tmpfile.Write(content); err != nil { + panic(err) + } + if err := tmpfile.Close(); err != nil { + panic(err) + } + + return tmpfile.Name() +} diff --git a/plugins/inputs/syslog/README.md b/plugins/inputs/syslog/README.md index 107727947..32c5f2717 100644 --- a/plugins/inputs/syslog/README.md +++ b/plugins/inputs/syslog/README.md @@ -1,8 +1,10 @@ # Syslog Input Plugin The syslog plugin listens for syslog messages transmitted over -[UDP](https://tools.ietf.org/html/rfc5426) or -[TCP](https://tools.ietf.org/html/rfc5425). +a Unix Domain socket, +[UDP](https://tools.ietf.org/html/rfc5426), +[TCP](https://tools.ietf.org/html/rfc6587), or +[TLS](https://tools.ietf.org/html/rfc5425); with or without the octet counting framing. Syslog messages should be formatted according to [RFC 5424](https://tools.ietf.org/html/rfc5424). @@ -11,10 +13,12 @@ Syslog messages should be formatted according to ```toml [[inputs.syslog]] - ## Specify an ip or hostname with port - eg., tcp://localhost:6514, tcp://10.0.0.1:6514 ## Protocol, address and port to host the syslog receiver. ## If no host is specified, then localhost is used. ## If no port is specified, 6514 is used (RFC5425#section-4.1). + ## ex: server = "tcp://localhost:6514" + ## server = "udp://:6514" + ## server = "unix:///var/run/telegraf-syslog.sock" server = "tcp://:6514" ## TLS Config @@ -33,9 +37,19 @@ Syslog messages should be formatted according to ## Only applies to stream sockets (e.g. TCP). # max_connections = 1024 - ## Read timeout (default = 500ms). + ## Read timeout is the maximum time allowed for reading a single message (default = 5s). ## 0 means unlimited. - # read_timeout = 500ms + # read_timeout = "5s" + + ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). + ## Whether the messages come using the octet-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## or the non-transparent framing technique (RFC6587#section-3.4.2). + ## Must be one of "octet-counting", "non-transparent". + # framing = "octet-counting" + + ## The trailer to be expected in case of non-transparent framing (default = "LF"). + ## Must be one of "LF", or "NUL". + # trailer = "LF" ## Whether to parse in best effort mode or not (default = false). ## By default best effort parsing is off. @@ -49,31 +63,20 @@ Syslog messages should be formatted according to # sdparam_separator = "_" ``` -#### Best Effort +#### Message transport + +The `framing` option only applies to streams. It governs the way we expect to receive messages within the stream: either with the [`"octet counting"`](https://tools.ietf.org/html/rfc5425#section-4.3) technique (default) or with the [`"non-transparent"`](https://tools.ietf.org/html/rfc6587#section-3.4.2) framing. + +The `trailer` option only applies when the `framing` option is `"non-transparent"`. It must have one of the following values: `"LF"` (default), or `"NUL"`. + +#### Best effort The [`best_effort`](https://github.com/influxdata/go-syslog#best-effort-mode) option instructs the parser to extract partial but valid info from syslog -messages. 
If unset only full messages will be collected. -### Metrics - -- syslog - - tags - - severity (string) - - facility (string) - - hostname (string) - - appname (string) - - fields - - version (integer) - - severity_code (integer) - - facility_code (integer) - - timestamp (integer) - - procid (string) - - msgid (string) - - sdid (bool) - - *Structured Data* (string) - -### Rsyslog Integration +#### Rsyslog Integration Rsyslog can be configured to forward logging messages to Telegraf by configuring [remote logging](https://www.rsyslog.com/doc/v8-stable/configuration/actions.html#remote-machine). @@ -93,6 +96,68 @@ $ActionQueueSaveOnShutdown on # save in-memory data if rsyslog shuts down # forward over tcp with octet framing according to RFC 5425 *.* @@(o)127.0.0.1:6514;RSYSLOG_SyslogProtocol23Format + +# uncomment to use udp according to RFC 5424 +#*.* @127.0.0.1:6514;RSYSLOG_SyslogProtocol23Format +``` + +You can alternatively use the `advanced` format (aka RainerScript): +``` +# forward over tcp with octet framing according to RFC 5425 +action(type="omfwd" Protocol="tcp" TCP_Framing="octet-counted" Target="127.0.0.1" Port="6514" Template="RSYSLOG_SyslogProtocol23Format") + +# uncomment to use udp according to RFC 5424 +#action(type="omfwd" Protocol="udp" Target="127.0.0.1" Port="6514" Template="RSYSLOG_SyslogProtocol23Format") ``` To complete TLS setup please refer to [rsyslog docs](https://www.rsyslog.com/doc/v8-stable/tutorials/tls.html). + +### Metrics + +- syslog + - tags + - severity (string) + - facility (string) + - hostname (string) + - appname (string) + - fields + - version (integer) + - severity_code (integer) + - facility_code (integer) + - timestamp (integer): the time recorded in the syslog message + - procid (string) + - msgid (string) + - sdid (bool) + - *Structured Data* (string) + - timestamp: the time the message was received + +#### Structured Data + +Structured data produces field keys by joining the `SD_ID` and the `PARAM_NAME` with the `sdparam_separator`, as in the following example: +``` +170 <165>1 2018-10-01:14:15.000Z mymachine.example.com evntslog - ID47 [exampleSDID@32473 iut="3" eventSource="Application" eventID="1011"] An application event log entry... +``` +``` +syslog,appname=evntslog,facility=local4,hostname=mymachine.example.com,severity=notice exampleSDID@32473_eventID="1011",exampleSDID@32473_eventSource="Application",exampleSDID@32473_iut="3",facility_code=20i,message="An application event log entry...",msgid="ID47",severity_code=5i,timestamp=1065910455003000000i,version=1i 1538421339749472344 +``` + +### Troubleshooting + +You can send debugging messages directly to the input plugin using netcat: + +```sh +# TCP with octet framing +echo "57 <13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc 127.0.0.1 6514 + +# UDP +echo "<13>1 2018-10-01T12:00:00.0Z example.org root - - - test" | nc -u 127.0.0.1 6514 +``` + +#### RFC3164 + +RFC3164 encoded messages are not currently supported. You may see the following error if a message is encoded in this format: +``` +E! Error in plugin [inputs.syslog]: expecting a version value in the range 1-999 [col 5] +``` + +You can use rsyslog to translate RFC3164 syslog messages into RFC5424 format. 
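To make the framing discussion above concrete, here is a minimal Go sketch of a test sender, illustrative only and assuming the default `tcp://:6514` address from the sample configuration. It applies octet-counting framing by prefixing the message with its length in bytes and a space, which is what the leading number does in the netcat example:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	msg := `<13>1 2018-10-01T12:00:00.0Z example.org root - - - test`

	conn, err := net.Dial("tcp", "127.0.0.1:6514") // plugin's default listener address
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Octet-counting framing (RFC 5425 section 4.3.1): prefix the message
	// with its length in bytes, followed by a single space.
	frame := fmt.Sprintf("%d %s", len(msg), msg)
	if _, err := conn.Write([]byte(frame)); err != nil {
		panic(err)
	}
}
```

With `framing = "non-transparent"`, the length prefix is omitted and each message is instead terminated by the configured `trailer` (LF by default).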
diff --git a/plugins/inputs/syslog/commons_test.go b/plugins/inputs/syslog/commons_test.go new file mode 100644 index 000000000..10f2ddf51 --- /dev/null +++ b/plugins/inputs/syslog/commons_test.go @@ -0,0 +1,65 @@ +package syslog + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + framing "github.com/influxdata/telegraf/internal/syslog" + "github.com/influxdata/telegraf/testutil" +) + +var ( + pki = testutil.NewPKI("../../../testutil/pki") +) + +type testCasePacket struct { + name string + data []byte + wantBestEffort telegraf.Metric + wantStrict telegraf.Metric + werr bool +} + +type testCaseStream struct { + name string + data []byte + wantBestEffort []telegraf.Metric + wantStrict []telegraf.Metric + werr int // how many errors we expect in the strict mode? +} + +func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog { + return &Syslog{ + Address: address, + now: func() time.Time { + return defaultTime + }, + BestEffort: bestEffort, + Separator: "_", + } +} + +func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool, f framing.Framing) *Syslog { + d := &internal.Duration{ + Duration: defaultReadTimeout, + } + s := &Syslog{ + Address: address, + now: func() time.Time { + return defaultTime + }, + Framing: f, + ReadTimeout: d, + BestEffort: bestEffort, + Separator: "_", + } + if keepAlive != nil { + s.KeepAlivePeriod = keepAlive + } + if maxConn > 0 { + s.MaxConnections = maxConn + } + + return s +} diff --git a/plugins/inputs/syslog/nontransparent_test.go b/plugins/inputs/syslog/nontransparent_test.go new file mode 100644 index 000000000..d0352c6ae --- /dev/null +++ b/plugins/inputs/syslog/nontransparent_test.go @@ -0,0 +1,296 @@ +package syslog + +import ( + "crypto/tls" + "io/ioutil" + "net" + "os" + "path/filepath" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + framing "github.com/influxdata/telegraf/internal/syslog" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func getTestCasesForNonTransparent() []testCaseStream { + testCases := []testCaseStream{ + { + name: "1st/avg/ok", + data: []byte(`<29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ + "version": uint16(1), + "timestamp": time.Unix(1456029177, 0).UnixNano(), + "procid": "2341", + "msgid": "2", + "message": `"GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`, + "origin": true, + "meta_sequence": "14125553", + "meta_service": "someservice", + "severity_code": 5, + "facility_code": 3, + }, + defaultTime, + ), + }, + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ + "version": uint16(1), + "timestamp": time.Unix(1456029177, 0).UnixNano(), + "procid": "2341", + "msgid": "2", + "message": `"GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`, + "origin": true, + "meta_sequence": "14125553", + "meta_service": "someservice", + "severity_code": 5, + 
"facility_code": 3, + }, + defaultTime, + ), + }, + werr: 1, + }, + { + name: "1st/min/ok//2nd/min/ok", + data: []byte("<1>2 - - - - - -\n<4>11 - - - - - -\n"), + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(2), + "severity_code": 1, + "facility_code": 0, + }, + defaultTime, + ), + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "warning", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(11), + "severity_code": 4, + "facility_code": 0, + }, + defaultTime.Add(time.Nanosecond), + ), + }, + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(2), + "severity_code": 1, + "facility_code": 0, + }, + defaultTime, + ), + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "warning", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(11), + "severity_code": 4, + "facility_code": 0, + }, + defaultTime.Add(time.Nanosecond), + ), + }, + }, + } + return testCases +} + +func testStrictNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { + for _, tc := range getTestCasesForNonTransparent() { + t.Run(tc.name, func(t *testing.T) { + // Creation of a strict mode receiver + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, framing.NonTransparent) + require.NotNil(t, receiver) + if wantTLS { + receiver.ServerConfig = *pki.TLSServerConfig() + } + require.Equal(t, receiver.KeepAlivePeriod, keepAlive) + acc := &testutil.Accumulator{} + require.NoError(t, receiver.Start(acc)) + defer receiver.Stop() + + // Connect + var conn net.Conn + var err error + if wantTLS { + config, e := pki.TLSClientConfig().TLSConfig() + require.NoError(t, e) + config.ServerName = "localhost" + conn, err = tls.Dial(protocol, address, config) + } else { + conn, err = net.Dial(protocol, address) + defer conn.Close() + } + require.NotNil(t, conn) + require.NoError(t, err) + + // Clear + acc.ClearMetrics() + acc.Errors = make([]error, 0) + + // Write + _, err = conn.Write(tc.data) + conn.Close() + require.NoError(t, err) + + // Wait that the the number of data points is accumulated + // Since the receiver is running concurrently + if tc.wantStrict != nil { + acc.Wait(len(tc.wantStrict)) + } + + // Wait the parsing error + acc.WaitError(tc.werr) + + // Verify + if len(acc.Errors) != tc.werr { + t.Fatalf("Got unexpected errors. 
want error = %v, errors = %v\n", tc.werr, acc.Errors) + } + testutil.RequireMetricsEqual(t, tc.wantStrict, acc.GetTelegrafMetrics()) + }) + } +} + +func testBestEffortNonTransparent(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { + for _, tc := range getTestCasesForNonTransparent() { + t.Run(tc.name, func(t *testing.T) { + // Creation of a best effort mode receiver + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, framing.NonTransparent) + require.NotNil(t, receiver) + if wantTLS { + receiver.ServerConfig = *pki.TLSServerConfig() + } + require.Equal(t, receiver.KeepAlivePeriod, keepAlive) + acc := &testutil.Accumulator{} + require.NoError(t, receiver.Start(acc)) + defer receiver.Stop() + + // Connect + var conn net.Conn + var err error + if wantTLS { + config, e := pki.TLSClientConfig().TLSConfig() + require.NoError(t, e) + config.ServerName = "localhost" + conn, err = tls.Dial(protocol, address, config) + } else { + conn, err = net.Dial(protocol, address) + } + require.NotNil(t, conn) + require.NoError(t, err) + + // Clear + acc.ClearMetrics() + acc.Errors = make([]error, 0) + + // Write + _, err = conn.Write(tc.data) + require.NoError(t, err) + conn.Close() + + // Wait that the the number of data points is accumulated + // Since the receiver is running concurrently + if tc.wantBestEffort != nil { + acc.Wait(len(tc.wantBestEffort)) + } + + testutil.RequireMetricsEqual(t, tc.wantStrict, acc.GetTelegrafMetrics()) + }) + } +} + +func TestNonTransparentStrict_tcp(t *testing.T) { + testStrictNonTransparent(t, "tcp", address, false, nil) +} + +func TestNonTransparentBestEffort_tcp(t *testing.T) { + testBestEffortNonTransparent(t, "tcp", address, false, nil) +} + +func TestNonTransparentStrict_tcp_tls(t *testing.T) { + testStrictNonTransparent(t, "tcp", address, true, nil) +} + +func TestNonTransparentBestEffort_tcp_tls(t *testing.T) { + testBestEffortNonTransparent(t, "tcp", address, true, nil) +} + +func TestNonTransparentStrictWithKeepAlive_tcp_tls(t *testing.T) { + testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) +} + +func TestNonTransparentStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { + testStrictNonTransparent(t, "tcp", address, true, &internal.Duration{Duration: 0}) +} + +func TestNonTransparentStrict_unix(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "telegraf") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") + testStrictNonTransparent(t, "unix", sock, false, nil) +} + +func TestNonTransparentBestEffort_unix(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "telegraf") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") + testBestEffortNonTransparent(t, "unix", sock, false, nil) +} + +func TestNonTransparentStrict_unix_tls(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "telegraf") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") + testStrictNonTransparent(t, "unix", sock, true, nil) +} + +func TestNonTransparentBestEffort_unix_tls(t *testing.T) { + tmpdir, err := ioutil.TempDir("", "telegraf") + require.NoError(t, err) + defer os.RemoveAll(tmpdir) + sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") + testBestEffortNonTransparent(t, "unix", sock, true, nil) +} diff --git a/plugins/inputs/syslog/rfc5425_test.go 
b/plugins/inputs/syslog/octetcounting_test.go similarity index 59% rename from plugins/inputs/syslog/rfc5425_test.go rename to plugins/inputs/syslog/octetcounting_test.go index 1b69e6023..210b64dbe 100644 --- a/plugins/inputs/syslog/rfc5425_test.go +++ b/plugins/inputs/syslog/octetcounting_test.go @@ -10,33 +10,28 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + framing "github.com/influxdata/telegraf/internal/syslog" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -var ( - pki = testutil.NewPKI("../../../testutil/pki") -) - -type testCase5425 struct { - name string - data []byte - wantBestEffort []testutil.Metric - wantStrict []testutil.Metric - werr int // how many errors we expect in the strict mode? -} - -func getTestCasesForRFC5425() []testCase5425 { - testCases := []testCase5425{ +func getTestCasesForOctetCounting() []testCaseStream { + testCases := []testCaseStream{ { name: "1st/avg/ok", data: []byte(`188 <29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), - wantStrict: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -48,19 +43,19 @@ func getTestCasesForRFC5425() []testCase5425 { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ - "severity": "notice", - "facility": "daemon", - "hostname": "web1", - "appname": "someservice", - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -72,236 +67,236 @@ func getTestCasesForRFC5425() []testCase5425 { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ - "severity": "notice", - "facility": "daemon", - "hostname": "web1", - "appname": "someservice", - }, - Time: defaultTime, - }, + defaultTime, + ), }, }, { name: "1st/min/ok//2nd/min/ok", data: []byte("16 <1>2 - - - - - -17 <4>11 - - - - - -"), - wantStrict: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", + defaultTime, + ), + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "warning", "facility": "kern", }, - Time: defaultTime, - }, - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(11), "severity_code": 4, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "warning", - "facility": "kern", - }, 
- Time: defaultTime.Add(time.Nanosecond), - }, + defaultTime.Add(time.Nanosecond), + ), }, - wantBestEffort: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", + defaultTime, + ), + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "warning", "facility": "kern", }, - Time: defaultTime, - }, - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(11), "severity_code": 4, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "warning", - "facility": "kern", - }, - Time: defaultTime.Add(time.Nanosecond), - }, + defaultTime.Add(time.Nanosecond), + ), }, }, { name: "1st/utf8/ok", data: []byte("23 <1>1 - - - - - - hellø"), - wantStrict: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "message": "hellø", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "message": "hellø", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, }, { name: "1st/nl/ok", // newline data: []byte("28 <1>3 - - - - - - hello\nworld"), - wantStrict: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(3), "message": "hello\nworld", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(3), "message": "hello\nworld", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, }, { name: "1st/uf/ko", // underflow (msglen less than provided octets) data: []byte("16 <1>2"), wantStrict: nil, - wantBestEffort: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "severity_code": 1, 
"facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, werr: 1, }, { name: "1st/min/ok", data: []byte("16 <1>1 - - - - - -"), - wantStrict: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, }, { name: "1st/uf/mf", // The first "underflow" message breaks also the second one data: []byte("16 <1>217 <11>1 - - - - - -"), wantStrict: nil, - wantBestEffort: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(217), "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, werr: 1, }, // { - // name: "1st/of/ko", // overflow (msglen greather then max allowed octets) + // name: "1st/of/ko", // overflow (msglen greater than max allowed octets) // data: []byte(fmt.Sprintf("8193 <%d>%d %s %s %s %s %s 12 %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)), // want: []testutil.Metric{}, // }, { name: "1st/max/ok", data: []byte(fmt.Sprintf("8192 <%d>%d %s %s %s %s %s - %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)), - wantStrict: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantStrict: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "debug", + "facility": "local7", + "hostname": maxH, + "appname": maxA, + }, + map[string]interface{}{ "version": maxV, "timestamp": time.Unix(1514764799, 999999000).UnixNano(), "message": message7681, @@ -310,19 +305,19 @@ func getTestCasesForRFC5425() []testCase5425 { "facility_code": 23, "severity_code": 7, }, - Tags: map[string]string{ - "severity": "debug", - "facility": "local7", - "hostname": maxH, - "appname": maxA, - }, - Time: defaultTime, - }, + defaultTime, + ), }, - wantBestEffort: []testutil.Metric{ - testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "debug", + "facility": "local7", + "hostname": maxH, + "appname": maxA, + }, + map[string]interface{}{ "version": maxV, "timestamp": time.Unix(1514764799, 999999000).UnixNano(), "message": message7681, @@ -331,14 +326,8 @@ func getTestCasesForRFC5425() []testCase5425 { "facility_code": 23, "severity_code": 7, }, - Tags: map[string]string{ - "severity": "debug", - 
"facility": "local7", - "hostname": maxH, - "appname": maxA, - }, - Time: defaultTime, - }, + defaultTime, + ), }, }, } @@ -346,34 +335,11 @@ func getTestCasesForRFC5425() []testCase5425 { return testCases } -func newTCPSyslogReceiver(address string, keepAlive *internal.Duration, maxConn int, bestEffort bool) *Syslog { - d := &internal.Duration{ - Duration: defaultReadTimeout, - } - s := &Syslog{ - Address: address, - now: func() time.Time { - return defaultTime - }, - ReadTimeout: d, - BestEffort: bestEffort, - Separator: "_", - } - if keepAlive != nil { - s.KeepAlivePeriod = keepAlive - } - if maxConn > 0 { - s.MaxConnections = maxConn - } - - return s -} - -func testStrictRFC5425(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { - for _, tc := range getTestCasesForRFC5425() { +func testStrictOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { + for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a strict mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, false, framing.OctetCounting) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -403,13 +369,16 @@ func testStrictRFC5425(t *testing.T, protocol string, address string, wantTLS bo acc.Errors = make([]error, 0) // Write - conn.Write(tc.data) + _, err = conn.Write(tc.data) + conn.Close() + require.NoError(t, err) // Wait that the the number of data points is accumulated // Since the receiver is running concurrently if tc.wantStrict != nil { acc.Wait(len(tc.wantStrict)) } + // Wait the parsing error acc.WaitError(tc.werr) @@ -417,22 +386,16 @@ func testStrictRFC5425(t *testing.T, protocol string, address string, wantTLS bo if len(acc.Errors) != tc.werr { t.Fatalf("Got unexpected errors. 
want error = %v, errors = %v\n", tc.werr, acc.Errors) } - var got []testutil.Metric - for _, metric := range acc.Metrics { - got = append(got, *metric) - } - if !cmp.Equal(tc.wantStrict, got) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantStrict, got)) - } + testutil.RequireMetricsEqual(t, tc.wantStrict, acc.GetTelegrafMetrics()) }) } } -func testBestEffortRFC5425(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { - for _, tc := range getTestCasesForRFC5425() { +func testBestEffortOctetCounting(t *testing.T, protocol string, address string, wantTLS bool, keepAlive *internal.Duration) { + for _, tc := range getTestCasesForOctetCounting() { t.Run(tc.name, func(t *testing.T) { // Creation of a best effort mode receiver - receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true) + receiver := newTCPSyslogReceiver(protocol+"://"+address, keepAlive, 0, true, framing.OctetCounting) require.NotNil(t, receiver) if wantTLS { receiver.ServerConfig = *pki.TLSServerConfig() @@ -452,7 +415,6 @@ func testBestEffortRFC5425(t *testing.T, protocol string, address string, wantTL conn, err = tls.Dial(protocol, address, config) } else { conn, err = net.Dial(protocol, address) - defer conn.Close() } require.NotNil(t, conn) require.NoError(t, err) @@ -462,7 +424,9 @@ func testBestEffortRFC5425(t *testing.T, protocol string, address string, wantTL acc.Errors = make([]error, 0) // Write - conn.Write(tc.data) + _, err = conn.Write(tc.data) + require.NoError(t, err) + conn.Close() // Wait that the the number of data points is accumulated // Since the receiver is running concurrently @@ -470,70 +434,63 @@ func testBestEffortRFC5425(t *testing.T, protocol string, address string, wantTL acc.Wait(len(tc.wantBestEffort)) } - // Verify - var got []testutil.Metric - for _, metric := range acc.Metrics { - got = append(got, *metric) - } - if !cmp.Equal(tc.wantBestEffort, got) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(tc.wantBestEffort, got)) - } + testutil.RequireMetricsEqual(t, tc.wantBestEffort, acc.GetTelegrafMetrics()) }) } } -func TestStrict_tcp(t *testing.T) { - testStrictRFC5425(t, "tcp", address, false, nil) +func TestOctetCountingStrict_tcp(t *testing.T) { + testStrictOctetCounting(t, "tcp", address, false, nil) } -func TestBestEffort_tcp(t *testing.T) { - testBestEffortRFC5425(t, "tcp", address, false, nil) +func TestOctetCountingBestEffort_tcp(t *testing.T) { + testBestEffortOctetCounting(t, "tcp", address, false, nil) } -func TestStrict_tcp_tls(t *testing.T) { - testStrictRFC5425(t, "tcp", address, true, nil) +func TestOctetCountingStrict_tcp_tls(t *testing.T) { + testStrictOctetCounting(t, "tcp", address, true, nil) } -func TestBestEffort_tcp_tls(t *testing.T) { - testBestEffortRFC5425(t, "tcp", address, true, nil) +func TestOctetCountingBestEffort_tcp_tls(t *testing.T) { + testBestEffortOctetCounting(t, "tcp", address, true, nil) } -func TestStrictWithKeepAlive_tcp_tls(t *testing.T) { - testStrictRFC5425(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) +func TestOctetCountingStrictWithKeepAlive_tcp_tls(t *testing.T) { + testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: time.Minute}) } -func TestStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { - testStrictRFC5425(t, "tcp", address, true, &internal.Duration{Duration: 0}) +func TestOctetCountingStrictWithZeroKeepAlive_tcp_tls(t *testing.T) { + testStrictOctetCounting(t, "tcp", address, true, &internal.Duration{Duration: 0}) } -func 
TestStrict_unix(t *testing.T) { +func TestOctetCountingStrict_unix(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix.sock") - testStrictRFC5425(t, "unix", sock, false, nil) + testStrictOctetCounting(t, "unix", sock, false, nil) } -func TestBestEffort_unix(t *testing.T) { +func TestOctetCountingBestEffort_unix(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix.sock") - testBestEffortRFC5425(t, "unix", sock, false, nil) + testBestEffortOctetCounting(t, "unix", sock, false, nil) } -func TestStrict_unix_tls(t *testing.T) { +func TestOctetCountingStrict_unix_tls(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestStrict_unix_tls.sock") - testStrictRFC5425(t, "unix", sock, true, nil) + testStrictOctetCounting(t, "unix", sock, true, nil) } -func TestBestEffort_unix_tls(t *testing.T) { +func TestOctetCountingBestEffort_unix_tls(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) sock := filepath.Join(tmpdir, "syslog.TestBestEffort_unix_tls.sock") - testBestEffortRFC5425(t, "unix", sock, true, nil) + testBestEffortOctetCounting(t, "unix", sock, true, nil) } diff --git a/plugins/inputs/syslog/rfc5426_test.go b/plugins/inputs/syslog/rfc5426_test.go index cae465189..5e65c1d39 100644 --- a/plugins/inputs/syslog/rfc5426_test.go +++ b/plugins/inputs/syslog/rfc5426_test.go @@ -10,96 +10,89 @@ import ( "testing" "time" - "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -type testCase5426 struct { - name string - data []byte - wantBestEffort *testutil.Metric - wantStrict *testutil.Metric - werr bool -} - -func getTestCasesForRFC5426() []testCase5426 { - testCases := []testCase5426{ - { - name: "empty", - data: []byte(""), - werr: true, - }, +func getTestCasesForRFC5426() []testCasePacket { + testCases := []testCasePacket{ { name: "complete", data: []byte("<1>1 - - - - - - A"), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(1), "message": "A", "facility_code": 0, "severity_code": 1, }, - Tags: map[string]string{ + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ "severity": "alert", "facility": "kern", }, - Time: defaultTime, - }, - wantStrict: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(1), "message": "A", "facility_code": 0, "severity_code": 1, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, { name: "one/per/packet", data: []byte("<1>3 - - - - - - A<1>4 - - - - - - B"), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(3), "message": "A<1>4 - - - - - - B", "severity_code": 1, "facility_code": 0, }, - Tags: 
map[string]string{ + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ "severity": "alert", "facility": "kern", }, - Time: defaultTime, - }, - wantStrict: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(3), "message": "A<1>4 - - - - - - B", "severity_code": 1, "facility_code": 0, }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: defaultTime, - }, + defaultTime, + ), }, { name: "average", data: []byte(`<29>1 2016-02-21T04:32:57+00:00 web1 someservice 2341 2 [origin][meta sequence="14125553" service="someservice"] "GET /v1/ok HTTP/1.1" 200 145 "-" "hacheck 0.9.0" 24306 127.0.0.1:40124 575`), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "notice", + "facility": "daemon", + "hostname": "web1", + "appname": "someservice", + }, + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -111,17 +104,17 @@ func getTestCasesForRFC5426() []testCase5426 { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ "severity": "notice", "facility": "daemon", "hostname": "web1", "appname": "someservice", }, - Time: defaultTime, - }, - wantStrict: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": uint16(1), "timestamp": time.Unix(1456029177, 0).UnixNano(), "procid": "2341", @@ -133,21 +126,21 @@ func getTestCasesForRFC5426() []testCase5426 { "severity_code": 5, "facility_code": 3, }, - Tags: map[string]string{ - "severity": "notice", - "facility": "daemon", - "hostname": "web1", - "appname": "someservice", - }, - Time: defaultTime, - }, + defaultTime, + ), }, { name: "max", data: []byte(fmt.Sprintf("<%d>%d %s %s %s %s %s - %s", maxP, maxV, maxTS, maxH, maxA, maxPID, maxMID, message7681)), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "debug", + "facility": "local7", + "hostname": maxH, + "appname": maxA, + }, + map[string]interface{}{ "version": maxV, "timestamp": time.Unix(1514764799, 999999000).UnixNano(), "message": message7681, @@ -156,17 +149,17 @@ func getTestCasesForRFC5426() []testCase5426 { "severity_code": 7, "facility_code": 23, }, - Tags: map[string]string{ + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ "severity": "debug", "facility": "local7", "hostname": maxH, "appname": maxA, }, - Time: defaultTime, - }, - wantStrict: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + map[string]interface{}{ "version": maxV, "timestamp": time.Unix(1514764799, 999999000).UnixNano(), "message": message7681, @@ -175,49 +168,64 @@ func getTestCasesForRFC5426() []testCase5426 { "severity_code": 7, "facility_code": 23, }, - Tags: map[string]string{ - "severity": "debug", - "facility": "local7", - "hostname": maxH, - "appname": maxA, - }, - Time: defaultTime, - }, + defaultTime, + ), }, { name: "minimal/incomplete", data: []byte("<1>2"), - wantBestEffort: &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": 
"alert", + "facility": "kern", + }, + map[string]interface{}{ "version": uint16(2), "facility_code": 0, "severity_code": 1, }, - Tags: map[string]string{ + defaultTime, + ), + werr: true, + }, + { + name: "trim message", + data: []byte("<1>1 - - - - - - \tA\n"), + wantBestEffort: testutil.MustMetric( + "syslog", + map[string]string{ "severity": "alert", "facility": "kern", }, - Time: defaultTime, - }, - werr: true, + map[string]interface{}{ + "version": uint16(1), + "message": "\tA", + "facility_code": 0, + "severity_code": 1, + }, + defaultTime, + ), + wantStrict: testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(1), + "message": "\tA", + "facility_code": 0, + "severity_code": 1, + }, + defaultTime, + ), }, } return testCases } -func newUDPSyslogReceiver(address string, bestEffort bool) *Syslog { - return &Syslog{ - Address: address, - now: func() time.Time { - return defaultTime - }, - BestEffort: bestEffort, - Separator: "_", - } -} - func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool) { for _, tc := range getTestCasesForRFC5426() { t.Run(tc.name, func(t *testing.T) { @@ -234,12 +242,18 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool) // Connect conn, err := net.Dial(protocol, address) require.NotNil(t, conn) - defer conn.Close() require.Nil(t, err) // Write - _, e := conn.Write(tc.data) - require.Nil(t, e) + _, err = conn.Write(tc.data) + conn.Close() + if err != nil { + if err, ok := err.(*net.OpError); ok { + if err.Err.Error() == "write: message too long" { + return + } + } + } // Waiting ... if tc.wantStrict == nil && tc.werr || bestEffort && tc.werr { @@ -250,19 +264,17 @@ func testRFC5426(t *testing.T, protocol string, address string, bestEffort bool) } // Compare - var got *testutil.Metric - var want *testutil.Metric + var got telegraf.Metric + var want telegraf.Metric if len(acc.Metrics) > 0 { - got = acc.Metrics[0] + got = acc.GetTelegrafMetrics()[0] } if bestEffort { want = tc.wantBestEffort } else { want = tc.wantStrict } - if !cmp.Equal(want, got) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, got)) - } + testutil.RequireMetricEqual(t, want, got) }) } } @@ -327,23 +339,22 @@ func TestTimeIncrement_udp(t *testing.T) { // Wait acc.Wait(1) - want := &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ - "version": uint16(1), - "facility_code": 0, - "severity_code": 1, - }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: getNow(), - } - - if !cmp.Equal(want, acc.Metrics[0]) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, acc.Metrics[0])) + want := []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(1), + "facility_code": 0, + "severity_code": 1, + }, + getNow(), + ), } + testutil.RequireMetricsEqual(t, want, acc.GetTelegrafMetrics()) // New one with different time atomic.StoreInt64(&i, atomic.LoadInt64(&i)+1) @@ -358,23 +369,22 @@ func TestTimeIncrement_udp(t *testing.T) { // Wait acc.Wait(1) - want = &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ - "version": uint16(1), - "facility_code": 0, - "severity_code": 1, - }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: getNow(), - } - - if !cmp.Equal(want, acc.Metrics[0]) { - t.Fatalf("Got (+) / Want (-)\n %s", 
cmp.Diff(want, acc.Metrics[0])) + want = []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(1), + "facility_code": 0, + "severity_code": 1, + }, + getNow(), + ), } + testutil.RequireMetricsEqual(t, want, acc.GetTelegrafMetrics()) // New one with same time as previous one @@ -388,21 +398,20 @@ func TestTimeIncrement_udp(t *testing.T) { // Wait acc.Wait(1) - want = &testutil.Metric{ - Measurement: "syslog", - Fields: map[string]interface{}{ - "version": uint16(1), - "facility_code": 0, - "severity_code": 1, - }, - Tags: map[string]string{ - "severity": "alert", - "facility": "kern", - }, - Time: getNow().Add(time.Nanosecond), - } - - if !cmp.Equal(want, acc.Metrics[0]) { - t.Fatalf("Got (+) / Want (-)\n %s", cmp.Diff(want, acc.Metrics[0])) + want = []telegraf.Metric{ + testutil.MustMetric( + "syslog", + map[string]string{ + "severity": "alert", + "facility": "kern", + }, + map[string]interface{}{ + "version": uint16(1), + "facility_code": 0, + "severity_code": 1, + }, + getNow().Add(time.Nanosecond), + ), } + testutil.RequireMetricsEqual(t, want, acc.GetTelegrafMetrics()) } diff --git a/plugins/inputs/syslog/syslog.go b/plugins/inputs/syslog/syslog.go index 21f6a770f..ecf190e47 100644 --- a/plugins/inputs/syslog/syslog.go +++ b/plugins/inputs/syslog/syslog.go @@ -10,16 +10,20 @@ import ( "strings" "sync" "time" + "unicode" - "github.com/influxdata/go-syslog/rfc5424" - "github.com/influxdata/go-syslog/rfc5425" + "github.com/influxdata/go-syslog/v2" + "github.com/influxdata/go-syslog/v2/nontransparent" + "github.com/influxdata/go-syslog/v2/octetcounting" + "github.com/influxdata/go-syslog/v2/rfc5424" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + framing "github.com/influxdata/telegraf/internal/syslog" tlsConfig "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/inputs" ) -const defaultReadTimeout = time.Millisecond * 500 +const defaultReadTimeout = time.Second * 5 const ipMaxPacketSize = 64 * 1024 // Syslog is a syslog plugin @@ -27,8 +31,10 @@ type Syslog struct { tlsConfig.ServerConfig Address string `toml:"server"` KeepAlivePeriod *internal.Duration - ReadTimeout *internal.Duration MaxConnections int + ReadTimeout *internal.Duration + Framing framing.Framing + Trailer nontransparent.TrailerType BestEffort bool Separator string `toml:"sdparam_separator"` @@ -71,9 +77,19 @@ var sampleConfig = ` ## Only applies to stream sockets (e.g. TCP). # max_connections = 1024 - ## Read timeout (default = 500ms). + ## Read timeout is the maximum time allowed for reading a single message (default = 5s). ## 0 means unlimited. - # read_timeout = 500ms + # read_timeout = "5s" + + ## The framing technique with which it is expected that messages are transported (default = "octet-counting"). + ## Whether the messages come using the octect-counting (RFC5425#section-4.3.1, RFC6587#section-3.4.1), + ## or the non-transparent framing technique (RFC6587#section-3.4.2). + ## Must be one of "octet-counting", "non-transparent". + # framing = "octet-counting" + + ## The trailer to be expected in case of non-transparent framing (default = "LF"). + ## Must be one of "LF", or "NUL". + # trailer = "LF" ## Whether to parse in best effort mode or not (default = false). ## By default best effort parsing is off. 
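To make the two framing modes described in the sample config above concrete, here is a minimal, hypothetical sender sketch (not part of this patch) that writes the same RFC 5424 message to the plugin's default TCP address `:6514` using each framing. A real listener is configured for exactly one mode at a time; the two writes are shown together only for brevity.

```go
// Hypothetical client illustrating the two framings the syslog input accepts.
// Assumes a syslog TCP listener on localhost:6514 (the plugin default).
package main

import (
	"fmt"
	"net"
)

func main() {
	msg := "<13>1 2020-01-01T00:00:00Z host app - - - hello"

	conn, err := net.Dial("tcp", "localhost:6514")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Octet-counting framing (RFC 5425 §4.3.1, RFC 6587 §3.4.1):
	// the message is prefixed with its byte length and a single space.
	fmt.Fprintf(conn, "%d %s", len(msg), msg)

	// Non-transparent framing (RFC 6587 §3.4.2):
	// the message is terminated by a trailer, here LF.
	fmt.Fprintf(conn, "%s\n", msg)
}
```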
@@ -94,7 +110,7 @@ func (s *Syslog) SampleConfig() string { // Description returns the plugin description func (s *Syslog) Description() string { - return "Accepts syslog messages per RFC5425" + return "Accepts syslog messages following RFC5424 format with transports as per RFC5426, RFC5425, or RFC6587" } // Gather ... @@ -202,7 +218,12 @@ func getAddressParts(a string) (string, string, error) { func (s *Syslog) listenPacket(acc telegraf.Accumulator) { defer s.wg.Done() b := make([]byte, ipMaxPacketSize) - p := rfc5424.NewParser() + var p syslog.Machine + if s.BestEffort { + p = rfc5424.NewParser(rfc5424.WithBestEffort()) + } else { + p = rfc5424.NewParser() + } for { n, _, err := s.udpListener.ReadFrom(b) if err != nil { @@ -212,13 +233,9 @@ func (s *Syslog) listenPacket(acc telegraf.Accumulator) { break } - if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { - s.udpListener.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) - } - - message, err := p.Parse(b[:n], &s.BestEffort) + message, err := p.Parse(b[:n]) if message != nil { - acc.AddFields("syslog", fields(*message, s), tags(*message), s.time()) + acc.AddFields("syslog", fields(message, s), tags(message), s.time()) } if err != nil { acc.AddError(err) @@ -279,20 +296,38 @@ func (s *Syslog) handle(conn net.Conn, acc telegraf.Accumulator) { conn.Close() }() + var p syslog.Parser + + emit := func(r *syslog.Result) { + s.store(*r, acc) + if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { + conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) + } + } + + // Create parser options + opts := []syslog.ParserOption{ + syslog.WithListener(emit), + } + if s.BestEffort { + opts = append(opts, syslog.WithBestEffort()) + } + + // Select the parser to use depending on transport framing + if s.Framing == framing.OctetCounting { + // Octet counting transparent framing + p = octetcounting.NewParser(opts...) + } else { + // Non-transparent framing + opts = append(opts, nontransparent.WithTrailer(s.Trailer)) + p = nontransparent.NewParser(opts...) 
+ } + + p.Parse(conn) + if s.ReadTimeout != nil && s.ReadTimeout.Duration > 0 { conn.SetReadDeadline(time.Now().Add(s.ReadTimeout.Duration)) } - - var p *rfc5425.Parser - if s.BestEffort { - p = rfc5425.NewParser(conn, rfc5425.WithBestEffort()) - } else { - p = rfc5425.NewParser(conn) - } - - p.ParseExecuting(func(r *rfc5425.Result) { - s.store(*r, acc) - }) } func (s *Syslog) setKeepAlive(c *net.TCPConn) error { @@ -309,20 +344,16 @@ func (s *Syslog) setKeepAlive(c *net.TCPConn) error { return c.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration) } -func (s *Syslog) store(res rfc5425.Result, acc telegraf.Accumulator) { +func (s *Syslog) store(res syslog.Result, acc telegraf.Accumulator) { if res.Error != nil { acc.AddError(res.Error) } - if res.MessageError != nil { - acc.AddError(res.MessageError) - } if res.Message != nil { - msg := *res.Message - acc.AddFields("syslog", fields(msg, s), tags(msg), s.time()) + acc.AddFields("syslog", fields(res.Message, s), tags(res.Message), s.time()) } } -func tags(msg rfc5424.SyslogMessage) map[string]string { +func tags(msg syslog.Message) map[string]string { ts := map[string]string{} // Not checking assuming a minimally valid message @@ -340,7 +371,7 @@ func tags(msg rfc5424.SyslogMessage) map[string]string { return ts } -func fields(msg rfc5424.SyslogMessage, s *Syslog) map[string]interface{} { +func fields(msg syslog.Message, s *Syslog) map[string]interface{} { // Not checking assuming a minimally valid message flds := map[string]interface{}{ "version": msg.Version(), @@ -361,7 +392,9 @@ func fields(msg rfc5424.SyslogMessage, s *Syslog) map[string]interface{} { } if msg.Message() != nil { - flds["message"] = *msg.Message() + flds["message"] = strings.TrimRightFunc(*msg.Message(), func(r rune) bool { + return unicode.IsSpace(r) + }) } if msg.StructuredData() != nil { @@ -406,14 +439,16 @@ func getNanoNow() time.Time { } func init() { - receiver := &Syslog{ - Address: ":6514", - now: getNanoNow, - ReadTimeout: &internal.Duration{ - Duration: defaultReadTimeout, - }, - Separator: "_", - } - - inputs.Add("syslog", func() telegraf.Input { return receiver }) + inputs.Add("syslog", func() telegraf.Input { + return &Syslog{ + Address: ":6514", + now: getNanoNow, + ReadTimeout: &internal.Duration{ + Duration: defaultReadTimeout, + }, + Framing: framing.OctetCounting, + Trailer: nontransparent.LF, + Separator: "_", + } + }) } diff --git a/plugins/inputs/sysstat/README.md b/plugins/inputs/sysstat/README.md index d8e0e95d8..9775c1a30 100644 --- a/plugins/inputs/sysstat/README.md +++ b/plugins/inputs/sysstat/README.md @@ -16,18 +16,15 @@ the created binary data file with the `sadf` utility. ## On Debian and Arch Linux the default path is /usr/lib/sa/sadc whereas ## on RHEL and CentOS the default path is /usr/lib64/sa/sadc sadc_path = "/usr/lib/sa/sadc" # required - # - # + ## Path to the sadf command, if it is not in PATH # sadf_path = "/usr/bin/sadf" - # - # + ## Activities is a list of activities, that are passed as argument to the ## sadc collector utility (e.g: DISK, SNMP etc...) ## The more activities that are added, the more data is collected. # activities = ["DISK"] - # - # + ## Group metrics to measurements. ## ## If group is false each metric will be prefixed with a description @@ -35,8 +32,7 @@ the created binary data file with the `sadf` utility. ## ## If Group is true, corresponding metrics are grouped to a single measurement. # group = true - # - # + ## Options for the sadf command. 
The values on the left represent the sadf options and ## the values on the right their description (wich are used for grouping and prefixing metrics). ## @@ -58,8 +54,7 @@ the created binary data file with the `sadf` utility. -w = "task" # -H = "hugepages" # only available for newer linux distributions # "-I ALL" = "interrupts" # requires INT activity - # - # + ## Device tags can be used to add additional tags for devices. For example the configuration below ## adds a tag vg with value rootvg for all metrics with sda devices. # [[inputs.sysstat.device_tags.sda]] diff --git a/plugins/inputs/sysstat/sysstat.go b/plugins/inputs/sysstat/sysstat.go index 42ce89550..9f530024b 100644 --- a/plugins/inputs/sysstat/sysstat.go +++ b/plugins/inputs/sysstat/sysstat.go @@ -7,7 +7,6 @@ import ( "encoding/csv" "fmt" "io" - "log" "os" "os/exec" "path" @@ -67,6 +66,8 @@ type Sysstat struct { DeviceTags map[string][]map[string]string `toml:"device_tags"` tmpFile string interval int + + Log telegraf.Logger } func (*Sysstat) Description() string { @@ -81,18 +82,15 @@ var sampleConfig = ` ## Arch: /usr/lib/sa/sadc ## RHEL/CentOS: /usr/lib64/sa/sadc sadc_path = "/usr/lib/sa/sadc" # required - # - # + ## Path to the sadf command, if it is not in PATH # sadf_path = "/usr/bin/sadf" - # - # + ## Activities is a list of activities, that are passed as argument to the ## sadc collector utility (e.g: DISK, SNMP etc...) ## The more activities that are added, the more data is collected. # activities = ["DISK"] - # - # + ## Group metrics to measurements. ## ## If group is false each metric will be prefixed with a description @@ -100,8 +98,7 @@ var sampleConfig = ` ## ## If Group is true, corresponding metrics are grouped to a single measurement. # group = true - # - # + ## Options for the sadf command. The values on the left represent the sadf ## options and the values on the right their description (which are used for ## grouping and prefixing metrics). @@ -125,8 +122,7 @@ var sampleConfig = ` -w = "task" # -H = "hugepages" # only available for newer linux distributions # "-I ALL" = "interrupts" # requires INT activity - # - # + ## Device tags can be used to add additional tags for devices. ## For example the configuration below adds a tag vg with value rootvg for ## all metrics with sda devices. @@ -196,7 +192,7 @@ func (s *Sysstat) collect() error { out, err := internal.CombinedOutputTimeout(cmd, time.Second*time.Duration(collectInterval+parseInterval)) if err != nil { if err := os.Remove(s.tmpFile); err != nil { - log.Printf("E! 
failed to remove tmp file after %s command: %s", strings.Join(cmd.Args, " "), err) + s.Log.Errorf("Failed to remove tmp file after %q command: %s", strings.Join(cmd.Args, " "), err.Error()) } return fmt.Errorf("failed to run command %s: %s - %s", strings.Join(cmd.Args, " "), err, string(out)) } @@ -335,6 +331,7 @@ func (s *Sysstat) sadfOptions(activityOption string) []string { // escape removes % and / chars in field names func escape(dirty string) string { var fieldEscaper = strings.NewReplacer( + `%%`, "pct_", `%`, "pct_", `/`, "_per_", ) diff --git a/plugins/inputs/sysstat/sysstat_test.go b/plugins/inputs/sysstat/sysstat_test.go index 876e6d2c8..4aecfaacc 100644 --- a/plugins/inputs/sysstat/sysstat_test.go +++ b/plugins/inputs/sysstat/sysstat_test.go @@ -13,6 +13,7 @@ import ( ) var s = Sysstat{ + Log: testutil.Logger{}, interval: 10, Sadc: "/usr/lib/sa/sadc", Sadf: "/usr/bin/sadf", @@ -225,6 +226,10 @@ func TestEscape(t *testing.T) { "%util", "pct_util", }, + { + "%%util", + "pct_util", + }, { "bread/s", "bread_per_s", diff --git a/plugins/inputs/system/CPU_README.md b/plugins/inputs/system/CPU_README.md deleted file mode 100644 index dfb8561a2..000000000 --- a/plugins/inputs/system/CPU_README.md +++ /dev/null @@ -1,99 +0,0 @@ -# Telegraf plugin: CPU - -#### Plugin arguments: -- **totalcpu** boolean: If true, include `cpu-total` data -- **percpu** boolean: If true, include data on a per-cpu basis `cpu0, cpu1, etc.` - - -##### Configuration: -``` -[[inputs.cpu]] - ## Whether to report per-cpu stats or not - percpu = true - ## Whether to report total system cpu stats or not - totalcpu = true - ## If true, collect raw CPU time metrics. - collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. - report_active = false -``` - -#### Description - -The CPU plugin collects standard CPU metrics as defined in `man proc`. All -architectures do not support all of these metrics. - -``` -cpu 3357 0 4313 1362393 - The amount of time, measured in units of USER_HZ (1/100ths of a second on - most architectures, use sysconf(_SC_CLK_TCK) to obtain the right value), - that the system spent in various states: - - user (1) Time spent in user mode. - - nice (2) Time spent in user mode with low priority (nice). - - system (3) Time spent in system mode. - - idle (4) Time spent in the idle task. This value should be USER_HZ times - the second entry in the /proc/uptime pseudo-file. - - iowait (since Linux 2.5.41) - (5) Time waiting for I/O to complete. - - irq (since Linux 2.6.0-test4) - (6) Time servicing interrupts. - - softirq (since Linux 2.6.0-test4) - (7) Time servicing softirqs. - - steal (since Linux 2.6.11) - (8) Stolen time, which is the time spent in other operating systems - when running in a virtualized environment - - guest (since Linux 2.6.24) - (9) Time spent running a virtual CPU for guest operating systems - under the control of the Linux kernel. - - guest_nice (since Linux 2.6.33) - (10) Time spent running a niced guest (virtual CPU for guest operating systems under the control of the Linux kernel). 
-``` - -# Measurements: -### CPU Time measurements: - -Meta: -- units: CPU Time -- tags: `cpu= or ` - -Measurement names: -- cpu_time_user -- cpu_time_system -- cpu_time_idle -- cpu_time_active (must be explicitly enabled by setting `report_active = true`) -- cpu_time_nice -- cpu_time_iowait -- cpu_time_irq -- cpu_time_softirq -- cpu_time_steal -- cpu_time_guest -- cpu_time_guest_nice - -### CPU Usage Percent Measurements: - -Meta: -- units: percent (out of 100) -- tags: `cpu= or ` - -Measurement names: -- cpu_usage_user -- cpu_usage_system -- cpu_usage_idle -- cpu_usage_active (must be explicitly enabled by setting `report_active = true`) -- cpu_usage_nice -- cpu_usage_iowait -- cpu_usage_irq -- cpu_usage_softirq -- cpu_usage_steal -- cpu_usage_guest -- cpu_usage_guest_nice diff --git a/plugins/inputs/system/MEM_README.md b/plugins/inputs/system/MEM_README.md deleted file mode 100644 index 8a9ff823c..000000000 --- a/plugins/inputs/system/MEM_README.md +++ /dev/null @@ -1,35 +0,0 @@ -# Mem Input Plugin - -The mem plugin collects system memory metrics. - -For a more complete explanation of the difference between *used* and -*actual_used* RAM, see [Linux ate my ram](http://www.linuxatemyram.com/). - -### Configuration: -```toml -# Read metrics about memory usage -[[inputs.mem]] - # no configuration -``` - -### Metrics: - -- mem - - fields: - - active (int) - - available (int) - - buffered (int) - - cached (int) - - free (int) - - inactive (int) - - slab (int) - - total (int) - - used (int) - - available_percent (float) - - used_percent (float) - - wired (int) - -### Example Output: -``` -mem cached=7809495040i,inactive=6348988416i,total=20855394304i,available=11378946048i,buffered=927199232i,active=11292905472i,slab=1351340032i,used_percent=45.43883523785713,available_percent=54.56116476214287,used=9476448256i,free=1715331072i 1511894782000000000 -``` diff --git a/plugins/inputs/system/SYSTEM_README.md b/plugins/inputs/system/README.md similarity index 82% rename from plugins/inputs/system/SYSTEM_README.md rename to plugins/inputs/system/README.md index bea9bd2d9..8b16c1de0 100644 --- a/plugins/inputs/system/SYSTEM_README.md +++ b/plugins/inputs/system/README.md @@ -3,6 +3,8 @@ The system plugin gathers general stats on system load, uptime, and number of users logged in. It is similar to the unix `uptime` command. +Number of CPUs is obtained from the /proc/cpuinfo file. + ### Configuration: ```toml @@ -13,7 +15,7 @@ and number of users logged in. It is similar to the unix `uptime` command. #### Permissions: The `n_users` field requires read access to `/var/run/utmp`, and may require -the `telegraf` user to be added to the `utmp` group on some systems. +the `telegraf` user to be added to the `utmp` group on some systems. If this file does not exist `n_users` will be skipped. ### Metrics: @@ -25,7 +27,7 @@ the `telegraf` user to be added to the `utmp` group on some systems. 
- n_users (integer) - n_cpus (integer) - uptime (integer, seconds) - - uptime_format (string) + - uptime_format (string, deprecated in 1.10, use `uptime` field) ### Example Output: diff --git a/plugins/inputs/system/memory_test.go b/plugins/inputs/system/memory_test.go deleted file mode 100644 index 34914db9c..000000000 --- a/plugins/inputs/system/memory_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package system - -import ( - "testing" - - "github.com/influxdata/telegraf/testutil" - "github.com/shirou/gopsutil/mem" - "github.com/stretchr/testify/require" -) - -func TestMemStats(t *testing.T) { - var mps MockPS - var err error - defer mps.AssertExpectations(t) - var acc testutil.Accumulator - - vms := &mem.VirtualMemoryStat{ - Total: 12400, - Available: 7600, - Used: 5000, - Free: 1235, - Active: 8134, - Inactive: 1124, - Slab: 1234, - Wired: 134, - // Buffers: 771, - // Cached: 4312, - // Shared: 2142, - } - - mps.On("VMStat").Return(vms, nil) - - err = (&MemStats{&mps}).Gather(&acc) - require.NoError(t, err) - - memfields := map[string]interface{}{ - "total": uint64(12400), - "available": uint64(7600), - "used": uint64(5000), - "available_percent": float64(7600) / float64(12400) * 100, - "used_percent": float64(5000) / float64(12400) * 100, - "free": uint64(1235), - "cached": uint64(0), - "buffered": uint64(0), - "active": uint64(8134), - "inactive": uint64(1124), - "wired": uint64(134), - "slab": uint64(1234), - } - acc.AssertContainsTaggedFields(t, "mem", memfields, make(map[string]string)) - - acc.Metrics = nil -} diff --git a/plugins/inputs/system/mock_PS.go b/plugins/inputs/system/mock_PS.go index d5093f031..b3cf2c170 100644 --- a/plugins/inputs/system/mock_PS.go +++ b/plugins/inputs/system/mock_PS.go @@ -7,6 +7,7 @@ import ( "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/load" "github.com/shirou/gopsutil/mem" @@ -19,11 +20,11 @@ type MockPS struct { } type MockPSDisk struct { - *systemPS + *SystemPS *mock.Mock } -type mockDiskUsage struct { +type MockDiskUsage struct { *mock.Mock } @@ -100,6 +101,15 @@ func (m *MockPS) SwapStat() (*mem.SwapMemoryStat, error) { return r0, r1 } +func (m *MockPS) Temperature() ([]host.TemperatureStat, error) { + ret := m.Called() + + r0 := ret.Get(0).([]host.TemperatureStat) + r1 := ret.Error(1) + + return r0, r1 +} + func (m *MockPS) NetConnections() ([]net.ConnectionStat, error) { ret := m.Called() @@ -109,7 +119,7 @@ func (m *MockPS) NetConnections() ([]net.ConnectionStat, error) { return r0, r1 } -func (m *mockDiskUsage) Partitions(all bool) ([]disk.PartitionStat, error) { +func (m *MockDiskUsage) Partitions(all bool) ([]disk.PartitionStat, error) { ret := m.Called(all) r0 := ret.Get(0).([]disk.PartitionStat) @@ -118,12 +128,12 @@ func (m *mockDiskUsage) Partitions(all bool) ([]disk.PartitionStat, error) { return r0, r1 } -func (m *mockDiskUsage) OSGetenv(key string) string { +func (m *MockDiskUsage) OSGetenv(key string) string { ret := m.Called(key) return ret.Get(0).(string) } -func (m *mockDiskUsage) OSStat(name string) (os.FileInfo, error) { +func (m *MockDiskUsage) OSStat(name string) (os.FileInfo, error) { ret := m.Called(name) r0 := ret.Get(0).(os.FileInfo) @@ -132,7 +142,7 @@ func (m *mockDiskUsage) OSStat(name string) (os.FileInfo, error) { return r0, r1 } -func (m *mockDiskUsage) PSDiskUsage(path string) (*disk.UsageStat, error) { +func (m *MockDiskUsage) PSDiskUsage(path string) (*disk.UsageStat, error) { ret := m.Called(path) r0 := 
ret.Get(0).(*disk.UsageStat) diff --git a/plugins/inputs/system/ps.go b/plugins/inputs/system/ps.go index 81161ae68..824dbe446 100644 --- a/plugins/inputs/system/ps.go +++ b/plugins/inputs/system/ps.go @@ -10,6 +10,7 @@ import ( "github.com/shirou/gopsutil/cpu" "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/host" "github.com/shirou/gopsutil/mem" "github.com/shirou/gopsutil/net" ) @@ -23,6 +24,7 @@ type PS interface { VMStat() (*mem.VirtualMemoryStat, error) SwapStat() (*mem.SwapMemoryStat, error) NetConnections() ([]net.ConnectionStat, error) + Temperature() ([]host.TemperatureStat, error) } type PSDiskDeps interface { @@ -39,17 +41,17 @@ func add(acc telegraf.Accumulator, } } -func newSystemPS() *systemPS { - return &systemPS{&systemPSDisk{}} +func NewSystemPS() *SystemPS { + return &SystemPS{&SystemPSDisk{}} } -type systemPS struct { +type SystemPS struct { PSDiskDeps } -type systemPSDisk struct{} +type SystemPSDisk struct{} -func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { +func (s *SystemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { var cpuTimes []cpu.TimesStat if perCPU { if perCPUTimes, err := cpu.Times(true); err == nil { @@ -68,7 +70,7 @@ func (s *systemPS) CPUTimes(perCPU, totalCPU bool) ([]cpu.TimesStat, error) { return cpuTimes, nil } -func (s *systemPS) DiskUsage( +func (s *SystemPS) DiskUsage( mountPointFilter []string, fstypeExclude []string, ) ([]*disk.UsageStat, []*disk.PartitionStat, error) { @@ -139,19 +141,19 @@ func (s *systemPS) DiskUsage( return usage, partitions, nil } -func (s *systemPS) NetProto() ([]net.ProtoCountersStat, error) { +func (s *SystemPS) NetProto() ([]net.ProtoCountersStat, error) { return net.ProtoCounters(nil) } -func (s *systemPS) NetIO() ([]net.IOCountersStat, error) { +func (s *SystemPS) NetIO() ([]net.IOCountersStat, error) { return net.IOCounters(true) } -func (s *systemPS) NetConnections() ([]net.ConnectionStat, error) { +func (s *SystemPS) NetConnections() ([]net.ConnectionStat, error) { return net.Connections("all") } -func (s *systemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { +func (s *SystemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error) { m, err := disk.IOCounters(names...) 
if err == internal.NotImplementedError { return nil, nil @@ -160,26 +162,37 @@ func (s *systemPS) DiskIO(names []string) (map[string]disk.IOCountersStat, error return m, err } -func (s *systemPS) VMStat() (*mem.VirtualMemoryStat, error) { +func (s *SystemPS) VMStat() (*mem.VirtualMemoryStat, error) { return mem.VirtualMemory() } -func (s *systemPS) SwapStat() (*mem.SwapMemoryStat, error) { +func (s *SystemPS) SwapStat() (*mem.SwapMemoryStat, error) { return mem.SwapMemory() } -func (s *systemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) { +func (s *SystemPS) Temperature() ([]host.TemperatureStat, error) { + temp, err := host.SensorsTemperatures() + if err != nil { + _, ok := err.(*host.Warnings) + if !ok { + return temp, err + } + } + return temp, nil +} + +func (s *SystemPSDisk) Partitions(all bool) ([]disk.PartitionStat, error) { return disk.Partitions(all) } -func (s *systemPSDisk) OSGetenv(key string) string { +func (s *SystemPSDisk) OSGetenv(key string) string { return os.Getenv(key) } -func (s *systemPSDisk) OSStat(name string) (os.FileInfo, error) { +func (s *SystemPSDisk) OSStat(name string) (os.FileInfo, error) { return os.Stat(name) } -func (s *systemPSDisk) PSDiskUsage(path string) (*disk.UsageStat, error) { +func (s *SystemPSDisk) PSDiskUsage(path string) (*disk.UsageStat, error) { return disk.Usage(path) } diff --git a/plugins/inputs/system/system.go b/plugins/inputs/system/system.go index ad17c56ed..32747cca2 100644 --- a/plugins/inputs/system/system.go +++ b/plugins/inputs/system/system.go @@ -5,64 +5,77 @@ import ( "bytes" "fmt" "os" - "runtime" "strings" "time" - "github.com/shirou/gopsutil/host" - "github.com/shirou/gopsutil/load" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/load" ) -type SystemStats struct{} +type SystemStats struct { + Log telegraf.Logger +} -func (_ *SystemStats) Description() string { +func (*SystemStats) Description() string { return "Read metrics about system load & uptime" } -func (_ *SystemStats) SampleConfig() string { return "" } +func (*SystemStats) SampleConfig() string { + return ` + ## Uncomment to remove deprecated metrics. 
+ # fielddrop = ["uptime_format"] +` +} -func (_ *SystemStats) Gather(acc telegraf.Accumulator) error { +func (s *SystemStats) Gather(acc telegraf.Accumulator) error { loadavg, err := load.Avg() if err != nil && !strings.Contains(err.Error(), "not implemented") { return err } + numCPUs, err := cpu.Counts(true) + if err != nil { + return err + } + fields := map[string]interface{}{ "load1": loadavg.Load1, "load5": loadavg.Load5, "load15": loadavg.Load15, - "n_cpus": runtime.NumCPU(), + "n_cpus": numCPUs, } users, err := host.Users() if err == nil { fields["n_users"] = len(users) - } else if !os.IsPermission(err) { - return err + } else if os.IsNotExist(err) { + s.Log.Debugf("Reading users: %s", err.Error()) + } else if os.IsPermission(err) { + s.Log.Debug(err.Error()) } now := time.Now() acc.AddGauge("system", fields, nil, now) - hostinfo, err := host.Info() + uptime, err := host.Uptime() if err != nil { return err } acc.AddCounter("system", map[string]interface{}{ - "uptime": hostinfo.Uptime, + "uptime": uptime, }, nil, now) acc.AddFields("system", map[string]interface{}{ - "uptime_format": format_uptime(hostinfo.Uptime), + "uptime_format": formatUptime(uptime), }, nil, now) return nil } -func format_uptime(uptime uint64) string { +func formatUptime(uptime uint64) string { buf := new(bytes.Buffer) w := bufio.NewWriter(buf) diff --git a/plugins/inputs/systemd_units/README.md b/plugins/inputs/systemd_units/README.md new file mode 100644 index 000000000..f6b8796f9 --- /dev/null +++ b/plugins/inputs/systemd_units/README.md @@ -0,0 +1,135 @@ +# Systemd Units Plugin + +The systemd_units plugin gathers systemd unit status on Linux. It relies on +`systemctl list-units --all --type=service` to collect data on service status. + +The results are tagged with the unit name and provide enumerated fields for +loaded, active and running fields, indicating the unit health. + +This plugin is related to the [win_services module](/plugins/inputs/win_services/), which +fulfills the same purpose on windows. + +In addition to services, this plugin can gather other unit types as well, +see `systemctl list-units --all --type help` for possible options. 
+ +### Configuration +```toml +[[inputs.systemd_units]] + ## Set timeout for systemctl execution + # timeout = "1s" + # + ## Filter for a specific unit type, default is "service", other possible + ## values are "socket", "target", "device", "mount", "automount", "swap", + ## "timer", "path", "slice" and "scope ": + # unittype = "service" +``` + +### Metrics +- systemd_units: + - tags: + - name (string, unit name) + - load (string, load state) + - active (string, active state) + - sub (string, sub state) + - fields: + - load_code (int, see below) + - active_code (int, see below) + - sub_code (int, see below) + +#### Load + +enumeration of [unit_load_state_table](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L87) + +| Value | Meaning | Description | +| ----- | ------- | ----------- | +| 0 | loaded | unit is ~ | +| 1 | stub | unit is ~ | +| 2 | not-found | unit is ~ | +| 3 | bad-setting | unit is ~ | +| 4 | error | unit is ~ | +| 5 | merged | unit is ~ | +| 6 | masked | unit is ~ | + +#### Active + +enumeration of [unit_active_state_table](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L99) + +| Value | Meaning | Description | +| ----- | ------- | ----------- | +| 0 | active | unit is ~ | +| 1 | reloading | unit is ~ | +| 2 | inactive | unit is ~ | +| 3 | failed | unit is ~ | +| 4 | activating | unit is ~ | +| 5 | deactivating | unit is ~ | + +#### Sub + +enumeration of sub states, see various [unittype_state_tables](https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c#L163); +duplicates were removed, tables are hex aligned to keep some space for future +values + +| Value | Meaning | Description | +| ----- | ------- | ----------- | +| | | service_state_table start at 0x0000 | +| 0x0000 | running | unit is ~ | +| 0x0001 | dead | unit is ~ | +| 0x0002 | start-pre | unit is ~ | +| 0x0003 | start | unit is ~ | +| 0x0004 | exited | unit is ~ | +| 0x0005 | reload | unit is ~ | +| 0x0006 | stop | unit is ~ | +| 0x0007 | stop-watchdog | unit is ~ | +| 0x0008 | stop-sigterm | unit is ~ | +| 0x0009 | stop-sigkill | unit is ~ | +| 0x000a | stop-post | unit is ~ | +| 0x000b | final-sigterm | unit is ~ | +| 0x000c | failed | unit is ~ | +| 0x000d | auto-restart | unit is ~ | +| | | service_state_table start at 0x0010 | +| 0x0010 | waiting | unit is ~ | +| | | service_state_table start at 0x0020 | +| 0x0020 | tentative | unit is ~ | +| 0x0021 | plugged | unit is ~ | +| | | service_state_table start at 0x0030 | +| 0x0030 | mounting | unit is ~ | +| 0x0031 | mounting-done | unit is ~ | +| 0x0032 | mounted | unit is ~ | +| 0x0033 | remounting | unit is ~ | +| 0x0034 | unmounting | unit is ~ | +| 0x0035 | remounting-sigterm | unit is ~ | +| 0x0036 | remounting-sigkill | unit is ~ | +| 0x0037 | unmounting-sigterm | unit is ~ | +| 0x0038 | unmounting-sigkill | unit is ~ | +| | | service_state_table start at 0x0040 | +| | | service_state_table start at 0x0050 | +| 0x0050 | abandoned | unit is ~ | +| | | service_state_table start at 0x0060 | +| 0x0060 | active | unit is ~ | +| | | service_state_table start at 0x0070 | +| 0x0070 | start-chown | unit is ~ | +| 0x0071 | start-post | unit is ~ | +| 0x0072 | listening | unit is ~ | +| 0x0073 | stop-pre | unit is ~ | +| 0x0074 | stop-pre-sigterm | unit is ~ | +| 0x0075 | stop-pre-sigkill | unit is ~ | +| 0x0076 | final-sigkill | unit is ~ | +| | | service_state_table start at 0x0080 | +| 0x0080 | activating | 
unit is ~ | +| 0x0081 | activating-done | unit is ~ | +| 0x0082 | deactivating | unit is ~ | +| 0x0083 | deactivating-sigterm | unit is ~ | +| 0x0084 | deactivating-sigkill | unit is ~ | +| | | service_state_table start at 0x0090 | +| | | service_state_table start at 0x00a0 | +| 0x00a0 | elapsed | unit is ~ | +| | | | + +### Example Output + +``` +systemd_units,host=host1.example.com,name=dbus.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000 +systemd_units,host=host1.example.com,name=networking.service,load=loaded,active=failed,sub=failed load_code=0i,active_code=3i,sub_code=12i 1533730725000000000 +systemd_units,host=host1.example.com,name=ssh.service,load=loaded,active=active,sub=running load_code=0i,active_code=0i,sub_code=0i 1533730725000000000 +... +``` diff --git a/plugins/inputs/systemd_units/systemd_units_linux.go b/plugins/inputs/systemd_units/systemd_units_linux.go new file mode 100644 index 000000000..64caf03d0 --- /dev/null +++ b/plugins/inputs/systemd_units/systemd_units_linux.go @@ -0,0 +1,221 @@ +package systemd_units + +import ( + "bufio" + "bytes" + "fmt" + "os/exec" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// SystemdUnits is a telegraf plugin to gather systemd unit status +type SystemdUnits struct { + Timeout internal.Duration + UnitType string `toml:"unittype"` + systemctl systemctl +} + +type systemctl func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) + +const measurement = "systemd_units" + +// Below are mappings of systemd state tables as defined in +// https://github.com/systemd/systemd/blob/c87700a1335f489be31cd3549927da68b5638819/src/basic/unit-def.c +// Duplicate strings are removed from this list. 
+var load_map = map[string]int{ + "loaded": 0, + "stub": 1, + "not-found": 2, + "bad-setting": 3, + "error": 4, + "merged": 5, + "masked": 6, +} + +var active_map = map[string]int{ + "active": 0, + "reloading": 1, + "inactive": 2, + "failed": 3, + "activating": 4, + "deactivating": 5, +} + +var sub_map = map[string]int{ + // service_state_table, offset 0x0000 + "running": 0x0000, + "dead": 0x0001, + "start-pre": 0x0002, + "start": 0x0003, + "exited": 0x0004, + "reload": 0x0005, + "stop": 0x0006, + "stop-watchdog": 0x0007, + "stop-sigterm": 0x0008, + "stop-sigkill": 0x0009, + "stop-post": 0x000a, + "final-sigterm": 0x000b, + "failed": 0x000c, + "auto-restart": 0x000d, + + // automount_state_table, offset 0x0010 + "waiting": 0x0010, + + // device_state_table, offset 0x0020 + "tentative": 0x0020, + "plugged": 0x0021, + + // mount_state_table, offset 0x0030 + "mounting": 0x0030, + "mounting-done": 0x0031, + "mounted": 0x0032, + "remounting": 0x0033, + "unmounting": 0x0034, + "remounting-sigterm": 0x0035, + "remounting-sigkill": 0x0036, + "unmounting-sigterm": 0x0037, + "unmounting-sigkill": 0x0038, + + // path_state_table, offset 0x0040 + + // scope_state_table, offset 0x0050 + "abandoned": 0x0050, + + // slice_state_table, offset 0x0060 + "active": 0x0060, + + // socket_state_table, offset 0x0070 + "start-chown": 0x0070, + "start-post": 0x0071, + "listening": 0x0072, + "stop-pre": 0x0073, + "stop-pre-sigterm": 0x0074, + "stop-pre-sigkill": 0x0075, + "final-sigkill": 0x0076, + + // swap_state_table, offset 0x0080 + "activating": 0x0080, + "activating-done": 0x0081, + "deactivating": 0x0082, + "deactivating-sigterm": 0x0083, + "deactivating-sigkill": 0x0084, + + // target_state_table, offset 0x0090 + + // timer_state_table, offset 0x00a0 + "elapsed": 0x00a0, +} + +var ( + defaultTimeout = internal.Duration{Duration: time.Second} + defaultUnitType = "service" +) + +// Description returns a short description of the plugin +func (s *SystemdUnits) Description() string { + return "Gather systemd units state" +} + +// SampleConfig returns sample configuration options. 
+func (s *SystemdUnits) SampleConfig() string { + return ` + ## Set timeout for systemctl execution + # timeout = "1s" + # + ## Filter for a specific unit type, default is "service", other possible + ## values are "socket", "target", "device", "mount", "automount", "swap", + ## "timer", "path", "slice" and "scope ": + # unittype = "service" +` +} + +// Gather parses systemctl outputs and adds counters to the Accumulator +func (s *SystemdUnits) Gather(acc telegraf.Accumulator) error { + out, err := s.systemctl(s.Timeout, s.UnitType) + if err != nil { + return err + } + + scanner := bufio.NewScanner(out) + for scanner.Scan() { + line := scanner.Text() + + data := strings.Fields(line) + if len(data) < 4 { + acc.AddError(fmt.Errorf("Error parsing line (expected at least 4 fields): %s", line)) + continue + } + name := data[0] + load := data[1] + active := data[2] + sub := data[3] + tags := map[string]string{ + "name": name, + "load": load, + "active": active, + "sub": sub, + } + + var ( + load_code int + active_code int + sub_code int + ok bool + ) + if load_code, ok = load_map[load]; !ok { + acc.AddError(fmt.Errorf("Error parsing field 'load', value not in map: %s", load)) + continue + } + if active_code, ok = active_map[active]; !ok { + acc.AddError(fmt.Errorf("Error parsing field 'active', value not in map: %s", active)) + continue + } + if sub_code, ok = sub_map[sub]; !ok { + acc.AddError(fmt.Errorf("Error parsing field 'sub', value not in map: %s", sub)) + continue + } + fields := map[string]interface{}{ + "load_code": load_code, + "active_code": active_code, + "sub_code": sub_code, + } + + acc.AddFields(measurement, fields, tags) + } + + return nil +} + +func setSystemctl(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) { + // is systemctl available ? 
+ systemctlPath, err := exec.LookPath("systemctl") + if err != nil { + return nil, err + } + + cmd := exec.Command(systemctlPath, "list-units", "--all", fmt.Sprintf("--type=%s", UnitType), "--no-legend") + + var out bytes.Buffer + cmd.Stdout = &out + err = internal.RunTimeout(cmd, Timeout.Duration) + if err != nil { + return &out, fmt.Errorf("error running systemctl list-units --all --type=%s --no-legend: %s", UnitType, err) + } + + return &out, nil +} + +func init() { + inputs.Add("systemd_units", func() telegraf.Input { + return &SystemdUnits{ + systemctl: setSystemctl, + Timeout: defaultTimeout, + UnitType: defaultUnitType, + } + }) +} diff --git a/plugins/inputs/systemd_units/systemd_units_linux_test.go b/plugins/inputs/systemd_units/systemd_units_linux_test.go new file mode 100644 index 000000000..f45922bb9 --- /dev/null +++ b/plugins/inputs/systemd_units/systemd_units_linux_test.go @@ -0,0 +1,100 @@ +package systemd_units + +import ( + "bytes" + "fmt" + "reflect" + "testing" + + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" +) + +func TestSystemdUnits(t *testing.T) { + tests := []struct { + name string + line string + tags map[string]string + fields map[string]interface{} + status int + err error + }{ + { + name: "example loaded active running", + line: "example.service loaded active running example service description", + tags: map[string]string{"name": "example.service", "load": "loaded", "active": "active", "sub": "running"}, + fields: map[string]interface{}{ + "load_code": 0, + "active_code": 0, + "sub_code": 0, + }, + }, + { + name: "example loaded active exited", + line: "example.service loaded active exited example service description", + tags: map[string]string{"name": "example.service", "load": "loaded", "active": "active", "sub": "exited"}, + fields: map[string]interface{}{ + "load_code": 0, + "active_code": 0, + "sub_code": 4, + }, + }, + { + name: "example loaded failed failed", + line: "example.service loaded failed failed example service description", + tags: map[string]string{"name": "example.service", "load": "loaded", "active": "failed", "sub": "failed"}, + fields: map[string]interface{}{ + "load_code": 0, + "active_code": 3, + "sub_code": 12, + }, + }, + { + name: "example not-found inactive dead", + line: "example.service not-found inactive dead example service description", + tags: map[string]string{"name": "example.service", "load": "not-found", "active": "inactive", "sub": "dead"}, + fields: map[string]interface{}{ + "load_code": 2, + "active_code": 2, + "sub_code": 1, + }, + }, + { + name: "example unknown unknown unknown", + line: "example.service unknown unknown unknown example service description", + err: fmt.Errorf("Error parsing field 'load', value not in map: %s", "unknown"), + }, + { + name: "example too few fields", + line: "example.service loaded fai", + err: fmt.Errorf("Error parsing line (expected at least 4 fields): %s", "example.service loaded fai"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + systemd_units := &SystemdUnits{ + systemctl: func(Timeout internal.Duration, UnitType string) (*bytes.Buffer, error) { + return bytes.NewBufferString(tt.line), nil + }, + } + acc := new(testutil.Accumulator) + err := acc.GatherError(systemd_units.Gather) + if !reflect.DeepEqual(tt.err, err) { + t.Errorf("%s: expected error '%#v' got '%#v'", tt.name, tt.err, err) + } + if len(acc.Metrics) > 0 { + m := acc.Metrics[0] + if !reflect.DeepEqual(m.Measurement, measurement) { + t.Errorf("%s: 
expected measurement '%#v' got '%#v'\n", tt.name, measurement, m.Measurement) + } + if !reflect.DeepEqual(m.Tags, tt.tags) { + t.Errorf("%s: expected tags\n%#v got\n%#v\n", tt.name, tt.tags, m.Tags) + } + if !reflect.DeepEqual(m.Fields, tt.fields) { + t.Errorf("%s: expected fields\n%#v got\n%#v\n", tt.name, tt.fields, m.Fields) + } + } + }) + } +} diff --git a/plugins/inputs/systemd_units/systemd_units_notlinux.go b/plugins/inputs/systemd_units/systemd_units_notlinux.go new file mode 100644 index 000000000..f53cea3de --- /dev/null +++ b/plugins/inputs/systemd_units/systemd_units_notlinux.go @@ -0,0 +1,3 @@ +// +build !linux + +package systemd_units diff --git a/plugins/inputs/tail/README.md b/plugins/inputs/tail/README.md index 27cb6418e..e9f9cc8cb 100644 --- a/plugins/inputs/tail/README.md +++ b/plugins/inputs/tail/README.md @@ -19,12 +19,11 @@ see http://man7.org/linux/man-pages/man1/tail.1.html for more details. The plugin expects messages in one of the [Telegraf Input Data Formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md). -### Configuration: +### Configuration ```toml -# Stream a log file, like the tail -f command [[inputs.tail]] - ## files to tail. + ## File names or a pattern to tail. ## These accept standard unix glob matching rules, but with the addition of ## ** as a "super asterisk". ie: ## "/var/log/**.log" -> recursively find all .log files in /var/log @@ -34,14 +33,21 @@ The plugin expects messages in one of the ## See https://github.com/gobwas/glob for more examples ## files = ["/var/mymetrics.out"] + ## Read file from beginning. - from_beginning = false + # from_beginning = false + ## Whether file is a named pipe - pipe = false + # pipe = false ## Method used to watch for file updates. Can be either "inotify" or "poll". # watch_method = "inotify" + ## Maximum lines of the file to process that have not yet be written by the + ## output. For best throughput set based on the number of metrics on each + ## line and the size of the output's metric_batch_size. + # max_undelivered_lines = 1000 + ## Data format to consume. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -49,7 +55,7 @@ The plugin expects messages in one of the data_format = "influx" ``` -### Metrics: +### Metrics Metrics are produced according to the `data_format` option. Additionally a tag labeled `path` is added to the metric containing the filename being tailed. 
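The `max_undelivered_lines` option introduced in the README above is implemented in `tail.go` (next in this diff) with a tracking accumulator and a semaphore channel. The following stripped-down sketch, with hypothetical names, shows only that backpressure pattern: reading blocks once the configured number of line groups is in flight, and a slot is freed when the output acknowledges delivery.

```go
// Stripped-down sketch of the max_undelivered_lines backpressure idea:
// a buffered channel acts as a semaphore limiting in-flight line groups.
package main

import (
	"fmt"
	"sync"
)

type empty struct{}

func main() {
	const maxUndelivered = 3
	sem := make(chan empty, maxUndelivered)
	delivered := make(chan int) // stands in for acc.Delivered()
	var wg sync.WaitGroup

	// Consumer: frees a semaphore slot each time the output acknowledges a group.
	go func() {
		for id := range delivered {
			<-sem
			fmt.Println("delivered group", id)
			wg.Done()
		}
	}()

	// Producer: acquiring a slot blocks once maxUndelivered groups are
	// outstanding, which is exactly the desired backpressure on reading.
	for id := 0; id < 10; id++ {
		sem <- empty{}
		wg.Add(1)
		go func(id int) { delivered <- id }(id)
	}
	wg.Wait()
}
```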
diff --git a/plugins/inputs/tail/tail.go b/plugins/inputs/tail/tail.go index 0de2a344c..02d35c95b 100644 --- a/plugins/inputs/tail/tail.go +++ b/plugins/inputs/tail/tail.go @@ -3,44 +3,67 @@ package tail import ( - "fmt" + "context" + "errors" "strings" "sync" "github.com/influxdata/tail" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/globpath" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/csv" ) const ( - defaultWatchMethod = "inotify" + defaultWatchMethod = "inotify" + defaultMaxUndeliveredLines = 1000 ) +var ( + offsets = make(map[string]int64) + offsetsMutex = new(sync.Mutex) +) + +type empty struct{} +type semaphore chan empty + type Tail struct { - Files []string - FromBeginning bool - Pipe bool - WatchMethod string + Files []string `toml:"files"` + FromBeginning bool `toml:"from_beginning"` + Pipe bool `toml:"pipe"` + WatchMethod string `toml:"watch_method"` + MaxUndeliveredLines int `toml:"max_undelivered_lines"` - tailers []*tail.Tail - parser parsers.Parser - wg sync.WaitGroup - acc telegraf.Accumulator - - sync.Mutex + Log telegraf.Logger `toml:"-"` + tailers map[string]*tail.Tail + offsets map[string]int64 + parserFunc parsers.ParserFunc + wg sync.WaitGroup + ctx context.Context + cancel context.CancelFunc + acc telegraf.TrackingAccumulator + sem semaphore } func NewTail() *Tail { + offsetsMutex.Lock() + offsetsCopy := make(map[string]int64, len(offsets)) + for k, v := range offsets { + offsetsCopy[k] = v + } + offsetsMutex.Unlock() + return &Tail{ - FromBeginning: false, + FromBeginning: false, + MaxUndeliveredLines: 1000, + offsets: offsetsCopy, } } const sampleConfig = ` - ## files to tail. + ## File names or a pattern to tail. ## These accept standard unix glob matching rules, but with the addition of ## ** as a "super asterisk". ie: ## "/var/log/**.log" -> recursively find all .log files in /var/log @@ -50,14 +73,21 @@ const sampleConfig = ` ## See https://github.com/gobwas/glob for more examples ## files = ["/var/mymetrics.out"] + ## Read file from beginning. - from_beginning = false + # from_beginning = false + ## Whether file is a named pipe - pipe = false + # pipe = false ## Method used to watch for file updates. Can be either "inotify" or "poll". # watch_method = "inotify" + ## Maximum lines of the file to process that have not yet be written by the + ## output. For best throughput set based on the number of metrics on each + ## line and the size of the output's metric_batch_size. + # max_undelivered_lines = 1000 + ## Data format to consume. 
## Each data format has its own unique set of configuration options, read ## more about them here: @@ -70,27 +100,54 @@ func (t *Tail) SampleConfig() string { } func (t *Tail) Description() string { - return "Stream a log file, like the tail -f command" + return "Parse the new lines appended to a file" } -func (t *Tail) Gather(acc telegraf.Accumulator) error { +func (t *Tail) Init() error { + if t.MaxUndeliveredLines == 0 { + return errors.New("max_undelivered_lines must be positive") + } + t.sem = make(semaphore, t.MaxUndeliveredLines) return nil } +func (t *Tail) Gather(acc telegraf.Accumulator) error { + return t.tailNewFiles(true) +} + func (t *Tail) Start(acc telegraf.Accumulator) error { - t.Lock() - defer t.Unlock() + t.acc = acc.WithTracking(t.MaxUndeliveredLines) - t.acc = acc + t.ctx, t.cancel = context.WithCancel(context.Background()) - var seek *tail.SeekInfo - if !t.Pipe && !t.FromBeginning { - seek = &tail.SeekInfo{ - Whence: 2, - Offset: 0, + t.wg.Add(1) + go func() { + defer t.wg.Done() + for { + select { + case <-t.ctx.Done(): + return + case <-t.acc.Delivered(): + <-t.sem + } } - } + }() + t.tailers = make(map[string]*tail.Tail) + + err := t.tailNewFiles(t.FromBeginning) + + // clear offsets + t.offsets = make(map[string]int64) + // assumption that once Start is called, all parallel plugins have already been initialized + offsetsMutex.Lock() + offsets = make(map[string]int64) + offsetsMutex.Unlock() + + return err +} + +func (t *Tail) tailNewFiles(fromBeginning bool) error { var poll bool if t.WatchMethod == "poll" { poll = true @@ -100,9 +157,30 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { for _, filepath := range t.Files { g, err := globpath.Compile(filepath) if err != nil { - t.acc.AddError(fmt.Errorf("E! Error Glob %s failed to compile, %s", filepath, err)) + t.Log.Errorf("Glob %q failed to compile: %s", filepath, err.Error()) } - for file, _ := range g.Match() { + for _, file := range g.Match() { + if _, ok := t.tailers[file]; ok { + // we're already tailing this file + continue + } + + var seek *tail.SeekInfo + if !t.Pipe && !fromBeginning { + if offset, ok := t.offsets[file]; ok { + t.Log.Debugf("Using offset %d for %q", offset, file) + seek = &tail.SeekInfo{ + Whence: 0, + Offset: offset, + } + } else { + seek = &tail.SeekInfo{ + Whence: 2, + Offset: 0, + } + } + } + tailer, err := tail.TailFile(file, tail.Config{ ReOpen: true, @@ -114,70 +192,124 @@ func (t *Tail) Start(acc telegraf.Accumulator) error { Logger: tail.DiscardingLogger, }) if err != nil { - acc.AddError(err) + t.Log.Debugf("Failed to open file (%s): %v", file, err) continue } + + t.Log.Debugf("Tail added for %q", file) + + parser, err := t.parserFunc() + if err != nil { + t.Log.Errorf("Creating parser: %s", err.Error()) + } + // create a goroutine for each "tailer" t.wg.Add(1) - go t.receiver(tailer) - t.tailers = append(t.tailers, tailer) + go func() { + defer t.wg.Done() + t.receiver(parser, tailer) + + t.Log.Debugf("Tail removed for %q", tailer.Filename) + + if err := tailer.Err(); err != nil { + t.Log.Errorf("Tailing %q: %s", tailer.Filename, err.Error()) + } + }() + t.tailers[tailer.Filename] = tailer } } - return nil } -// this is launched as a goroutine to continuously watch a tailed logfile -// for changes, parse any incoming msgs, and add to the accumulator. -func (t *Tail) receiver(tailer *tail.Tail) { - defer t.wg.Done() +// ParseLine parses a line of text. 
+func parseLine(parser parsers.Parser, line string, firstLine bool) ([]telegraf.Metric, error) { + switch parser.(type) { + case *csv.Parser: + // The csv parser parses headers in Parse and skips them in ParseLine. + // As a temporary solution call Parse only when getting the first + // line from the file. + if firstLine { + return parser.Parse([]byte(line)) + } else { + m, err := parser.ParseLine(line) + if err != nil { + return nil, err + } - var m telegraf.Metric - var err error - var line *tail.Line - for line = range tailer.Lines { + if m != nil { + return []telegraf.Metric{m}, nil + } + return []telegraf.Metric{}, nil + } + default: + return parser.Parse([]byte(line)) + } +} + +// Receiver is launched as a goroutine to continuously watch a tailed logfile +// for changes, parse any incoming msgs, and add to the accumulator. +func (t *Tail) receiver(parser parsers.Parser, tailer *tail.Tail) { + var firstLine = true + for line := range tailer.Lines { if line.Err != nil { - t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n", - tailer.Filename, err)) + t.Log.Errorf("Tailing %q: %s", tailer.Filename, line.Err.Error()) continue } // Fix up files with Windows line endings. text := strings.TrimRight(line.Text, "\r") - m, err = t.parser.ParseLine(text) - if err == nil { - if m != nil { - tags := m.Tags() - tags["path"] = tailer.Filename - t.acc.AddFields(m.Name(), m.Fields(), tags, m.Time()) - } - } else { - t.acc.AddError(fmt.Errorf("E! Malformed log line in %s: [%s], Error: %s\n", - tailer.Filename, line.Text, err)) + metrics, err := parseLine(parser, text, firstLine) + if err != nil { + t.Log.Errorf("Malformed log line in %q: [%q]: %s", + tailer.Filename, line.Text, err.Error()) + continue + } + firstLine = false + + for _, metric := range metrics { + metric.AddTag("path", tailer.Filename) + } + + // Block until plugin is stopping or room is available to add metrics. + select { + case <-t.ctx.Done(): + return + case t.sem <- empty{}: + t.acc.AddTrackingMetricGroup(metrics) } - } - if err := tailer.Err(); err != nil { - t.acc.AddError(fmt.Errorf("E! Error tailing file %s, Error: %s\n", - tailer.Filename, err)) } } func (t *Tail) Stop() { - t.Lock() - defer t.Unlock() - for _, tailer := range t.tailers { + if !t.Pipe && !t.FromBeginning { + // store offset for resume + offset, err := tailer.Tell() + if err == nil { + t.Log.Debugf("Recording offset %d for %q", offset, tailer.Filename) + } else { + t.Log.Errorf("Recording offset for %q: %s", tailer.Filename, err.Error()) + } + } err := tailer.Stop() if err != nil { - t.acc.AddError(fmt.Errorf("E! 
Error stopping tail on file %s\n", tailer.Filename)) + t.Log.Errorf("Stopping tail on %q: %s", tailer.Filename, err.Error()) } - tailer.Cleanup() } + + t.cancel() t.wg.Wait() + + // persist offsets + offsetsMutex.Lock() + for k, v := range t.offsets { + offsets[k] = v + } + offsetsMutex.Unlock() } -func (t *Tail) SetParser(parser parsers.Parser) { - t.parser = parser +func (t *Tail) SetParserFunc(fn parsers.ParserFunc) { + t.parserFunc = fn } func init() { diff --git a/plugins/inputs/tail/tail_test.go b/plugins/inputs/tail/tail_test.go index e8a16cc5c..3b12ae080 100644 --- a/plugins/inputs/tail/tail_test.go +++ b/plugins/inputs/tail/tail_test.go @@ -1,14 +1,19 @@ package tail import ( + "bytes" "io/ioutil" + "log" "os" "runtime" "testing" + "time" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/parsers/csv" + "github.com/influxdata/telegraf/plugins/parsers/json" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -21,19 +26,22 @@ func TestTailFromBeginning(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) + defer tmpfile.Close() _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n") require.NoError(t, err) tt := NewTail() + tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} - p, _ := parsers.NewInfluxParser() - tt.SetParser(p) - defer tt.Stop() - defer tmpfile.Close() + tt.SetParserFunc(parsers.NewInfluxParser) + + err = tt.Init() + require.NoError(t, err) acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) + defer tt.Stop() require.NoError(t, acc.GatherError(tt.Gather)) acc.Wait(1) @@ -43,6 +51,7 @@ func TestTailFromBeginning(t *testing.T) { }, map[string]string{ "mytag": "foo", + "path": tmpfile.Name(), }) } @@ -54,18 +63,21 @@ func TestTailFromEnd(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) + defer tmpfile.Close() _, err = tmpfile.WriteString("cpu,mytag=foo usage_idle=100\n") require.NoError(t, err) tt := NewTail() + tt.Log = testutil.Logger{} tt.Files = []string{tmpfile.Name()} - p, _ := parsers.NewInfluxParser() - tt.SetParser(p) - defer tt.Stop() - defer tmpfile.Close() + tt.SetParserFunc(parsers.NewInfluxParser) + + err = tt.Init() + require.NoError(t, err) acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) + defer tt.Stop() for _, tailer := range tt.tailers { for n, err := tailer.Tell(); err == nil && n == 0; n, err = tailer.Tell() { // wait for tailer to jump to end @@ -84,6 +96,7 @@ func TestTailFromEnd(t *testing.T) { }, map[string]string{ "othertag": "foo", + "path": tmpfile.Name(), }) assert.Len(t, acc.Metrics, 1) } @@ -92,43 +105,53 @@ func TestTailBadLine(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) + defer tmpfile.Close() tt := NewTail() + tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} - p, _ := parsers.NewInfluxParser() - tt.SetParser(p) - defer tt.Stop() - defer tmpfile.Close() + tt.SetParserFunc(parsers.NewInfluxParser) + + err = tt.Init() + require.NoError(t, err) acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) + defer tt.Stop() + + buf := &bytes.Buffer{} + log.SetOutput(buf) + require.NoError(t, acc.GatherError(tt.Gather)) _, err = tmpfile.WriteString("cpu mytag= foo usage_idle= 100\n") 
require.NoError(t, err) - acc.WaitError(1) - assert.Contains(t, acc.Errors[0].Error(), "E! Malformed log line") + time.Sleep(500 * time.Millisecond) + assert.Contains(t, buf.String(), "Malformed log line") } func TestTailDosLineendings(t *testing.T) { tmpfile, err := ioutil.TempFile("", "") require.NoError(t, err) defer os.Remove(tmpfile.Name()) + defer tmpfile.Close() _, err = tmpfile.WriteString("cpu usage_idle=100\r\ncpu2 usage_idle=200\r\n") require.NoError(t, err) tt := NewTail() + tt.Log = testutil.Logger{} tt.FromBeginning = true tt.Files = []string{tmpfile.Name()} - p, _ := parsers.NewInfluxParser() - tt.SetParser(p) - defer tt.Stop() - defer tmpfile.Close() + tt.SetParserFunc(parsers.NewInfluxParser) + + err = tt.Init() + require.NoError(t, err) acc := testutil.Accumulator{} require.NoError(t, tt.Start(&acc)) + defer tt.Stop() require.NoError(t, acc.GatherError(tt.Gather)) acc.Wait(2) @@ -141,3 +164,123 @@ func TestTailDosLineendings(t *testing.T) { "usage_idle": float64(200), }) } + +// The csv parser should only parse the header line once per file. +func TestCSVHeadersParsedOnce(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "") + require.NoError(t, err) + defer func() { + tmpfile.Close() + os.Remove(tmpfile.Name()) + }() + + _, err = tmpfile.WriteString(` +measurement,time_idle +cpu,42 +cpu,42 +`) + require.NoError(t, err) + + plugin := NewTail() + plugin.Log = testutil.Logger{} + plugin.FromBeginning = true + plugin.Files = []string{tmpfile.Name()} + plugin.SetParserFunc(func() (parsers.Parser, error) { + return &csv.Parser{ + MeasurementColumn: "measurement", + HeaderRowCount: 1, + TimeFunc: func() time.Time { return time.Unix(0, 0) }, + }, nil + }) + + err = plugin.Init() + require.NoError(t, err) + + acc := testutil.Accumulator{} + err = plugin.Start(&acc) + require.NoError(t, err) + defer plugin.Stop() + err = plugin.Gather(&acc) + require.NoError(t, err) + acc.Wait(2) + plugin.Stop() + + expected := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0)), + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0)), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics()) +} + +// Ensure that the first line can produce multiple metrics (#6138) +func TestMultipleMetricsOnFirstLine(t *testing.T) { + tmpfile, err := ioutil.TempFile("", "") + require.NoError(t, err) + defer func() { + tmpfile.Close() + os.Remove(tmpfile.Name()) + }() + + _, err = tmpfile.WriteString(` +[{"time_idle": 42}, {"time_idle": 42}] +`) + require.NoError(t, err) + + plugin := NewTail() + plugin.Log = testutil.Logger{} + plugin.FromBeginning = true + plugin.Files = []string{tmpfile.Name()} + plugin.SetParserFunc(func() (parsers.Parser, error) { + return json.New( + &json.Config{ + MetricName: "cpu", + }) + }) + + err = plugin.Init() + require.NoError(t, err) + + acc := testutil.Accumulator{} + err = plugin.Start(&acc) + require.NoError(t, err) + defer plugin.Stop() + err = plugin.Gather(&acc) + require.NoError(t, err) + acc.Wait(2) + plugin.Stop() + + expected := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0)), + testutil.MustMetric("cpu", + map[string]string{ + "path": tmpfile.Name(), + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 
0)), + } + testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(), + testutil.IgnoreTime()) +} diff --git a/plugins/inputs/tcp_listener/tcp_listener.go b/plugins/inputs/tcp_listener/tcp_listener.go index 544f36bd6..41b8e4637 100644 --- a/plugins/inputs/tcp_listener/tcp_listener.go +++ b/plugins/inputs/tcp_listener/tcp_listener.go @@ -48,13 +48,15 @@ type TcpListener struct { TotalConnections selfstat.Stat PacketsRecv selfstat.Stat BytesRecv selfstat.Stat + + Log telegraf.Logger } -var dropwarn = "E! Error: tcp_listener message queue full. " + +var dropwarn = "tcp_listener message queue full. " + "We have dropped %d messages so far. " + - "You may want to increase allowed_pending_messages in the config\n" + "You may want to increase allowed_pending_messages in the config" -var malformedwarn = "E! tcp_listener has received %d malformed packets" + +var malformedwarn = "tcp_listener has received %d malformed packets" + " thus far." const sampleConfig = ` @@ -114,16 +116,15 @@ func (t *TcpListener) Start(acc telegraf.Accumulator) error { address, _ := net.ResolveTCPAddr("tcp", t.ServiceAddress) t.listener, err = net.ListenTCP("tcp", address) if err != nil { - log.Fatalf("ERROR: ListenUDP - %s", err) + t.Log.Errorf("Failed to listen: %s", err.Error()) return err } - log.Println("I! TCP server listening on: ", t.listener.Addr().String()) t.wg.Add(2) go t.tcpListen() go t.tcpParser() - log.Printf("I! Started TCP listener service on %s\n", t.ServiceAddress) + t.Log.Infof("Started TCP listener service on %q", t.ServiceAddress) return nil } @@ -150,7 +151,7 @@ func (t *TcpListener) Stop() { t.wg.Wait() close(t.in) - log.Println("I! Stopped TCP listener service on ", t.ServiceAddress) + t.Log.Infof("Stopped TCP listener service on %q", t.ServiceAddress) } // tcpListen listens for incoming TCP connections. @@ -191,9 +192,8 @@ func (t *TcpListener) refuser(conn *net.TCPConn) { " reached, closing.\nYou may want to increase max_tcp_connections in"+ " the Telegraf tcp listener configuration.\n", t.MaxTCPConnections) conn.Close() - log.Printf("I! Refused TCP Connection from %s", conn.RemoteAddr()) - log.Printf("I! 
WARNING: Maximum TCP Connections reached, you may want to" + - " adjust max_tcp_connections") + t.Log.Infof("Refused TCP Connection from %s", conn.RemoteAddr()) + t.Log.Warn("Maximum TCP Connections reached, you may want to adjust max_tcp_connections") } // handler handles a single TCP Connection @@ -235,7 +235,7 @@ func (t *TcpListener) handler(conn *net.TCPConn, id string) { default: t.drops++ if t.drops == 1 || t.drops%t.AllowedPendingMessages == 0 { - log.Printf(dropwarn, t.drops) + t.Log.Errorf(dropwarn, t.drops) } } } @@ -268,7 +268,7 @@ func (t *TcpListener) tcpParser() error { } else { t.malformed++ if t.malformed == 1 || t.malformed%1000 == 0 { - log.Printf(malformedwarn, t.malformed) + t.Log.Errorf(malformedwarn, t.malformed) } } } diff --git a/plugins/inputs/tcp_listener/tcp_listener_test.go b/plugins/inputs/tcp_listener/tcp_listener_test.go index 1063cb5c1..16895d674 100644 --- a/plugins/inputs/tcp_listener/tcp_listener_test.go +++ b/plugins/inputs/tcp_listener/tcp_listener_test.go @@ -33,6 +33,7 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 func newTestTcpListener() (*TcpListener, chan []byte) { in := make(chan []byte, 1500) listener := &TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8194", AllowedPendingMessages: 10000, MaxTCPConnections: 250, @@ -45,6 +46,7 @@ func newTestTcpListener() (*TcpListener, chan []byte) { // benchmark how long it takes to accept & process 100,000 metrics: func BenchmarkTCP(b *testing.B) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8198", AllowedPendingMessages: 100000, MaxTCPConnections: 250, @@ -76,6 +78,7 @@ func BenchmarkTCP(b *testing.B) { func TestHighTrafficTCP(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8199", AllowedPendingMessages: 100000, MaxTCPConnections: 250, @@ -103,6 +106,7 @@ func TestHighTrafficTCP(t *testing.T) { func TestConnectTCP(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8194", AllowedPendingMessages: 10000, MaxTCPConnections: 250, @@ -137,9 +141,10 @@ func TestConnectTCP(t *testing.T) { } } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestConcurrentConns(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8195", AllowedPendingMessages: 10000, MaxTCPConnections: 2, @@ -172,9 +177,10 @@ func TestConcurrentConns(t *testing.T) { assert.Equal(t, io.EOF, err) } -// Test that MaxTCPConections is respected when max==1 +// Test that MaxTCPConnections is respected when max==1 func TestConcurrentConns1(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8196", AllowedPendingMessages: 10000, MaxTCPConnections: 1, @@ -205,9 +211,10 @@ func TestConcurrentConns1(t *testing.T) { assert.Equal(t, io.EOF, err) } -// Test that MaxTCPConections is respected +// Test that MaxTCPConnections is respected func TestCloseConcurrentConns(t *testing.T) { listener := TcpListener{ + Log: testutil.Logger{}, ServiceAddress: "localhost:8195", AllowedPendingMessages: 10000, MaxTCPConnections: 2, @@ -300,7 +307,10 @@ func TestRunParserJSONMsg(t *testing.T) { listener.acc = &acc defer close(listener.done) - listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil) + listener.parser, _ = parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "udp_json_test", + }) listener.wg.Add(1) go listener.tcpParser() diff --git 
a/plugins/inputs/temp/README.md b/plugins/inputs/temp/README.md new file mode 100644 index 000000000..8398d25ca --- /dev/null +++ b/plugins/inputs/temp/README.md @@ -0,0 +1,39 @@
+# Temp Input Plugin
+
+The temp input plugin gathers metrics on system temperature. This plugin is
+meant to be multi-platform and uses platform-specific collection methods.
+
+Currently supports Linux and Windows.
+
+### Configuration
+
+```toml
+[[inputs.temp]]
+  # no configuration
+```
+
+### Metrics
+
+- temp
+  - tags:
+    - sensor
+  - fields:
+    - temp (float, celsius)
+
+
+### Troubleshooting
+
+On **Windows**, the plugin uses a WMI call that can be replicated with the
+following command:
+```
+wmic /namespace:\\root\wmi PATH MSAcpi_ThermalZoneTemperature
+```
+
+### Example Output
+
+```
+temp,sensor=coretemp_physicalid0_crit temp=100 1531298763000000000
+temp,sensor=coretemp_physicalid0_critalarm temp=0 1531298763000000000
+temp,sensor=coretemp_physicalid0_input temp=100 1531298763000000000
+temp,sensor=coretemp_physicalid0_max temp=100 1531298763000000000
+```
diff --git a/plugins/inputs/temp/temp.go b/plugins/inputs/temp/temp.go new file mode 100644 index 000000000..baf647b59 --- /dev/null +++ b/plugins/inputs/temp/temp.go @@ -0,0 +1,50 @@
+package temp
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/influxdata/telegraf"
+ "github.com/influxdata/telegraf/plugins/inputs"
+ "github.com/influxdata/telegraf/plugins/inputs/system"
+)
+
+type Temperature struct {
+ ps system.PS
+}
+
+func (t *Temperature) Description() string {
+ return "Read metrics about temperature"
+}
+
+const sampleConfig = ""
+
+func (t *Temperature) SampleConfig() string {
+ return sampleConfig
+}
+
+func (t *Temperature) Gather(acc telegraf.Accumulator) error {
+ temps, err := t.ps.Temperature()
+ if err != nil {
+ if strings.Contains(err.Error(), "not implemented yet") {
+ return fmt.Errorf("plugin is not supported on this platform: %v", err)
+ }
+ return fmt.Errorf("error getting temperatures info: %s", err)
+ }
+ for _, temp := range temps {
+ tags := map[string]string{
+ "sensor": temp.SensorKey,
+ }
+ fields := map[string]interface{}{
+ "temp": temp.Temperature,
+ }
+ acc.AddFields("temp", fields, tags)
+ }
+ return nil
+}
+
+func init() {
+ inputs.Add("temp", func() telegraf.Input {
+ return &Temperature{ps: system.NewSystemPS()}
+ })
+}
diff --git a/plugins/inputs/temp/temp_test.go b/plugins/inputs/temp/temp_test.go new file mode 100644 index 000000000..080ff66ac --- /dev/null +++ b/plugins/inputs/temp/temp_test.go @@ -0,0 +1,38 @@
+package temp
+
+import (
+ "testing"
+
+ "github.com/shirou/gopsutil/host"
+ "github.com/stretchr/testify/require"
+
+ "github.com/influxdata/telegraf/plugins/inputs/system"
+ "github.com/influxdata/telegraf/testutil"
+)
+
+func TestTemperature(t *testing.T) {
+ var mps system.MockPS
+ var err error
+ defer mps.AssertExpectations(t)
+ var acc testutil.Accumulator
+
+ ts := host.TemperatureStat{
+ SensorKey: "coretemp_sensor1_crit",
+ Temperature: 60.5,
+ }
+
+ mps.On("Temperature").Return([]host.TemperatureStat{ts}, nil)
+
+ err = (&Temperature{ps: &mps}).Gather(&acc)
+ require.NoError(t, err)
+
+ expectedFields := map[string]interface{}{
+ "temp": float64(60.5),
+ }
+
+ expectedTags := map[string]string{
+ "sensor": "coretemp_sensor1_crit",
+ }
+ acc.AssertContainsTaggedFields(t, "temp", expectedFields, expectedTags)
+
+}
diff --git a/plugins/inputs/tengine/tengine.go b/plugins/inputs/tengine/tengine.go index 1ee63740f..245e0a3a2 100644 --- a/plugins/inputs/tengine/tengine.go +++
b/plugins/inputs/tengine/tengine.go @@ -101,7 +101,7 @@ func (n *Tengine) createHttpClient() (*http.Client, error) { return client, nil } -type TengineSatus struct { +type TengineStatus struct { host string bytes_in uint64 bytes_out uint64 @@ -135,7 +135,7 @@ type TengineSatus struct { } func (n *Tengine) gatherUrl(addr *url.URL, acc telegraf.Accumulator) error { - var tenginestatus TengineSatus + var tenginestatus TengineStatus resp, err := n.client.Get(addr.String()) if err != nil { return fmt.Errorf("error making HTTP request to %s: %s", addr.String(), err) diff --git a/plugins/inputs/udp_listener/udp_listener.go b/plugins/inputs/udp_listener/udp_listener.go index d0a728b3c..7fa59fdb1 100644 --- a/plugins/inputs/udp_listener/udp_listener.go +++ b/plugins/inputs/udp_listener/udp_listener.go @@ -53,17 +53,19 @@ type UdpListener struct { PacketsRecv selfstat.Stat BytesRecv selfstat.Stat + + Log telegraf.Logger } // UDP_MAX_PACKET_SIZE is packet limit, see // https://en.wikipedia.org/wiki/User_Datagram_Protocol#Packet_structure const UDP_MAX_PACKET_SIZE int = 64 * 1024 -var dropwarn = "E! Error: udp_listener message queue full. " + +var dropwarn = "udp_listener message queue full. " + "We have dropped %d messages so far. " + - "You may want to increase allowed_pending_messages in the config\n" + "You may want to increase allowed_pending_messages in the config" -var malformedwarn = "E! udp_listener has received %d malformed packets" + +var malformedwarn = "udp_listener has received %d malformed packets" + " thus far." const sampleConfig = ` @@ -113,7 +115,7 @@ func (u *UdpListener) Start(acc telegraf.Accumulator) error { u.wg.Add(1) go u.udpParser() - log.Printf("I! Started UDP listener service on %s (ReadBuffer: %d)\n", u.ServiceAddress, u.UDPBufferSize) + u.Log.Infof("Started service on %q (ReadBuffer: %d)", u.ServiceAddress, u.UDPBufferSize) return nil } @@ -124,7 +126,7 @@ func (u *UdpListener) Stop() { u.wg.Wait() u.listener.Close() close(u.in) - log.Println("I! Stopped UDP listener service on ", u.ServiceAddress) + u.Log.Infof("Stopped service on %q", u.ServiceAddress) } func (u *UdpListener) udpListen() error { @@ -134,15 +136,15 @@ func (u *UdpListener) udpListen() error { u.listener, err = net.ListenUDP("udp", address) if err != nil { - return fmt.Errorf("E! Error: ListenUDP - %s", err) + return err } - log.Println("I! UDP server listening on: ", u.listener.LocalAddr().String()) + u.Log.Infof("Server listening on %q", u.listener.LocalAddr().String()) if u.UDPBufferSize > 0 { err = u.listener.SetReadBuffer(u.UDPBufferSize) // if we want to move away from OS default if err != nil { - return fmt.Errorf("E! Failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err) + return fmt.Errorf("failed to set UDP read buffer to %d: %s", u.UDPBufferSize, err) } } @@ -166,7 +168,7 @@ func (u *UdpListener) udpListenLoop() { if err != nil { if err, ok := err.(net.Error); ok && err.Timeout() { } else { - log.Printf("E! 
Error: %s\n", err.Error()) + u.Log.Error(err.Error()) } continue } @@ -180,7 +182,7 @@ func (u *UdpListener) udpListenLoop() { default: u.drops++ if u.drops == 1 || u.drops%u.AllowedPendingMessages == 0 { - log.Printf(dropwarn, u.drops) + u.Log.Errorf(dropwarn, u.drops) } } } @@ -208,7 +210,7 @@ func (u *UdpListener) udpParser() error { } else { u.malformed++ if u.malformed == 1 || u.malformed%1000 == 0 { - log.Printf(malformedwarn, u.malformed) + u.Log.Errorf(malformedwarn, u.malformed) } } } diff --git a/plugins/inputs/udp_listener/udp_listener_test.go b/plugins/inputs/udp_listener/udp_listener_test.go index e0e0e862e..b241235e4 100644 --- a/plugins/inputs/udp_listener/udp_listener_test.go +++ b/plugins/inputs/udp_listener/udp_listener_test.go @@ -8,14 +8,11 @@ import ( "log" "net" "os" - "runtime" "strings" "testing" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/testutil" - - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -34,53 +31,55 @@ cpu_load_short,host=server06 value=12.0 1422568543702900257 func newTestUdpListener() (*UdpListener, chan []byte) { in := make(chan []byte, 1500) listener := &UdpListener{ + Log: testutil.Logger{}, ServiceAddress: ":8125", AllowedPendingMessages: 10000, - in: in, - done: make(chan struct{}), + in: in, + done: make(chan struct{}), } return listener, in } -func TestHighTrafficUDP(t *testing.T) { - listener := UdpListener{ - ServiceAddress: ":8126", - AllowedPendingMessages: 100000, - } - var err error - listener.parser, err = parsers.NewInfluxParser() - require.NoError(t, err) - acc := &testutil.Accumulator{} +// func TestHighTrafficUDP(t *testing.T) { +// listener := UdpListener{ +// ServiceAddress: ":8126", +// AllowedPendingMessages: 100000, +// } +// var err error +// listener.parser, err = parsers.NewInfluxParser() +// require.NoError(t, err) +// acc := &testutil.Accumulator{} - // send multiple messages to socket - err = listener.Start(acc) - require.NoError(t, err) +// // send multiple messages to socket +// err = listener.Start(acc) +// require.NoError(t, err) - conn, err := net.Dial("udp", "127.0.0.1:8126") - require.NoError(t, err) - mlen := int64(len(testMsgs)) - var sent int64 - for i := 0; i < 20000; i++ { - for sent > listener.BytesRecv.Get()+32000 { - // more than 32kb sitting in OS buffer, let it drain - runtime.Gosched() - } - conn.Write([]byte(testMsgs)) - sent += mlen - } - for sent > listener.BytesRecv.Get() { - runtime.Gosched() - } - for len(listener.in) > 0 { - runtime.Gosched() - } - listener.Stop() +// conn, err := net.Dial("udp", "127.0.0.1:8126") +// require.NoError(t, err) +// mlen := int64(len(testMsgs)) +// var sent int64 +// for i := 0; i < 20000; i++ { +// for sent > listener.BytesRecv.Get()+32000 { +// // more than 32kb sitting in OS buffer, let it drain +// runtime.Gosched() +// } +// conn.Write([]byte(testMsgs)) +// sent += mlen +// } +// for sent > listener.BytesRecv.Get() { +// runtime.Gosched() +// } +// for len(listener.in) > 0 { +// runtime.Gosched() +// } +// listener.Stop() - assert.Equal(t, uint64(100000), acc.NMetrics()) -} +// assert.Equal(t, uint64(100000), acc.NMetrics()) +// } func TestConnectUDP(t *testing.T) { listener := UdpListener{ + Log: testutil.Logger{}, ServiceAddress: ":8127", AllowedPendingMessages: 10000, } @@ -193,7 +192,10 @@ func TestRunParserJSONMsg(t *testing.T) { listener.acc = &acc defer close(listener.done) - listener.parser, _ = parsers.NewJSONParser("udp_json_test", []string{}, nil) + listener.parser, _ = 
parsers.NewParser(&parsers.Config{ + DataFormat: "json", + MetricName: "udp_json_test", + }) listener.wg.Add(1) go listener.udpParser() diff --git a/plugins/inputs/unbound/README.md b/plugins/inputs/unbound/README.md index 4f1f862bb..1ccd183bc 100644 --- a/plugins/inputs/unbound/README.md +++ b/plugins/inputs/unbound/README.md @@ -18,12 +18,15 @@ a validating, recursive, and caching DNS resolver. ## The default location of the unbound-control binary can be overridden with: # binary = "/usr/sbin/unbound-control" - ## The default timeout of 1s can be overriden with: + ## The default location of the unbound config file can be overridden with: + # config_file = "/etc/unbound/unbound.conf" + + ## The default timeout of 1s can be overridden with: # timeout = "1s" ## When set to true, thread metrics are tagged with the thread id. ## - ## The default is false for backwards compatibility, and will be change to + ## The default is false for backwards compatibility, and will be changed to ## true in a future version. It is recommended to set to true on new ## deployments. thread_as_tag = false @@ -56,7 +59,9 @@ You will also need to update your sudoers file: ```bash $ visudo # Add the following line: -telegraf ALL=(ALL) NOPASSWD: /usr/sbin/unbound-control +Cmnd_Alias UNBOUNDCTL = /usr/sbin/unbound-control +telegraf ALL=(ALL) NOPASSWD: UNBOUNDCTL +Defaults!UNBOUNDCTL !logfile, !syslog, !pam_session ``` Please use the solution you see as most appropriate. diff --git a/plugins/inputs/unbound/unbound.go b/plugins/inputs/unbound/unbound.go index 31a6d5005..bb4ecde58 100644 --- a/plugins/inputs/unbound/unbound.go +++ b/plugins/inputs/unbound/unbound.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool) (*bytes.Buffer, error) +type runner func(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error) // Unbound is used to store configuration values type Unbound struct { @@ -26,6 +26,7 @@ type Unbound struct { UseSudo bool Server string ThreadAsTag bool + ConfigFile string filter filter.Filter run runner @@ -45,12 +46,15 @@ var sampleConfig = ` ## The default location of the unbound-control binary can be overridden with: # binary = "/usr/sbin/unbound-control" - ## The default timeout of 1s can be overriden with: + ## The default location of the unbound config file can be overridden with: + # config_file = "/etc/unbound/unbound.conf" + + ## The default timeout of 1s can be overridden with: # timeout = "1s" ## When set to true, thread metrics are tagged with the thread id. ## - ## The default is false for backwards compatibility, and will be change to + ## The default is false for backwards compatibility, and will be changed to ## true in a future version. It is recommended to set to true on new ## deployments. 
thread_as_tag = false @@ -67,7 +71,7 @@ func (s *Unbound) SampleConfig() string { } // Shell out to unbound_stat and return the output -func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool) (*bytes.Buffer, error) { +func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Server string, ThreadAsTag bool, ConfigFile string) (*bytes.Buffer, error) { cmdArgs := []string{"stats_noreset"} if Server != "" { @@ -96,6 +100,10 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv cmdArgs = append([]string{"-s", server}, cmdArgs...) } + if ConfigFile != "" { + cmdArgs = append([]string{"-c", ConfigFile}, cmdArgs...) + } + cmd := exec.Command(cmdName, cmdArgs...) if UseSudo { @@ -118,14 +126,14 @@ func unboundRunner(cmdName string, Timeout internal.Duration, UseSudo bool, Serv // All the dots in stat name will replaced by underscores. Histogram statistics will not be collected. func (s *Unbound) Gather(acc telegraf.Accumulator) error { - // Always exclude histrogram statistics + // Always exclude histogram statistics statExcluded := []string{"histogram.*"} filterExcluded, err := filter.Compile(statExcluded) if err != nil { return err } - out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ThreadAsTag) + out, err := s.run(s.Binary, s.Timeout, s.UseSudo, s.Server, s.ThreadAsTag, s.ConfigFile) if err != nil { return fmt.Errorf("error gathering metrics: %s", err) } @@ -207,6 +215,7 @@ func init() { UseSudo: false, Server: "", ThreadAsTag: false, + ConfigFile: "", } }) } diff --git a/plugins/inputs/unbound/unbound_test.go b/plugins/inputs/unbound/unbound_test.go index b1d6206c3..cc4b99dae 100644 --- a/plugins/inputs/unbound/unbound_test.go +++ b/plugins/inputs/unbound/unbound_test.go @@ -12,8 +12,8 @@ import ( var TestTimeout = internal.Duration{Duration: time.Second} -func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Server string, ThreadAsTag bool) func(string, internal.Duration, bool, string, bool) (*bytes.Buffer, error) { - return func(string, internal.Duration, bool, string, bool) (*bytes.Buffer, error) { +func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Server string, ThreadAsTag bool, ConfigFile string) func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) { + return func(string, internal.Duration, bool, string, bool, string) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,7 +21,7 @@ func UnboundControl(output string, Timeout internal.Duration, useSudo bool, Serv func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Unbound{ - run: UnboundControl(fullOutput, TestTimeout, true, "", false), + run: UnboundControl(fullOutput, TestTimeout, true, "", false, ""), } err := v.Gather(acc) @@ -38,7 +38,7 @@ func TestParseFullOutput(t *testing.T) { func TestParseFullOutputThreadAsTag(t *testing.T) { acc := &testutil.Accumulator{} v := &Unbound{ - run: UnboundControl(fullOutput, TestTimeout, true, "", true), + run: UnboundControl(fullOutput, TestTimeout, true, "", true, ""), ThreadAsTag: true, } err := v.Gather(acc) diff --git a/plugins/inputs/uwsgi/README.md b/plugins/inputs/uwsgi/README.md new file mode 100644 index 000000000..c4d41a02d --- /dev/null +++ b/plugins/inputs/uwsgi/README.md @@ -0,0 +1,92 @@ +# uWSGI + +The uWSGI input plugin gathers metrics about uWSGI using its [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html). 
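
Before enabling the plugin, it can be useful to confirm that the Stats Server is reachable and returns JSON. The following standalone Go sketch is an illustration only, not part of the plugin: it dials a TCP stats socket and decodes a few top-level fields. The address `127.0.0.1:1717` and the field names are assumptions taken from the example configuration and output further down; the plugin itself handles the `tcp`, `unix`, and `http` schemes in a similar way, as the `uwsgi.go` changes later in this diff show.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"
	"time"
)

// overview mirrors a few of the top-level fields exposed by the uWSGI
// Stats Server; the real payload also contains workers, apps and cores.
type overview struct {
	Version           string `json:"version"`
	ListenQueue       int    `json:"listen_queue"`
	ListenQueueErrors int    `json:"listen_queue_errors"`
	Load              int    `json:"load"`
	PID               int    `json:"pid"`
}

func main() {
	// Assumes a stats server exposed over TCP, e.g. "tcp://127.0.0.1:1717".
	conn, err := net.DialTimeout("tcp", "127.0.0.1:1717", 5*time.Second)
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	// The stats server writes a single JSON document, so decoding
	// straight from the connection is enough for this check.
	var o overview
	if err := json.NewDecoder(conn).Decode(&o); err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Printf("uWSGI %s pid=%d listen_queue=%d errors=%d load=%d\n",
		o.Version, o.PID, o.ListenQueue, o.ListenQueueErrors, o.Load)
}
```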
+ +### Configuration + +```toml +[[inputs.uwsgi]] + ## List with urls of uWSGI Stats servers. Url must match pattern: + ## scheme://address[:port] + ## + ## For example: + ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] + servers = ["tcp://127.0.0.1:1717"] + + ## General connection timeout + # timeout = "5s" +``` + + +### Metrics: + + - uwsgi_overview + - tags: + - source + - uid + - gid + - version + - fields: + - listen_queue + - listen_queue_errors + - signal_queue + - load + - pid + ++ uwsgi_workers + - tags: + - worker_id + - source + - fields: + - requests + - accepting + - delta_request + - exceptions + - harakiri_count + - pid + - signals + - signal_queue + - status + - rss + - vsz + - running_time + - last_spawn + - respawn_count + - tx + - avg_rt + +- uwsgi_apps + - tags: + - app_id + - worker_id + - source + - fields: + - modifier1 + - requests + - startup_time + - exceptions + ++ uwsgi_cores + - tags: + - core_id + - worker_id + - source + - fields: + - requests + - static_requests + - routed_requests + - offloaded_requests + - write_errors + - read_errors + - in_request + + +### Example Output: + +``` +uwsgi_overview,gid=0,uid=0,source=172.17.0.2,version=2.0.18 listen_queue=0i,listen_queue_errors=0i,load=0i,pid=1i,signal_queue=0i 1564441407000000000 +uwsgi_workers,source=172.17.0.2,worker_id=1 accepting=1i,avg_rt=0i,delta_request=0i,exceptions=0i,harakiri_count=0i,last_spawn=1564441202i,pid=6i,requests=0i,respawn_count=1i,rss=0i,running_time=0i,signal_queue=0i,signals=0i,status="idle",tx=0i,vsz=0i 1564441407000000000 +uwsgi_apps,app_id=0,worker_id=1,source=172.17.0.2 exceptions=0i,modifier1=0i,requests=0i,startup_time=0i 1564441407000000000 +uwsgi_cores,core_id=0,worker_id=1,source=172.17.0.2 in_request=0i,offloaded_requests=0i,read_errors=0i,requests=0i,routed_requests=0i,static_requests=0i,write_errors=0i 1564441407000000000 +``` + diff --git a/plugins/inputs/uwsgi/uwsgi.go b/plugins/inputs/uwsgi/uwsgi.go new file mode 100644 index 000000000..b13a7b3e6 --- /dev/null +++ b/plugins/inputs/uwsgi/uwsgi.go @@ -0,0 +1,295 @@ +// Package uwsgi implements a telegraf plugin for collecting uwsgi stats from +// the uwsgi stats server. +package uwsgi + +import ( + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "strconv" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Uwsgi server struct +type Uwsgi struct { + Servers []string `toml:"servers"` + Timeout internal.Duration `toml:"timeout"` + + client *http.Client +} + +// Description returns the plugin description +func (u *Uwsgi) Description() string { + return "Read uWSGI metrics." +} + +// SampleConfig returns the sample configuration +func (u *Uwsgi) SampleConfig() string { + return ` + ## List with urls of uWSGI Stats servers. 
URL must match pattern: + ## scheme://address[:port] + ## + ## For example: + ## servers = ["tcp://localhost:5050", "http://localhost:1717", "unix:///tmp/statsock"] + servers = ["tcp://127.0.0.1:1717"] + + ## General connection timeout + # timeout = "5s" +` +} + +// Gather collect data from uWSGI Server +func (u *Uwsgi) Gather(acc telegraf.Accumulator) error { + if u.client == nil { + u.client = &http.Client{ + Timeout: u.Timeout.Duration, + } + } + wg := &sync.WaitGroup{} + + for _, s := range u.Servers { + wg.Add(1) + go func(s string) { + defer wg.Done() + n, err := url.Parse(s) + if err != nil { + acc.AddError(fmt.Errorf("could not parse uWSGI Stats Server url '%s': %s", s, err.Error())) + return + } + + if err := u.gatherServer(acc, n); err != nil { + acc.AddError(err) + return + } + }(s) + } + + wg.Wait() + + return nil +} + +func (u *Uwsgi) gatherServer(acc telegraf.Accumulator, url *url.URL) error { + var err error + var r io.ReadCloser + var s StatsServer + + switch url.Scheme { + case "tcp": + r, err = net.DialTimeout(url.Scheme, url.Host, u.Timeout.Duration) + if err != nil { + return err + } + s.source = url.Host + case "unix": + r, err = net.DialTimeout(url.Scheme, url.Path, u.Timeout.Duration) + if err != nil { + return err + } + s.source, err = os.Hostname() + if err != nil { + s.source = "" + } + case "http": + resp, err := u.client.Get(url.String()) + if err != nil { + return err + } + r = resp.Body + s.source = url.Host + default: + return fmt.Errorf("'%s' is not a supported scheme", url.Scheme) + } + + defer r.Close() + + if err := json.NewDecoder(r).Decode(&s); err != nil { + return fmt.Errorf("failed to decode json payload from '%s': %s", url.String(), err.Error()) + } + + u.gatherStatServer(acc, &s) + + return err +} + +func (u *Uwsgi) gatherStatServer(acc telegraf.Accumulator, s *StatsServer) { + fields := map[string]interface{}{ + "listen_queue": s.ListenQueue, + "listen_queue_errors": s.ListenQueueErrors, + "signal_queue": s.SignalQueue, + "load": s.Load, + "pid": s.PID, + } + + tags := map[string]string{ + "source": s.source, + "uid": strconv.Itoa(s.UID), + "gid": strconv.Itoa(s.GID), + "version": s.Version, + } + acc.AddFields("uwsgi_overview", fields, tags) + + u.gatherWorkers(acc, s) + u.gatherApps(acc, s) + u.gatherCores(acc, s) +} + +func (u *Uwsgi) gatherWorkers(acc telegraf.Accumulator, s *StatsServer) { + for _, w := range s.Workers { + fields := map[string]interface{}{ + "requests": w.Requests, + "accepting": w.Accepting, + "delta_request": w.DeltaRequests, + "exceptions": w.Exceptions, + "harakiri_count": w.HarakiriCount, + "pid": w.PID, + "signals": w.Signals, + "signal_queue": w.SignalQueue, + "status": w.Status, + "rss": w.Rss, + "vsz": w.Vsz, + "running_time": w.RunningTime, + "last_spawn": w.LastSpawn, + "respawn_count": w.RespawnCount, + "tx": w.Tx, + "avg_rt": w.AvgRt, + } + tags := map[string]string{ + "worker_id": strconv.Itoa(w.WorkerID), + "source": s.source, + } + + acc.AddFields("uwsgi_workers", fields, tags) + } +} + +func (u *Uwsgi) gatherApps(acc telegraf.Accumulator, s *StatsServer) { + for _, w := range s.Workers { + for _, a := range w.Apps { + fields := map[string]interface{}{ + "modifier1": a.Modifier1, + "requests": a.Requests, + "startup_time": a.StartupTime, + "exceptions": a.Exceptions, + } + tags := map[string]string{ + "app_id": strconv.Itoa(a.AppID), + "worker_id": strconv.Itoa(w.WorkerID), + "source": s.source, + } + acc.AddFields("uwsgi_apps", fields, tags) + } + } +} + +func (u *Uwsgi) gatherCores(acc telegraf.Accumulator, 
s *StatsServer) { + for _, w := range s.Workers { + for _, c := range w.Cores { + fields := map[string]interface{}{ + "requests": c.Requests, + "static_requests": c.StaticRequests, + "routed_requests": c.RoutedRequests, + "offloaded_requests": c.OffloadedRequests, + "write_errors": c.WriteErrors, + "read_errors": c.ReadErrors, + "in_request": c.InRequest, + } + tags := map[string]string{ + "core_id": strconv.Itoa(c.CoreID), + "worker_id": strconv.Itoa(w.WorkerID), + "source": s.source, + } + acc.AddFields("uwsgi_cores", fields, tags) + } + + } +} + +func init() { + inputs.Add("uwsgi", func() telegraf.Input { + return &Uwsgi{ + Timeout: internal.Duration{Duration: 5 * time.Second}, + } + }) +} + +// StatsServer defines the stats server structure. +type StatsServer struct { + // Tags + source string + PID int `json:"pid"` + UID int `json:"uid"` + GID int `json:"gid"` + Version string `json:"version"` + + // Fields + ListenQueue int `json:"listen_queue"` + ListenQueueErrors int `json:"listen_queue_errors"` + SignalQueue int `json:"signal_queue"` + Load int `json:"load"` + + Workers []*Worker `json:"workers"` +} + +// Worker defines the worker metric structure. +type Worker struct { + // Tags + WorkerID int `json:"id"` + PID int `json:"pid"` + + // Fields + Accepting int `json:"accepting"` + Requests int `json:"requests"` + DeltaRequests int `json:"delta_requests"` + Exceptions int `json:"exceptions"` + HarakiriCount int `json:"harakiri_count"` + Signals int `json:"signals"` + SignalQueue int `json:"signal_queue"` + Status string `json:"status"` + Rss int `json:"rss"` + Vsz int `json:"vsz"` + RunningTime int `json:"running_time"` + LastSpawn int `json:"last_spawn"` + RespawnCount int `json:"respawn_count"` + Tx int `json:"tx"` + AvgRt int `json:"avg_rt"` + + Apps []*App `json:"apps"` + Cores []*Core `json:"cores"` +} + +// App defines the app metric structure. +type App struct { + // Tags + AppID int `json:"id"` + + // Fields + Modifier1 int `json:"modifier1"` + Requests int `json:"requests"` + StartupTime int `json:"startup_time"` + Exceptions int `json:"exceptions"` +} + +// Core defines the core metric structure. 
+type Core struct { + // Tags + CoreID int `json:"id"` + + // Fields + Requests int `json:"requests"` + StaticRequests int `json:"static_requests"` + RoutedRequests int `json:"routed_requests"` + OffloadedRequests int `json:"offloaded_requests"` + WriteErrors int `json:"write_errors"` + ReadErrors int `json:"read_errors"` + InRequest int `json:"in_request"` +} diff --git a/plugins/inputs/uwsgi/uwsgi_test.go b/plugins/inputs/uwsgi/uwsgi_test.go new file mode 100644 index 000000000..34581791e --- /dev/null +++ b/plugins/inputs/uwsgi/uwsgi_test.go @@ -0,0 +1,185 @@ +package uwsgi_test + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/influxdata/telegraf/plugins/inputs/uwsgi" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestBasic(t *testing.T) { + js := ` +{ + "version":"2.0.12", + "listen_queue":0, + "listen_queue_errors":0, + "signal_queue":0, + "load":0, + "pid":28372, + "uid":1000, + "gid":1000, + "cwd":"/opt/uwsgi", + "locks":[ + { + "user 0":0 + }, + { + "signal":0 + }, + { + "filemon":0 + }, + { + "timer":0 + }, + { + "rbtimer":0 + }, + { + "cron":0 + }, + { + "rpc":0 + }, + { + "snmp":0 + } + ], + "sockets":[ + { + "name":"127.0.0.1:47430", + "proto":"uwsgi", + "queue":0, + "max_queue":100, + "shared":0, + "can_offload":0 + } + ], + "workers":[ + { + "id":1, + "pid":28375, + "accepting":1, + "requests":0, + "delta_requests":0, + "exceptions":0, + "harakiri_count":0, + "signals":0, + "signal_queue":0, + "status":"idle", + "rss":0, + "vsz":0, + "running_time":0, + "last_spawn":1459942782, + "respawn_count":1, + "tx":0, + "avg_rt":0, + "apps":[ + { + "id":0, + "modifier1":0, + "mountpoint":"", + "startup_time":0, + "requests":0, + "exceptions":0, + "chdir":"" + } + ], + "cores":[ + { + "id":0, + "requests":0, + "static_requests":0, + "routed_requests":0, + "offloaded_requests":0, + "write_errors":0, + "read_errors":0, + "in_request":0, + "vars":[ + + ] + } + ] + } + ] +} +` + + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/" { + _, _ = w.Write([]byte(js)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + + defer fakeServer.Close() + + plugin := &uwsgi.Uwsgi{ + Servers: []string{fakeServer.URL + "/"}, + } + var acc testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 0, len(acc.Errors)) +} + +func TestInvalidJSON(t *testing.T) { + js := ` +{ + "version":"2.0.12", + "listen_queue":0, + "listen_queue_errors":0, + "signal_queue":0, + "load":0, + "pid:28372 + "uid":10 +} +` + + fakeServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/" { + _, _ = w.Write([]byte(js)) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + + defer fakeServer.Close() + + plugin := &uwsgi.Uwsgi{ + Servers: []string{fakeServer.URL + "/"}, + } + var acc testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 1, len(acc.Errors)) +} + +func TestHttpError(t *testing.T) { + plugin := &uwsgi.Uwsgi{ + Servers: []string{"http://novalidurladress/"}, + } + var acc testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 1, len(acc.Errors)) +} + +func TestTcpError(t *testing.T) { + plugin := &uwsgi.Uwsgi{ + Servers: []string{"tcp://novalidtcpadress/"}, + } + var acc testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 1, len(acc.Errors)) +} + +func TestUnixSocketError(t *testing.T) { + plugin := &uwsgi.Uwsgi{ + Servers: []string{"unix:///novalidunixsocket"}, + } + var acc 
testutil.Accumulator + plugin.Gather(&acc) + require.Equal(t, 1, len(acc.Errors)) +} diff --git a/plugins/inputs/varnish/README.md b/plugins/inputs/varnish/README.md index 8949fe6db..2db149804 100644 --- a/plugins/inputs/varnish/README.md +++ b/plugins/inputs/varnish/README.md @@ -5,22 +5,25 @@ This plugin gathers stats from [Varnish HTTP Cache](https://varnish-cache.org/) ### Configuration: ```toml - # A plugin to collect stats from Varnish HTTP Cache - [[inputs.varnish]] - ## If running as a restricted user you can prepend sudo for additional access: - #use_sudo = false +[[inputs.varnish]] + ## If running as a restricted user you can prepend sudo for additional access: + #use_sudo = false - ## The default location of the varnishstat binary can be overridden with: - binary = "/usr/bin/varnishstat" + ## The default location of the varnishstat binary can be overridden with: + binary = "/usr/bin/varnishstat" - ## By default, telegraf gathers stats for 3 metric points. - ## Setting stats will override the defaults shown below. - ## stats may also be set to ["all"], which will collect all stats - stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] + ## By default, telegraf gather stats for 3 metric points. + ## Setting stats will override the defaults shown below. + ## Glob matching can be used, ie, stats = ["MAIN.*"] + ## stats may also be set to ["*"], which will collect all stats + stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] - ## Optional name for the varnish instance (or working directory) to query - ## Usually appened after -n in varnish cli - # instance_name = instanceName + ## Optional name for the varnish instance (or working directory) to query + ## Usually append after -n in varnish cli + # instance_name = instanceName + + ## Timeout for varnishstat command + # timeout = "1s" ``` ### Measurements & Fields: @@ -89,7 +92,7 @@ MEMPOOL, etc). In the output, the prefix will be used as a tag, and removed from - MAIN.s_pipe (uint64, count, Total pipe sessions) - MAIN.s_pass (uint64, count, Total pass- ed requests) - MAIN.s_fetch (uint64, count, Total backend fetches) - - MAIN.s_synth (uint64, count, Total synthethic responses) + - MAIN.s_synth (uint64, count, Total synthetic responses) - MAIN.s_req_hdrbytes (uint64, count, Request header bytes) - MAIN.s_req_bodybytes (uint64, count, Request body bytes) - MAIN.s_resp_hdrbytes (uint64, count, Response header bytes) @@ -388,7 +391,9 @@ You will also need to update your sudoers file: ```bash $ visudo # Add the following line: -telegraf ALL=(ALL) NOPASSWD: /usr/bin/varnishstat +Cmnd_Alias VARNISHSTAT = /usr/bin/varnishstat +telegraf ALL=(ALL) NOPASSWD: VARNISHSTAT +Defaults!VARNISHSTAT !logfile, !syslog, !pam_session ``` Please use the solution you see as most appropriate. 
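
The `timeout` option added to the README above bounds how long Telegraf waits for `varnishstat` to return; as the `varnish.go` changes below show, the plugin passes it to Telegraf's internal `RunTimeout` helper. For context only, here is a rough standard-library sketch of the same idea using `exec.CommandContext`; the binary path and the one-second timeout are simply the defaults from the sample configuration.

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
	"time"
)

// runVarnishstat runs "varnishstat -1" and gives up once the supplied
// timeout elapses, roughly mirroring the plugin's configurable timeout.
func runVarnishstat(binary string, timeout time.Duration) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	cmd := exec.CommandContext(ctx, binary, "-1")
	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		return "", fmt.Errorf("error running varnishstat: %w", err)
	}
	return out.String(), nil
}

func main() {
	out, err := runVarnishstat("/usr/bin/varnishstat", time.Second)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Print(out)
}
```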
diff --git a/plugins/inputs/varnish/varnish.go b/plugins/inputs/varnish/varnish.go index f1c703971..893f00c0a 100644 --- a/plugins/inputs/varnish/varnish.go +++ b/plugins/inputs/varnish/varnish.go @@ -17,7 +17,7 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) -type runner func(cmdName string, UseSudo bool, InstanceName string) (*bytes.Buffer, error) +type runner func(cmdName string, UseSudo bool, InstanceName string, Timeout internal.Duration) (*bytes.Buffer, error) // Varnish is used to store configuration values type Varnish struct { @@ -25,6 +25,7 @@ type Varnish struct { Binary string UseSudo bool InstanceName string + Timeout internal.Duration filter filter.Filter run runner @@ -32,6 +33,7 @@ type Varnish struct { var defaultStats = []string{"MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"} var defaultBinary = "/usr/bin/varnishstat" +var defaultTimeout = internal.Duration{Duration: time.Second} var sampleConfig = ` ## If running as a restricted user you can prepend sudo for additional access: @@ -47,8 +49,11 @@ var sampleConfig = ` stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] ## Optional name for the varnish instance (or working directory) to query - ## Usually appened after -n in varnish cli + ## Usually append after -n in varnish cli # instance_name = instanceName + + ## Timeout for varnishstat command + # timeout = "1s" ` func (s *Varnish) Description() string { @@ -61,7 +66,7 @@ func (s *Varnish) SampleConfig() string { } // Shell out to varnish_stat and return the output -func varnishRunner(cmdName string, UseSudo bool, InstanceName string) (*bytes.Buffer, error) { +func varnishRunner(cmdName string, UseSudo bool, InstanceName string, Timeout internal.Duration) (*bytes.Buffer, error) { cmdArgs := []string{"-1"} if InstanceName != "" { @@ -78,7 +83,8 @@ func varnishRunner(cmdName string, UseSudo bool, InstanceName string) (*bytes.Bu var out bytes.Buffer cmd.Stdout = &out - err := internal.RunTimeout(cmd, time.Millisecond*200) + + err := internal.RunTimeout(cmd, Timeout.Duration) if err != nil { return &out, fmt.Errorf("error running varnishstat: %s", err) } @@ -109,7 +115,7 @@ func (s *Varnish) Gather(acc telegraf.Accumulator) error { } } - out, err := s.run(s.Binary, s.UseSudo, s.InstanceName) + out, err := s.run(s.Binary, s.UseSudo, s.InstanceName, s.Timeout) if err != nil { return fmt.Errorf("error gathering metrics: %s", err) } @@ -170,6 +176,7 @@ func init() { Binary: defaultBinary, UseSudo: false, InstanceName: "", + Timeout: defaultTimeout, } }) } diff --git a/plugins/inputs/varnish/varnish_test.go b/plugins/inputs/varnish/varnish_test.go index 30f91e237..96e5c3556 100644 --- a/plugins/inputs/varnish/varnish_test.go +++ b/plugins/inputs/varnish/varnish_test.go @@ -7,13 +7,15 @@ import ( "fmt" "strings" "testing" + "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" ) -func fakeVarnishStat(output string, useSudo bool, InstanceName string) func(string, bool, string) (*bytes.Buffer, error) { - return func(string, bool, string) (*bytes.Buffer, error) { +func fakeVarnishStat(output string, useSudo bool, InstanceName string, Timeout internal.Duration) func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { + return func(string, bool, string, internal.Duration) (*bytes.Buffer, error) { return bytes.NewBuffer([]byte(output)), nil } } @@ -21,7 +23,7 @@ func fakeVarnishStat(output string, useSudo bool, InstanceName string) func(stri func TestGather(t 
*testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(smOutput, false, ""), + run: fakeVarnishStat(smOutput, false, "", internal.Duration{Duration: time.Second}), Stats: []string{"*"}, } v.Gather(acc) @@ -37,7 +39,7 @@ func TestGather(t *testing.T) { func TestParseFullOutput(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true, ""), + run: fakeVarnishStat(fullOutput, true, "", internal.Duration{Duration: time.Second}), Stats: []string{"*"}, } err := v.Gather(acc) @@ -52,7 +54,7 @@ func TestParseFullOutput(t *testing.T) { func TestFilterSomeStats(t *testing.T) { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, false, ""), + run: fakeVarnishStat(fullOutput, false, "", internal.Duration{Duration: time.Second}), Stats: []string{"MGT.*", "VBE.*"}, } err := v.Gather(acc) @@ -75,7 +77,7 @@ func TestFieldConfig(t *testing.T) { for fieldCfg, expected := range expect { acc := &testutil.Accumulator{} v := &Varnish{ - run: fakeVarnishStat(fullOutput, true, ""), + run: fakeVarnishStat(fullOutput, true, "", internal.Duration{Duration: time.Second}), Stats: strings.Split(fieldCfg, ","), } err := v.Gather(acc) @@ -113,16 +115,16 @@ MEMPOOL.vbc.sz_wanted 88 . Size requested ` var parsedSmOutput = map[string]map[string]interface{}{ - "MAIN": map[string]interface{}{ + "MAIN": { "uptime": uint64(895), "cache_hit": uint64(95), "cache_miss": uint64(5), }, - "MGT": map[string]interface{}{ + "MGT": { "uptime": uint64(896), "child_start": uint64(1), }, - "MEMPOOL": map[string]interface{}{ + "MEMPOOL": { "vbc.live": uint64(0), "vbc.pool": uint64(10), "vbc.sz_wanted": uint64(88), @@ -190,7 +192,7 @@ MAIN.s_req 0 0.00 Total requests seen MAIN.s_pipe 0 0.00 Total pipe sessions seen MAIN.s_pass 0 0.00 Total pass-ed requests seen MAIN.s_fetch 0 0.00 Total backend fetches initiated -MAIN.s_synth 0 0.00 Total synthethic responses made +MAIN.s_synth 0 0.00 Total synthetic responses made MAIN.s_req_hdrbytes 0 0.00 Request header bytes MAIN.s_req_bodybytes 0 0.00 Request body bytes MAIN.s_resp_hdrbytes 0 0.00 Response header bytes diff --git a/plugins/inputs/vsphere/METRICS.md b/plugins/inputs/vsphere/METRICS.md new file mode 100644 index 000000000..d1a34bb26 --- /dev/null +++ b/plugins/inputs/vsphere/METRICS.md @@ -0,0 +1,289 @@ +# Common vSphere Performance Metrics +The set of performance metrics in vSphere is open ended. Metrics may be added or removed in new releases +and the set of available metrics may vary depending hardware, as well as what plugins and add-on products +are installed. Therefore, providing a definitive list of available metrics is difficult. The metrics listed +below are the most commonly available as of vSphere 6.5. + +For a complete list of metrics available from vSphere and the units they measure in, please reference the [VMWare vCenter Converter API Reference](https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.PerformanceManager.html). + +To list the exact set in your environment, please use the govc tool available [here](https://github.com/vmware/govmomi/tree/master/govc) + +To obtain the set of metrics for e.g. 
a VM, you may use the following command: +``` +govc metric.ls vm/* +``` + +## Virtual Machine Metrics +``` +cpu.demandEntitlementRatio.latest +cpu.usage.average +cpu.ready.summation +cpu.run.summation +cpu.system.summation +cpu.swapwait.summation +cpu.costop.summation +cpu.demand.average +cpu.readiness.average +cpu.maxlimited.summation +cpu.wait.summation +cpu.usagemhz.average +cpu.latency.average +cpu.used.summation +cpu.overlap.summation +cpu.idle.summation +cpu.entitlement.latest +datastore.maxTotalLatency.latest +disk.usage.average +disk.read.average +disk.write.average +disk.maxTotalLatency.latest +mem.llSwapUsed.average +mem.swapin.average +mem.vmmemctltarget.average +mem.activewrite.average +mem.overhead.average +mem.vmmemctl.average +mem.zero.average +mem.swapoutRate.average +mem.active.average +mem.llSwapOutRate.average +mem.swapout.average +mem.llSwapInRate.average +mem.swapinRate.average +mem.granted.average +mem.latency.average +mem.overheadMax.average +mem.swapped.average +mem.compressionRate.average +mem.swaptarget.average +mem.shared.average +mem.zipSaved.latest +mem.overheadTouched.average +mem.zipped.latest +mem.consumed.average +mem.entitlement.average +mem.usage.average +mem.decompressionRate.average +mem.compressed.average +net.multicastRx.summation +net.transmitted.average +net.received.average +net.usage.average +net.broadcastTx.summation +net.broadcastRx.summation +net.packetsRx.summation +net.pnicBytesRx.average +net.multicastTx.summation +net.bytesTx.average +net.bytesRx.average +net.droppedRx.summation +net.pnicBytesTx.average +net.droppedTx.summation +net.packetsTx.summation +power.power.average +power.energy.summation +rescpu.runpk1.latest +rescpu.runpk15.latest +rescpu.maxLimited5.latest +rescpu.actpk5.latest +rescpu.samplePeriod.latest +rescpu.runav1.latest +rescpu.runav15.latest +rescpu.sampleCount.latest +rescpu.actpk1.latest +rescpu.runpk5.latest +rescpu.runav5.latest +rescpu.actav15.latest +rescpu.actav1.latest +rescpu.actpk15.latest +rescpu.actav5.latest +rescpu.maxLimited1.latest +rescpu.maxLimited15.latest +sys.osUptime.latest +sys.uptime.latest +sys.heartbeat.latest +virtualDisk.write.average +virtualDisk.read.average +``` + +## Host System Metrics +``` +cpu.corecount.contention.average +cpu.usage.average +cpu.reservedCapacity.average +cpu.usagemhz.minimum +cpu.usagemhz.maximum +cpu.usage.minimum +cpu.usage.maximum +cpu.capacity.provisioned.average +cpu.capacity.usage.average +cpu.capacity.demand.average +cpu.capacity.contention.average +cpu.corecount.provisioned.average +cpu.corecount.usage.average +cpu.usagemhz.average +disk.throughput.contention.average +disk.throughput.usage.average +mem.decompressionRate.average +mem.granted.average +mem.active.average +mem.shared.average +mem.zero.average +mem.swapused.average +mem.vmmemctl.average +mem.compressed.average +mem.compressionRate.average +mem.reservedCapacity.average +mem.capacity.provisioned.average +mem.capacity.usable.average +mem.capacity.usage.average +mem.capacity.entitlement.average +mem.capacity.contention.average +mem.usage.minimum +mem.overhead.minimum +mem.consumed.minimum +mem.granted.minimum +mem.active.minimum +mem.shared.minimum +mem.zero.minimum +mem.swapused.minimum +mem.consumed.average +mem.usage.maximum +mem.overhead.maximum +mem.consumed.maximum +mem.granted.maximum +mem.overhead.average +mem.shared.maximum +mem.zero.maximum +mem.swapused.maximum +mem.vmmemctl.maximum +mem.usage.average +mem.active.maximum +mem.vmmemctl.minimum +net.throughput.contention.summation 
+net.throughput.usage.average +net.throughput.usable.average +net.throughput.provisioned.average +power.power.average +power.powerCap.average +power.energy.summation +vmop.numShutdownGuest.latest +vmop.numPoweroff.latest +vmop.numSuspend.latest +vmop.numReset.latest +vmop.numRebootGuest.latest +vmop.numStandbyGuest.latest +vmop.numPoweron.latest +vmop.numCreate.latest +vmop.numDestroy.latest +vmop.numRegister.latest +vmop.numUnregister.latest +vmop.numReconfigure.latest +vmop.numClone.latest +vmop.numDeploy.latest +vmop.numChangeHost.latest +vmop.numChangeDS.latest +vmop.numChangeHostDS.latest +vmop.numVMotion.latest +vmop.numSVMotion.latest +vmop.numXVMotion.latest +``` + +## Cluster Metrics +``` +cpu.corecount.contention.average +cpu.usage.average +cpu.reservedCapacity.average +cpu.usagemhz.minimum +cpu.usagemhz.maximum +cpu.usage.minimum +cpu.usage.maximum +cpu.capacity.provisioned.average +cpu.capacity.usage.average +cpu.capacity.demand.average +cpu.capacity.contention.average +cpu.corecount.provisioned.average +cpu.corecount.usage.average +cpu.usagemhz.average +disk.throughput.contention.average +disk.throughput.usage.average +mem.decompressionRate.average +mem.granted.average +mem.active.average +mem.shared.average +mem.zero.average +mem.swapused.average +mem.vmmemctl.average +mem.compressed.average +mem.compressionRate.average +mem.reservedCapacity.average +mem.capacity.provisioned.average +mem.capacity.usable.average +mem.capacity.usage.average +mem.capacity.entitlement.average +mem.capacity.contention.average +mem.usage.minimum +mem.overhead.minimum +mem.consumed.minimum +mem.granted.minimum +mem.active.minimum +mem.shared.minimum +mem.zero.minimum +mem.swapused.minimum +mem.consumed.average +mem.usage.maximum +mem.overhead.maximum +mem.consumed.maximum +mem.granted.maximum +mem.overhead.average +mem.shared.maximum +mem.zero.maximum +mem.swapused.maximum +mem.vmmemctl.maximum +mem.usage.average +mem.active.maximum +mem.vmmemctl.minimum +net.throughput.contention.summation +net.throughput.usage.average +net.throughput.usable.average +net.throughput.provisioned.average +power.power.average +power.powerCap.average +power.energy.summation +vmop.numShutdownGuest.latest +vmop.numPoweroff.latest +vmop.numSuspend.latest +vmop.numReset.latest +vmop.numRebootGuest.latest +vmop.numStandbyGuest.latest +vmop.numPoweron.latest +vmop.numCreate.latest +vmop.numDestroy.latest +vmop.numRegister.latest +vmop.numUnregister.latest +vmop.numReconfigure.latest +vmop.numClone.latest +vmop.numDeploy.latest +vmop.numChangeHost.latest +vmop.numChangeDS.latest +vmop.numChangeHostDS.latest +vmop.numVMotion.latest +vmop.numSVMotion.latest +vmop.numXVMotion.latest +``` + +## Datastore Metrics +``` +datastore.numberReadAveraged.average +datastore.throughput.contention.average +datastore.throughput.usage.average +datastore.write.average +datastore.read.average +datastore.numberWriteAveraged.average +disk.used.latest +disk.provisioned.latest +disk.capacity.latest +disk.capacity.contention.average +disk.capacity.provisioned.average +disk.capacity.usage.average +``` diff --git a/plugins/inputs/vsphere/README.md b/plugins/inputs/vsphere/README.md new file mode 100644 index 000000000..ef9e610fd --- /dev/null +++ b/plugins/inputs/vsphere/README.md @@ -0,0 +1,525 @@ +# VMware vSphere Input Plugin + +The VMware vSphere plugin uses the vSphere API to gather metrics from multiple vCenter servers. 
+ +* Clusters +* Hosts +* VMs +* Datastores + +## Supported versions of vSphere +This plugin supports vSphere version 5.5 through 6.7. + +## Configuration + +NOTE: To disable collection of a specific resource type, simply exclude all metrics using the XX_metric_exclude. +For example, to disable collection of VMs, add this: + +``` +vm_metric_exclude = [ "*" ] +``` + +``` +# Read metrics from one or many vCenters +[[inputs.vsphere]] + ## List of vCenter URLs to be monitored. These three lines must be uncommented + ## and edited for the plugin to work. + vcenters = [ "https://vcenter.local/sdk" ] + username = "user@corp.local" + password = "secret" + + ## VMs + ## Typical VM metrics (if omitted or empty, all metrics are collected) + # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) + # vm_exclude = [] # Inventory paths to exclude + vm_metric_include = [ + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.run.summation", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.wait.summation", + "mem.active.average", + "mem.granted.average", + "mem.latency.average", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.usage.average", + "power.power.average", + "virtualDisk.numberReadAveraged.average", + "virtualDisk.numberWriteAveraged.average", + "virtualDisk.read.average", + "virtualDisk.readOIO.latest", + "virtualDisk.throughput.usage.average", + "virtualDisk.totalReadLatency.average", + "virtualDisk.totalWriteLatency.average", + "virtualDisk.write.average", + "virtualDisk.writeOIO.latest", + "sys.uptime.latest", + ] + # vm_metric_exclude = [] ## Nothing is excluded by default + # vm_instances = true ## true by default + + ## Hosts + ## Typical host metrics (if omitted or empty, all metrics are collected) + # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) + # host_exclude [] # Inventory paths to exclude + host_metric_include = [ + "cpu.coreUtilization.average", + "cpu.costop.summation", + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.swapwait.summation", + "cpu.usage.average", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.utilization.average", + "cpu.wait.summation", + "disk.deviceReadLatency.average", + "disk.deviceWriteLatency.average", + "disk.kernelReadLatency.average", + "disk.kernelWriteLatency.average", + "disk.numberReadAveraged.average", + "disk.numberWriteAveraged.average", + "disk.read.average", + "disk.totalReadLatency.average", + "disk.totalWriteLatency.average", + "disk.write.average", + "mem.active.average", + "mem.latency.average", + "mem.state.latest", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.totalCapacity.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.errorsRx.summation", + "net.errorsTx.summation", + "net.usage.average", + "power.power.average", + "storageAdapter.numberReadAveraged.average", + "storageAdapter.numberWriteAveraged.average", + "storageAdapter.read.average", + 
"storageAdapter.write.average", + "sys.uptime.latest", + ] + ## Collect IP addresses? Valid values are "ipv4" and "ipv6" + # ip_addresses = ["ipv6", "ipv4" ] + + # host_metric_exclude = [] ## Nothing excluded by default + # host_instances = true ## true by default + + + ## Clusters + # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) + # cluster_exclude = [] # Inventory paths to exclude + # cluster_metric_include = [] ## if omitted or empty, all metrics are collected + # cluster_metric_exclude = [] ## Nothing excluded by default + # cluster_instances = false ## false by default + + ## Datastores + # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) + # datastore_exclude = [] # Inventory paths to exclude + # datastore_metric_include = [] ## if omitted or empty, all metrics are collected + # datastore_metric_exclude = [] ## Nothing excluded by default + # datastore_instances = false ## false by default + + ## Datacenters + # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) + # datacenter_exclude = [] # Inventory paths to exclude + datacenter_metric_include = [] ## if omitted or empty, all metrics are collected + datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. + # datacenter_instances = false ## false by default + + ## Plugin Settings + ## separator character to use for measurement and field names (default: "_") + # separator = "_" + + ## number of objects to retrieve per query for realtime resources (vms and hosts) + ## set to 64 for vCenter 5.5 and 6.0 (default: 256) + # max_query_objects = 256 + + ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) + ## set to 64 for vCenter 5.5 and 6.0 (default: 256) + # max_query_metrics = 256 + + ## number of go routines to use for collection and discovery of objects and metrics + # collect_concurrency = 1 + # discover_concurrency = 1 + + ## the interval before (re)discovering objects subject to metrics collection (default: 300s) + # object_discovery_interval = "300s" + + ## timeout applies to any of the api request made to vcenter + # timeout = "60s" + + ## When set to true, all samples are sent as integers. This makes the output + ## data types backwards compatible with Telegraf 1.9 or lower. Normally all + ## samples from vCenter, with the exception of percentages, are integer + ## values, but under some conditions, some averaging takes place internally in + ## the plugin. Setting this flag to "false" will send values as floats to + ## preserve the full precision when averaging takes place. + # use_int_samples = true + + ## Custom attributes from vCenter can be very useful for queries in order to slice the + ## metrics along different dimension and for forming ad-hoc relationships. They are disabled + ## by default, since they can add a considerable amount of tags to the resulting metrics. To + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include + ## to select the attributes you want to include. + ## By default, since they can add a considerable amount of tags to the resulting metrics. To + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include + ## to select the attributes you want to include. 
+  # custom_attribute_include = []
+  # custom_attribute_exclude = ["*"]
+
+  ## Optional SSL Config
+  # ssl_ca = "/path/to/cafile"
+  # ssl_cert = "/path/to/certfile"
+  # ssl_key = "/path/to/keyfile"
+  ## Use SSL but skip chain & host verification
+  # insecure_skip_verify = false
+```
+
+### Objects and Metrics Per Query
+
+By default, vCenter's configuration limits the number of entities that can be included in a single performance chart query. The default for vCenter 6.5 and above is 256; prior versions of vCenter have this set to 64.
+A vCenter administrator can change this setting; see this [VMware KB article](https://kb.vmware.com/s/article/2107096) for more information.
+
+Any modification should be reflected in this plugin by adjusting the `max_query_objects` parameter:
+
+```
+  ## number of objects to retrieve per query for realtime resources (vms and hosts)
+  ## set to 64 for vCenter 5.5 and 6.0 (default: 256)
+  # max_query_objects = 256
+```
+
+### Collection and Discovery Concurrency
+
+On large vCenter setups it may be prudent to have multiple concurrent goroutines collect performance metrics,
+in order to avoid potential errors caused by too much time elapsing during a collection cycle. This should never be greater than 8,
+though the default of 1 (no concurrency) should be sufficient for most configurations.
+
+To configure concurrency, modify the `collect_concurrency` and `discover_concurrency` parameters:
+
+```
+  ## number of go routines to use for collection and discovery of objects and metrics
+  # collect_concurrency = 1
+  # discover_concurrency = 1
+```
+
+### Inventory Paths
+Resources to be monitored can be selected using Inventory Paths. This treats the vSphere inventory as a tree structure similar
+to a file system. A vSphere inventory has a structure similar to this:
+
+```
+
++-DC0 # Virtual datacenter
+ +-datastore # Datastore folder (created by system)
+ | +-Datastore1
+ +-host # Host folder (created by system)
+ | +-Cluster1
+ | | +-Host1
+ | | | +-VM1
+ | | | +-VM2
+ | | | +-hadoop1
+ | +-Host2 # Dummy cluster created for non-clustered host
+ | | +-Host2
+ | | | +-VM3
+ | | | +-VM4
+ +-vm # VM folder (created by system)
+ | +-VM1
+ | +-VM2
+ | +-Folder1
+ | | +-hadoop1
+ | | +-NestedFolder1
+ | | | +-VM3
+ | | | +-VM4
+```
+
+#### Using Inventory Paths
+Using familiar UNIX-style paths, one could select e.g. VM2 with the path ```/DC0/vm/VM2```.
+
+Often, we want to select a group of resources, such as all the VMs in a folder. We could use the path ```/DC0/vm/Folder1/*``` for that.
+
+Another possibility is to select objects using a partial name, such as ```/DC0/vm/Folder1/hadoop*```, yielding all VMs in Folder1 with a name starting with "hadoop".
+
+Finally, due to the arbitrary nesting of the folder structure, we need a "recursive wildcard" for traversing multiple folders. We use the "**" symbol for that. If we want to look for a VM with a name starting with "hadoop" in any folder, we could use the following path: ```/DC0/vm/**/hadoop*```
+
+#### Multiple paths to VMs
+As we can see from the example tree above, VMs appear both in their own folder under the datacenter and under the hosts. This is useful when you want to select VMs on a specific host. For example, ```/DC0/host/Cluster1/Host1/hadoop*``` selects all VMs with a name starting with "hadoop" that are running on Host1.
+
+We can extend this to look at the cluster level: ```/DC0/host/Cluster1/*/hadoop*```. This selects any VM matching "hadoop*" on any host in Cluster1.
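+
+To make the semantics of "*" and "**" concrete, the sketch below shows one way such patterns could be evaluated against inventory paths. This is only an illustration, assuming paths are split on "/" and that "*" uses shell-style matching within a single segment; it is not the plugin's actual implementation, and the function names (`matchPath`, `matchSegs`) are hypothetical.
+
+```
+package main
+
+import (
+	"fmt"
+	"path"
+	"strings"
+)
+
+// matchPath reports whether an inventory path matches a pattern where "*"
+// matches within a single path segment and "**" matches any number of segments.
+// Illustrative sketch only, not the plugin's code.
+func matchPath(pattern, p string) bool {
+	return matchSegs(strings.Split(strings.Trim(pattern, "/"), "/"),
+		strings.Split(strings.Trim(p, "/"), "/"))
+}
+
+func matchSegs(pat, segs []string) bool {
+	if len(pat) == 0 {
+		return len(segs) == 0
+	}
+	if pat[0] == "**" {
+		// "**" may consume zero or more segments.
+		for i := 0; i <= len(segs); i++ {
+			if matchSegs(pat[1:], segs[i:]) {
+				return true
+			}
+		}
+		return false
+	}
+	if len(segs) == 0 {
+		return false
+	}
+	// Within a single segment, use shell-style matching ("hadoop*", "*", ...).
+	if ok, _ := path.Match(pat[0], segs[0]); !ok {
+		return false
+	}
+	return matchSegs(pat[1:], segs[1:])
+}
+
+func main() {
+	fmt.Println(matchPath("/DC0/vm/**/hadoop*", "/DC0/vm/Folder1/hadoop1"))                    // true
+	fmt.Println(matchPath("/DC0/host/Cluster1/*/hadoop*", "/DC0/host/Cluster1/Host1/hadoop1")) // true
+	fmt.Println(matchPath("/DC0/vm/Folder1/*", "/DC0/vm/Folder1/NestedFolder1/VM3"))           // false: "*" is one level only
+}
+```
+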
+## Performance Considerations
+
+### Realtime vs. historical metrics
+
+vCenter keeps two different kinds of metrics, known as realtime and historical metrics.
+
+* Realtime metrics: Available at a 20-second granularity. These metrics are stored in memory and are very fast and cheap to query. Our tests have shown that a complete set of realtime metrics for 7000 virtual machines can be obtained in less than 20 seconds. Realtime metrics are only available on **ESXi host** and **virtual machine** resources, and they are only kept for 1 hour in vCenter.
+* Historical metrics: Available at 5-minute, 30-minute, 2-hour and 24-hour rollup levels. The vSphere Telegraf plugin only uses the 5-minute rollup. These metrics are stored in the vCenter database and can be expensive and slow to query. Historical metrics are the only type of metrics available for **clusters**, **datastores** and **datacenters**.
+
+For more information, refer to the vSphere documentation here: https://pubs.vmware.com/vsphere-50/index.jsp?topic=%2Fcom.vmware.wssdk.pg.doc_50%2FPG_Ch16_Performance.18.2.html
+
+This distinction has an impact on how Telegraf collects metrics. A single instance of an input plugin can have one and only one collection interval, which means that you typically set the collection interval based on the most frequently collected metric. Let's assume you set the collection interval to 1 minute. All realtime metrics will be collected every minute. Since the historical metrics are only available at a 5-minute interval, the vSphere Telegraf plugin automatically skips four out of five collection cycles for these metrics. This works fine in many cases. Problems arise when the collection of historical metrics takes longer than the collection interval. This will cause error messages similar to the following to appear in the Telegraf logs:
+
+```2019-01-16T13:41:10Z W! [agent] input "inputs.vsphere" did not complete within its interval```
+
+This will disrupt the metric collection and can result in missed samples. The best practice workaround is to specify two instances of the vSphere plugin, one for the realtime metrics with a short collection interval and one for the historical metrics with a longer interval. You can use the ```*_metric_exclude``` options to turn off the resources you don't want to collect metrics for in each instance. For example:
+
+```
+## Realtime instance
+[[inputs.vsphere]]
+  interval = "60s"
+  vcenters = [ "https://someaddress/sdk" ]
+  username = "someuser@vsphere.local"
+  password = "secret"
+
+  insecure_skip_verify = true
+  force_discover_on_init = true
+
+  # Exclude all historical metrics
+  datastore_metric_exclude = ["*"]
+  cluster_metric_exclude = ["*"]
+  datacenter_metric_exclude = ["*"]
+
+  collect_concurrency = 5
+  discover_concurrency = 5
+
+# Historical instance
+[[inputs.vsphere]]
+
+  interval = "300s"
+
+  vcenters = [ "https://someaddress/sdk" ]
+  username = "someuser@vsphere.local"
+  password = "secret"
+
+  insecure_skip_verify = true
+  force_discover_on_init = true
+  host_metric_exclude = ["*"] # Exclude realtime metrics
+  vm_metric_exclude = ["*"] # Exclude realtime metrics
+
+  max_query_metrics = 256
+  collect_concurrency = 3
+```
+
+### Configuring the max_query_metrics setting
+
+The ```max_query_metrics``` setting determines the maximum number of metrics to attempt to retrieve in one call to vCenter. Generally speaking, a higher number means faster and more efficient queries.
However, the number of allowed metrics in a query is typically limited by the ```config.vpxd.stats.maxQueryMetrics``` setting in vCenter. The value defaults to 64 on vSphere 5.5 and older and 256 on newer versions of vCenter. The vSphere plugin always checks this setting and will automatically reduce the number if the limit configured in vCenter is lower than ```max_query_metrics``` in the plugin. This will result in a log message similar to this:
+
+```2019-01-21T03:24:18Z W! [input.vsphere] Configured max_query_metrics is 256, but server limits it to 64. Reducing.```
+
+You may ask a vCenter administrator to increase this limit to help boost performance.
+
+### Cluster metrics and the max_query_metrics setting
+
+Cluster metrics are handled a bit differently by vCenter. They are aggregated from ESXi and virtual machine metrics and may not be available when you query their most recent values. When this happens, vCenter will attempt to perform that aggregation on the fly. Unfortunately, all the subqueries needed internally in vCenter to perform this aggregation will count towards ```config.vpxd.stats.maxQueryMetrics```. This means that even a very small query may result in an error message similar to this:
+
+```2018-11-02T13:37:11Z E! Error in plugin [inputs.vsphere]: ServerFaultCode: This operation is restricted by the administrator - 'vpxd.stats.maxQueryMetrics'. Contact your system administrator```
+
+There are two ways of addressing this:
+* Ask your vCenter administrator to set ```config.vpxd.stats.maxQueryMetrics``` to a number that's higher than the total number of virtual machines managed by a vCenter instance.
+* Exclude the cluster metrics and use either the basicstats aggregator to calculate sums and averages per cluster or use queries in the visualization tool to obtain the same result.
+
+### Concurrency settings
+
+The vSphere plugin allows you to specify two concurrency settings:
+* ```collect_concurrency```: The maximum number of simultaneous queries for performance metrics allowed per resource type.
+* ```discover_concurrency```: The maximum number of simultaneous queries for resource discovery allowed.
+
+While a higher level of concurrency typically has a positive impact on performance, increasing these numbers too much can cause performance issues at the vCenter server. A rule of thumb is to set these parameters to the number of virtual machines divided by 1500, rounded up to the nearest integer.
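+
+As a rough illustration of that rule of thumb, here is a minimal sketch; the helper name `suggestedConcurrency` is hypothetical and not part of the plugin:
+
+```
+package main
+
+import "fmt"
+
+// suggestedConcurrency applies the rule of thumb above:
+// ceil(number of VMs / 1500), with a minimum of 1.
+func suggestedConcurrency(numVMs int) int {
+	c := (numVMs + 1499) / 1500 // integer ceiling of numVMs/1500
+	if c < 1 {
+		c = 1
+	}
+	return c
+}
+
+func main() {
+	fmt.Println(suggestedConcurrency(1200)) // 1
+	fmt.Println(suggestedConcurrency(4000)) // 3
+	fmt.Println(suggestedConcurrency(7000)) // 5
+}
+```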
+ +## Measurements & Fields + +- Cluster Stats + - Cluster services: CPU, memory, failover + - CPU: total, usage + - Memory: consumed, total, vmmemctl + - VM operations: # changes, clone, create, deploy, destroy, power, reboot, reconfigure, register, reset, shutdown, standby, vmotion +- Host Stats: + - CPU: total, usage, cost, mhz + - Datastore: iops, latency, read/write bytes, # reads/writes + - Disk: commands, latency, kernel reads/writes, # reads/writes, queues + - Memory: total, usage, active, latency, swap, shared, vmmemctl + - Network: broadcast, bytes, dropped, errors, multicast, packets, usage + - Power: energy, usage, capacity + - Res CPU: active, max, running + - Storage Adapter: commands, latency, # reads/writes + - Storage Path: commands, latency, # reads/writes + - System Resources: cpu active, cpu max, cpu running, cpu usage, mem allocated, mem consumed, mem shared, swap + - System: uptime + - Flash Module: active VMDKs +- VM Stats: + - CPU: demand, usage, readiness, cost, mhz + - Datastore: latency, # reads/writes + - Disk: commands, latency, # reads/writes, provisioned, usage + - Memory: granted, usage, active, swap, vmmemctl + - Network: broadcast, bytes, dropped, multicast, packets, usage + - Power: energy, usage + - Res CPU: active, max, running + - System: operating system uptime, uptime + - Virtual Disk: seeks, # reads/writes, latency, load +- Datastore stats: + - Disk: Capacity, provisioned, used + +For a detailed list of commonly available metrics, please refer to [METRICS.md](METRICS.md) + +## Tags + +- all metrics + - vcenter (vcenter url) +- all host metrics + - cluster (vcenter cluster) +- all vm metrics + - cluster (vcenter cluster) + - esxhost (name of ESXi host) + - guest (guest operating system id) +- cpu stats for Host and VM + - cpu (cpu core - not all CPU fields will have this tag) +- datastore stats for Host and VM + - datastore (id of datastore) +- disk stats for Host and VM + - disk (name of disk) +- disk.used.capacity for Datastore + - disk (type of disk) +- net stats for Host and VM + - interface (name of network interface) +- storageAdapter stats for Host + - adapter (name of storage adapter) +- storagePath stats for Host + - path (id of storage path) +- sys.resource* stats for Host + - resource (resource type) +- vflashModule stats for Host + - module (name of flash module) +- virtualDisk stats for VM + - disk (name of virtual disk) + +## Sample output + +``` +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 run_summation=2608i,ready_summation=129i,usage_average=5.01,used_summation=2134i,demand_average=326i 1535660299000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=321i,bytesTx_average=335i 1535660299000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=144i,read_average=4i 1535660299000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 bytesRx_average=242i,bytesTx_average=308i 1535660299000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 write_average=232i,read_average=4i 1535660299000000000 
+vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 usage_average=5.49,used_summation=1804i,demand_average=308i,run_summation=2001i,ready_summation=120i 1535660299000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 usage_average=4.19,used_summation=2108i,demand_average=285i,run_summation=1793i,ready_summation=93i 1535660299000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 bytesRx_average=272i,bytesTx_average=419i 1535660299000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 write_average=229i,read_average=4i 1535660299000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 run_summation=2277i,ready_summation=118i,usage_average=4.67,used_summation=2546i,demand_average=289i 1535660299000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 bytesRx_average=243i,bytesTx_average=296i 1535660299000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 write_average=158i,read_average=4i 1535660299000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,interface=vmnic0,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=1042i,bytesTx_average=753i,bytesRx_average=660i 1535660299000000000 +vsphere_host_cpu,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 utilization_average=10.46,usage_average=22.4,readiness_average=0.4,costop_summation=2i,coreUtilization_average=19.61,wait_summation=5148518i,idle_summation=58581i,latency_average=0.6,ready_summation=13370i,used_summation=19219i 1535660299000000000 +vsphere_host_cpu,cpu=0,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=25.6,utilization_average=11.58,used_summation=24306i,usage_average=24.26,idle_summation=86688i 1535660299000000000 +vsphere_host_cpu,cpu=1,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=12.29,utilization_average=8.32,used_summation=31312i,usage_average=22.47,idle_summation=94934i 1535660299000000000 +vsphere_host_disk,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 read_average=331i,write_average=2800i 1535660299000000000 +vsphere_host_disk,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 write_average=2701i,read_average=258i 1535660299000000000 +vsphere_host_mem,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=93.27 1535660299000000000 
+vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 bytesTx_average=650i,usage_average=1414i,bytesRx_average=569i 1535660299000000000 +vsphere_host_cpu,clustername=DC0_C0,cpu=1,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 utilization_average=12.6,used_summation=25775i,usage_average=24.44,idle_summation=68886i,coreUtilization_average=17.59 1535660299000000000 +vsphere_host_disk,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 read_average=340i,write_average=2340i 1535660299000000000 +vsphere_host_disk,clustername=DC0_C0,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 write_average=2277i,read_average=282i 1535660299000000000 +vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=104.78 1535660299000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesTx_average=463i,usage_average=1131i,bytesRx_average=719i 1535660299000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,interface=vmnic0,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=1668i,bytesTx_average=838i,bytesRx_average=921i 1535660299000000000 +vsphere_host_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 used_summation=28952i,utilization_average=11.36,idle_summation=93261i,latency_average=0.46,ready_summation=12837i,usage_average=21.56,readiness_average=0.39,costop_summation=2i,coreUtilization_average=27.19,wait_summation=3820829i 1535660299000000000 +vsphere_host_cpu,clustername=DC0_C0,cpu=0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 coreUtilization_average=24.12,utilization_average=13.83,used_summation=22462i,usage_average=24.69,idle_summation=96993i 1535660299000000000 +internal_vsphere,host=host.example.com,os=Mac,vcenter=localhost:8989 connect_ns=4727607i,discover_ns=65389011i,discovered_objects=8i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=localhost:8989 gather_duration_ns=296223i,gather_count=0i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=192.168.1.151 gather_duration_ns=136050i,gather_count=0i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=localhost:8989 gather_count=62i,gather_duration_ns=8788033i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=192.168.1.151 gather_count=0i,gather_duration_ns=162002i 1535660309000000000 +internal_gather,host=host.example.com,input=vsphere,os=Mac gather_time_ns=17483653i,metrics_gathered=28i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,vcenter=192.168.1.151 connect_ns=0i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=localhost:8989 gather_duration_ns=7291897i,gather_count=36i 1535660309000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=192.168.1.151 
gather_duration_ns=958474i,gather_count=0i 1535660309000000000 +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 usage_average=8.82,used_summation=3192i,demand_average=283i,run_summation=2419i,ready_summation=115i 1535660319000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 bytesRx_average=277i,bytesTx_average=343i 1535660319000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 read_average=1i,write_average=741i 1535660319000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 bytesRx_average=386i,bytesTx_average=369i 1535660319000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 write_average=814i,read_average=1i 1535660319000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 run_summation=1778i,ready_summation=111i,usage_average=7.54,used_summation=2339i,demand_average=297i 1535660319000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 usage_average=6.98,used_summation=2125i,demand_average=211i,run_summation=2990i,ready_summation=141i 1535660319000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 bytesRx_average=357i,bytesTx_average=268i 1535660319000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 write_average=528i,read_average=1i 1535660319000000000 +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 used_summation=2374i,demand_average=195i,run_summation=3454i,ready_summation=110i,usage_average=7.34 1535660319000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=308i,bytesTx_average=246i 1535660319000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=1178i,read_average=1i 1535660319000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,interface=vmnic0,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 bytesRx_average=773i,usage_average=1521i,bytesTx_average=890i 1535660319000000000 +vsphere_host_cpu,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 wait_summation=3421258i,idle_summation=67994i,latency_average=0.36,usage_average=29.86,readiness_average=0.37,used_summation=25244i,costop_summation=2i,coreUtilization_average=21.94,utilization_average=17.19,ready_summation=15897i 
1535660319000000000 +vsphere_host_cpu,cpu=0,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 utilization_average=11.32,used_summation=19333i,usage_average=14.29,idle_summation=92708i,coreUtilization_average=27.68 1535660319000000000 +vsphere_host_cpu,cpu=1,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 used_summation=28596i,usage_average=25.32,idle_summation=79553i,coreUtilization_average=28.01,utilization_average=11.33 1535660319000000000 +vsphere_host_disk,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 read_average=86i,write_average=1659i 1535660319000000000 +vsphere_host_disk,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 write_average=1997i,read_average=58i 1535660319000000000 +vsphere_host_mem,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=68.45 1535660319000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 bytesTx_average=679i,usage_average=2286i,bytesRx_average=719i 1535660319000000000 +vsphere_host_cpu,clustername=DC0_C0,cpu=1,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 utilization_average=10.52,used_summation=21693i,usage_average=23.09,idle_summation=84590i,coreUtilization_average=29.92 1535660319000000000 +vsphere_host_disk,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 read_average=113i,write_average=1236i 1535660319000000000 +vsphere_host_disk,clustername=DC0_C0,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 write_average=1708i,read_average=110i 1535660319000000000 +vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=111.46 1535660319000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesTx_average=998i,usage_average=2000i,bytesRx_average=881i 1535660319000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,interface=vmnic0,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=1683i,bytesTx_average=675i,bytesRx_average=1078i 1535660319000000000 +vsphere_host_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 used_summation=28531i,wait_summation=3139129i,utilization_average=9.99,idle_summation=98579i,latency_average=0.51,costop_summation=2i,coreUtilization_average=14.35,ready_summation=16121i,usage_average=34.19,readiness_average=0.4 1535660319000000000 +vsphere_host_cpu,clustername=DC0_C0,cpu=0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 utilization_average=12.2,used_summation=22750i,usage_average=18.84,idle_summation=99539i,coreUtilization_average=23.05 1535660319000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=localhost:8989 
gather_duration_ns=7076543i,gather_count=62i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=host,vcenter=192.168.1.151 gather_duration_ns=4051303i,gather_count=0i 1535660339000000000 +internal_gather,host=host.example.com,input=vsphere,os=Mac metrics_gathered=56i,gather_time_ns=13555029i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,vcenter=192.168.1.151 connect_ns=0i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=localhost:8989 gather_duration_ns=6335467i,gather_count=36i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=192.168.1.151 gather_duration_ns=958474i,gather_count=0i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,vcenter=localhost:8989 discover_ns=65389011i,discovered_objects=8i,connect_ns=4727607i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=datastore,vcenter=localhost:8989 gather_duration_ns=296223i,gather_count=0i 1535660339000000000 +internal_vsphere,host=host.example.com,os=Mac,resourcetype=vm,vcenter=192.168.1.151 gather_count=0i,gather_duration_ns=1540920i 1535660339000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 write_average=302i,read_average=11i 1535660339000000000 +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 usage_average=5.58,used_summation=2941i,demand_average=298i,run_summation=3255i,ready_summation=96i 1535660339000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-35,os=Mac,source=DC0_H0_VM0,vcenter=localhost:8989,vmname=DC0_H0_VM0 bytesRx_average=155i,bytesTx_average=241i 1535660339000000000 +vsphere_vm_cpu,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 usage_average=10.3,used_summation=3053i,demand_average=346i,run_summation=3289i,ready_summation=122i 1535660339000000000 +vsphere_vm_net,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 bytesRx_average=215i,bytesTx_average=275i 1535660339000000000 +vsphere_vm_virtualDisk,esxhostname=DC0_H0,guest=other,host=host.example.com,moid=vm-38,os=Mac,source=DC0_H0_VM1,vcenter=localhost:8989,vmname=DC0_H0_VM1 write_average=252i,read_average=14i 1535660339000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 usage_average=8,used_summation=2183i,demand_average=354i,run_summation=3542i,ready_summation=128i 1535660339000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 bytesRx_average=178i,bytesTx_average=200i 1535660339000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-41,os=Mac,source=DC0_C0_RP0_VM0,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM0 write_average=283i,read_average=12i 1535660339000000000 +vsphere_vm_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 
demand_average=328i,run_summation=3481i,ready_summation=122i,usage_average=7.95,used_summation=2167i 1535660339000000000 +vsphere_vm_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 bytesTx_average=282i,bytesRx_average=196i 1535660339000000000 +vsphere_vm_virtualDisk,clustername=DC0_C0,esxhostname=DC0_C0_H0,guest=other,host=host.example.com,moid=vm-44,os=Mac,source=DC0_C0_RP0_VM1,vcenter=localhost:8989,vmname=DC0_C0_RP0_VM1 write_average=321i,read_average=13i 1535660339000000000 +vsphere_host_disk,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 read_average=39i,write_average=2635i 1535660339000000000 +vsphere_host_disk,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 write_average=2635i,read_average=30i 1535660339000000000 +vsphere_host_mem,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=98.5 1535660339000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=1887i,bytesRx_average=662i,bytesTx_average=251i 1535660339000000000 +vsphere_host_net,esxhostname=DC0_H0,host=host.example.com,interface=vmnic0,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 usage_average=1481i,bytesTx_average=899i,bytesRx_average=992i 1535660339000000000 +vsphere_host_cpu,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 used_summation=50405i,costop_summation=2i,utilization_average=17.32,latency_average=0.61,ready_summation=14843i,usage_average=27.94,coreUtilization_average=32.12,wait_summation=3058787i,idle_summation=56600i,readiness_average=0.36 1535660339000000000 +vsphere_host_cpu,cpu=0,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=37.61,utilization_average=17.05,used_summation=38013i,usage_average=32.66,idle_summation=89575i 1535660339000000000 +vsphere_host_cpu,cpu=1,esxhostname=DC0_H0,host=host.example.com,moid=host-19,os=Mac,source=DC0_H0,vcenter=localhost:8989 coreUtilization_average=25.92,utilization_average=18.72,used_summation=39790i,usage_average=40.42,idle_summation=69457i 1535660339000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,interface=vmnic0,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=1246i,bytesTx_average=673i,bytesRx_average=781i 1535660339000000000 +vsphere_host_cpu,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 coreUtilization_average=33.8,idle_summation=77121i,ready_summation=15857i,readiness_average=0.39,used_summation=29554i,costop_summation=2i,wait_summation=4338417i,utilization_average=17.87,latency_average=0.44,usage_average=28.78 1535660339000000000 +vsphere_host_cpu,clustername=DC0_C0,cpu=0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 idle_summation=86610i,coreUtilization_average=34.36,utilization_average=19.03,used_summation=28766i,usage_average=23.72 1535660339000000000 
+vsphere_host_cpu,clustername=DC0_C0,cpu=1,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 coreUtilization_average=33.15,utilization_average=16.8,used_summation=44282i,usage_average=30.08,idle_summation=93490i 1535660339000000000 +vsphere_host_disk,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 read_average=56i,write_average=1672i 1535660339000000000 +vsphere_host_disk,clustername=DC0_C0,disk=/var/folders/rf/txwdm4pj409f70wnkdlp7sz80000gq/T/govcsim-DC0-LocalDS_0-367088371@folder-5,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 write_average=2110i,read_average=48i 1535660339000000000 +vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=116.21 1535660339000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesRx_average=726i,bytesTx_average=643i,usage_average=1504i 1535660339000000000 +vsphere_host_mem,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 usage_average=116.21 1535660339000000000 +vsphere_host_net,clustername=DC0_C0,esxhostname=DC0_C0_H0,host=host.example.com,moid=host-30,os=Mac,source=DC0_C0_H0,vcenter=localhost:8989 bytesRx_average=726i,bytesTx_average=643i,usage_average=1504i 1535660339000000000 +``` diff --git a/plugins/inputs/vsphere/client.go b/plugins/inputs/vsphere/client.go new file mode 100644 index 000000000..b3096f7be --- /dev/null +++ b/plugins/inputs/vsphere/client.go @@ -0,0 +1,323 @@ +package vsphere + +import ( + "context" + "crypto/tls" + "fmt" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/performance" + "github.com/vmware/govmomi/session" + "github.com/vmware/govmomi/view" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/soap" + "github.com/vmware/govmomi/vim25/types" +) + +// The highest number of metrics we can query for, no matter what settings +// and server say. +const absoluteMaxMetrics = 10000 + +// ClientFactory is used to obtain Clients to be used throughout the plugin. Typically, +// a single Client is reused across all functions and goroutines, but the client +// is periodically recycled to avoid authentication expiration issues. +type ClientFactory struct { + client *Client + mux sync.Mutex + url *url.URL + parent *VSphere +} + +// Client represents a connection to vSphere and is backed by a govmomi connection +type Client struct { + Client *govmomi.Client + Views *view.Manager + Root *view.ContainerView + Perf *performance.Manager + Valid bool + Timeout time.Duration + closeGate sync.Once + log telegraf.Logger +} + +// NewClientFactory creates a new ClientFactory and prepares it for use. +func NewClientFactory(ctx context.Context, url *url.URL, parent *VSphere) *ClientFactory { + return &ClientFactory{ + client: nil, + parent: parent, + url: url, + } +} + +// GetClient returns a client. The caller is responsible for calling Release() +// on the client once it's done using it. 
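+// GetClient validates the cached session with a lightweight GetCurrentTime call;
+// if the session has expired it re-authenticates, and failing that it rebuilds
+// the client once before giving up for this collection round.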
+func (cf *ClientFactory) GetClient(ctx context.Context) (*Client, error) { + cf.mux.Lock() + defer cf.mux.Unlock() + retrying := false + for { + if cf.client == nil { + var err error + if cf.client, err = NewClient(ctx, cf.url, cf.parent); err != nil { + return nil, err + } + } + + // Execute a dummy call against the server to make sure the client is + // still functional. If not, try to log back in. If that doesn't work, + // we give up. + ctx1, cancel1 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + defer cancel1() + if _, err := methods.GetCurrentTime(ctx1, cf.client.Client); err != nil { + cf.parent.Log.Info("Client session seems to have time out. Reauthenticating!") + ctx2, cancel2 := context.WithTimeout(ctx, cf.parent.Timeout.Duration) + defer cancel2() + if err := cf.client.Client.SessionManager.Login(ctx2, url.UserPassword(cf.parent.Username, cf.parent.Password)); err != nil { + if !retrying { + // The client went stale. Probably because someone rebooted vCenter. Clear it to + // force us to create a fresh one. We only get one chance at this. If we fail a second time + // we will simply skip this collection round and hope things have stabilized for the next one. + retrying = true + cf.client = nil + continue + } + return nil, fmt.Errorf("renewing authentication failed: %s", err.Error()) + } + } + + return cf.client, nil + } +} + +// NewClient creates a new vSphere client based on the url and setting passed as parameters. +func NewClient(ctx context.Context, u *url.URL, vs *VSphere) (*Client, error) { + sw := NewStopwatch("connect", u.Host) + defer sw.Stop() + + tlsCfg, err := vs.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + // Use a default TLS config if it's missing + if tlsCfg == nil { + tlsCfg = &tls.Config{} + } + if vs.Username != "" { + u.User = url.UserPassword(vs.Username, vs.Password) + } + + vs.Log.Debugf("Creating client: %s", u.Host) + soapClient := soap.NewClient(u, tlsCfg.InsecureSkipVerify) + + // Add certificate if we have it. Use it to log us in. + if tlsCfg != nil && len(tlsCfg.Certificates) > 0 { + soapClient.SetCertificate(tlsCfg.Certificates[0]) + } + + // Set up custom CA chain if specified. We need to do this before we create the vim25 client, + // since it might fail on missing CA chains otherwise. + if vs.TLSCA != "" { + if err := soapClient.SetRootCAs(vs.TLSCA); err != nil { + return nil, err + } + } + + ctx1, cancel1 := context.WithTimeout(ctx, vs.Timeout.Duration) + defer cancel1() + vimClient, err := vim25.NewClient(ctx1, soapClient) + if err != nil { + return nil, err + } + sm := session.NewManager(vimClient) + + // If TSLKey is specified, try to log in as an extension using a cert. + if vs.TLSKey != "" { + ctx2, cancel2 := context.WithTimeout(ctx, vs.Timeout.Duration) + defer cancel2() + if err := sm.LoginExtensionByCertificate(ctx2, vs.TLSKey); err != nil { + return nil, err + } + } + + // Create the govmomi client. + c := &govmomi.Client{ + Client: vimClient, + SessionManager: sm, + } + + // Only login if the URL contains user information. 
+ if u.User != nil { + if err := c.Login(ctx, u.User); err != nil { + return nil, err + } + } + + c.Timeout = vs.Timeout.Duration + m := view.NewManager(c.Client) + + v, err := m.CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{}, true) + if err != nil { + return nil, err + } + + p := performance.NewManager(c.Client) + + client := &Client{ + log: vs.Log, + Client: c, + Views: m, + Root: v, + Perf: p, + Valid: true, + Timeout: vs.Timeout.Duration, + } + // Adjust max query size if needed + ctx3, cancel3 := context.WithTimeout(ctx, vs.Timeout.Duration) + defer cancel3() + n, err := client.GetMaxQueryMetrics(ctx3) + if err != nil { + return nil, err + } + vs.Log.Debugf("vCenter says max_query_metrics should be %d", n) + if n < vs.MaxQueryMetrics { + vs.Log.Warnf("Configured max_query_metrics is %d, but server limits it to %d. Reducing.", vs.MaxQueryMetrics, n) + vs.MaxQueryMetrics = n + } + return client, nil +} + +// Close shuts down a ClientFactory and releases any resources associated with it. +func (cf *ClientFactory) Close() { + cf.mux.Lock() + defer cf.mux.Unlock() + if cf.client != nil { + cf.client.close() + } +} + +func (c *Client) close() { + // Use a Once to prevent us from panics stemming from trying + // to close it multiple times. + c.closeGate.Do(func() { + ctx, cancel := context.WithTimeout(context.Background(), c.Timeout) + defer cancel() + if c.Client != nil { + if err := c.Client.Logout(ctx); err != nil { + c.log.Errorf("Logout: %s", err.Error()) + } + } + }) +} + +// GetServerTime returns the time at the vCenter server +func (c *Client) GetServerTime(ctx context.Context) (time.Time, error) { + ctx, cancel := context.WithTimeout(ctx, c.Timeout) + defer cancel() + t, err := methods.GetCurrentTime(ctx, c.Client) + if err != nil { + return time.Time{}, err + } + return *t, nil +} + +// GetMaxQueryMetrics returns the max_query_metrics setting as configured in vCenter +func (c *Client) GetMaxQueryMetrics(ctx context.Context) (int, error) { + ctx, cancel := context.WithTimeout(ctx, c.Timeout) + defer cancel() + + om := object.NewOptionManager(c.Client.Client, *c.Client.Client.ServiceContent.Setting) + res, err := om.Query(ctx, "config.vpxd.stats.maxQueryMetrics") + if err == nil { + if len(res) > 0 { + if s, ok := res[0].GetOptionValue().Value.(string); ok { + v, err := strconv.Atoi(s) + if err == nil { + c.log.Debugf("vCenter maxQueryMetrics is defined: %d", v) + if v == -1 { + // Whatever the server says, we never ask for more metrics than this. + return absoluteMaxMetrics, nil + } + return v, nil + } + } + // Fall through version-based inference if value isn't usable + } + } else { + c.log.Debug("Option query for maxQueryMetrics failed. Using default") + } + + // No usable maxQueryMetrics setting. Infer based on version + ver := c.Client.Client.ServiceContent.About.Version + parts := strings.Split(ver, ".") + if len(parts) < 2 { + c.log.Warnf("vCenter returned an invalid version string: %s. 
Using default query size=64", ver) + return 64, nil + } + c.log.Debugf("vCenter version is: %s", ver) + major, err := strconv.Atoi(parts[0]) + if err != nil { + return 0, err + } + if major < 6 || major == 6 && parts[1] == "0" { + return 64, nil + } + return 256, nil +} + +// QueryMetrics wraps performance.Query to give it proper timeouts +func (c *Client) QueryMetrics(ctx context.Context, pqs []types.PerfQuerySpec) ([]performance.EntityMetric, error) { + ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout) + defer cancel1() + metrics, err := c.Perf.Query(ctx1, pqs) + if err != nil { + return nil, err + } + + ctx2, cancel2 := context.WithTimeout(ctx, c.Timeout) + defer cancel2() + return c.Perf.ToMetricSeries(ctx2, metrics) +} + +// CounterInfoByName wraps performance.CounterInfoByName to give it proper timeouts +func (c *Client) CounterInfoByName(ctx context.Context) (map[string]*types.PerfCounterInfo, error) { + ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout) + defer cancel1() + return c.Perf.CounterInfoByName(ctx1) +} + +// CounterInfoByKey wraps performance.CounterInfoByKey to give it proper timeouts +func (c *Client) CounterInfoByKey(ctx context.Context) (map[int32]*types.PerfCounterInfo, error) { + ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout) + defer cancel1() + return c.Perf.CounterInfoByKey(ctx1) +} + +// ListResources wraps property.Collector.Retrieve to give it proper timeouts +func (c *Client) ListResources(ctx context.Context, root *view.ContainerView, kind []string, ps []string, dst interface{}) error { + ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout) + defer cancel1() + return root.Retrieve(ctx1, kind, ps, dst) +} + +func (c *Client) GetCustomFields(ctx context.Context) (map[int32]string, error) { + ctx1, cancel1 := context.WithTimeout(ctx, c.Timeout) + defer cancel1() + cfm := object.NewCustomFieldsManager(c.Client.Client) + fields, err := cfm.Field(ctx1) + if err != nil { + return nil, err + } + r := make(map[int32]string) + for _, f := range fields { + r[f.Key] = f.Name + } + return r, nil +} diff --git a/plugins/inputs/vsphere/endpoint.go b/plugins/inputs/vsphere/endpoint.go new file mode 100644 index 000000000..93d74e63f --- /dev/null +++ b/plugins/inputs/vsphere/endpoint.go @@ -0,0 +1,1289 @@ +package vsphere + +import ( + "context" + "errors" + "fmt" + "math" + "math/rand" + "net/url" + "regexp" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/influxdata/telegraf/filter" + + "github.com/influxdata/telegraf" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/performance" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +var isolateLUN = regexp.MustCompile(".*/([^/]+)/?$") + +var isIPv4 = regexp.MustCompile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$") + +var isIPv6 = regexp.MustCompile("^(?:[A-Fa-f0-9]{0,4}:){1,7}[A-Fa-f0-9]{1,4}$") + +const metricLookback = 3 // Number of time periods to look back at for non-realtime metrics + +const rtMetricLookback = 3 // Number of time periods to look back at for realtime metrics + +const maxSampleConst = 10 // Absolute maximum number of samples regardless of period + +const maxMetadataSamples = 100 // Number of resources to sample for metric metadata + +const hwMarkTTL = time.Duration(4 * time.Hour) + +type queryChunk []types.PerfQuerySpec + +type queryJob func(queryChunk) + +// Endpoint is a high-level representation of a connected vCenter endpoint. It is backed by the lower +// level Client type. 
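+// An Endpoint keeps the discovered objects for each resource kind, a cache of the
+// latest sample timestamps (hwMarks), and a LUN-to-datastore name lookup (lun2ds),
+// and it refreshes object discovery on a periodic ticker.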
+type Endpoint struct { + Parent *VSphere + URL *url.URL + resourceKinds map[string]*resourceKind + hwMarks *TSCache + lun2ds map[string]string + discoveryTicker *time.Ticker + collectMux sync.RWMutex + initialized bool + clientFactory *ClientFactory + busy sync.Mutex + customFields map[int32]string + customAttrFilter filter.Filter + customAttrEnabled bool + metricNameLookup map[int32]string + metricNameMux sync.RWMutex + log telegraf.Logger +} + +type resourceKind struct { + name string + vcName string + pKey string + parentTag string + enabled bool + realTime bool + sampling int32 + objects objectMap + filters filter.Filter + paths []string + excludePaths []string + collectInstances bool + getObjects func(context.Context, *Endpoint, *ResourceFilter) (objectMap, error) + include []string + simple bool + metrics performance.MetricList + parent string + latestSample time.Time + lastColl time.Time +} + +type metricEntry struct { + tags map[string]string + name string + ts time.Time + fields map[string]interface{} +} + +type objectMap map[string]*objectRef + +type objectRef struct { + name string + altID string + ref types.ManagedObjectReference + parentRef *types.ManagedObjectReference //Pointer because it must be nillable + guest string + dcname string + customValues map[string]string + lookup map[string]string +} + +func (e *Endpoint) getParent(obj *objectRef, res *resourceKind) (*objectRef, bool) { + if pKind, ok := e.resourceKinds[res.parent]; ok { + if p, ok := pKind.objects[obj.parentRef.Value]; ok { + return p, true + } + } + return nil, false +} + +// NewEndpoint returns a new connection to a vCenter based on the URL and configuration passed +// as parameters. +func NewEndpoint(ctx context.Context, parent *VSphere, url *url.URL, log telegraf.Logger) (*Endpoint, error) { + e := Endpoint{ + URL: url, + Parent: parent, + hwMarks: NewTSCache(hwMarkTTL), + lun2ds: make(map[string]string), + initialized: false, + clientFactory: NewClientFactory(ctx, url, parent), + customAttrFilter: newFilterOrPanic(parent.CustomAttributeInclude, parent.CustomAttributeExclude), + customAttrEnabled: anythingEnabled(parent.CustomAttributeExclude), + log: log, + } + + e.resourceKinds = map[string]*resourceKind{ + "datacenter": { + name: "datacenter", + vcName: "Datacenter", + pKey: "dcname", + parentTag: "", + enabled: anythingEnabled(parent.DatacenterMetricExclude), + realTime: false, + sampling: 300, + objects: make(objectMap), + filters: newFilterOrPanic(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude), + paths: parent.DatacenterInclude, + excludePaths: parent.DatacenterExclude, + simple: isSimple(parent.DatacenterMetricInclude, parent.DatacenterMetricExclude), + include: parent.DatacenterMetricInclude, + collectInstances: parent.DatacenterInstances, + getObjects: getDatacenters, + parent: "", + }, + "cluster": { + name: "cluster", + vcName: "ClusterComputeResource", + pKey: "clustername", + parentTag: "dcname", + enabled: anythingEnabled(parent.ClusterMetricExclude), + realTime: false, + sampling: 300, + objects: make(objectMap), + filters: newFilterOrPanic(parent.ClusterMetricInclude, parent.ClusterMetricExclude), + paths: parent.ClusterInclude, + excludePaths: parent.ClusterExclude, + simple: isSimple(parent.ClusterMetricInclude, parent.ClusterMetricExclude), + include: parent.ClusterMetricInclude, + collectInstances: parent.ClusterInstances, + getObjects: getClusters, + parent: "datacenter", + }, + "host": { + name: "host", + vcName: "HostSystem", + pKey: "esxhostname", + parentTag: 
"clustername", + enabled: anythingEnabled(parent.HostMetricExclude), + realTime: true, + sampling: 20, + objects: make(objectMap), + filters: newFilterOrPanic(parent.HostMetricInclude, parent.HostMetricExclude), + paths: parent.HostInclude, + excludePaths: parent.HostExclude, + simple: isSimple(parent.HostMetricInclude, parent.HostMetricExclude), + include: parent.HostMetricInclude, + collectInstances: parent.HostInstances, + getObjects: getHosts, + parent: "cluster", + }, + "vm": { + name: "vm", + vcName: "VirtualMachine", + pKey: "vmname", + parentTag: "esxhostname", + enabled: anythingEnabled(parent.VMMetricExclude), + realTime: true, + sampling: 20, + objects: make(objectMap), + filters: newFilterOrPanic(parent.VMMetricInclude, parent.VMMetricExclude), + paths: parent.VMInclude, + excludePaths: parent.VMExclude, + simple: isSimple(parent.VMMetricInclude, parent.VMMetricExclude), + include: parent.VMMetricInclude, + collectInstances: parent.VMInstances, + getObjects: getVMs, + parent: "host", + }, + "datastore": { + name: "datastore", + vcName: "Datastore", + pKey: "dsname", + enabled: anythingEnabled(parent.DatastoreMetricExclude), + realTime: false, + sampling: 300, + objects: make(objectMap), + filters: newFilterOrPanic(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude), + paths: parent.DatastoreInclude, + excludePaths: parent.DatastoreExclude, + simple: isSimple(parent.DatastoreMetricInclude, parent.DatastoreMetricExclude), + include: parent.DatastoreMetricInclude, + collectInstances: parent.DatastoreInstances, + getObjects: getDatastores, + parent: "", + }, + } + + // Start discover and other goodness + err := e.init(ctx) + + return &e, err +} + +func anythingEnabled(ex []string) bool { + for _, s := range ex { + if s == "*" { + return false + } + } + return true +} + +func newFilterOrPanic(include []string, exclude []string) filter.Filter { + f, err := filter.NewIncludeExcludeFilter(include, exclude) + if err != nil { + panic(fmt.Sprintf("Include/exclude filters are invalid: %s", err)) + } + return f +} + +func isSimple(include []string, exclude []string) bool { + if len(exclude) > 0 || len(include) == 0 { + return false + } + for _, s := range include { + if strings.Contains(s, "*") { + return false + } + } + return true +} + +func (e *Endpoint) startDiscovery(ctx context.Context) { + e.discoveryTicker = time.NewTicker(e.Parent.ObjectDiscoveryInterval.Duration) + go func() { + for { + select { + case <-e.discoveryTicker.C: + err := e.discover(ctx) + if err != nil && err != context.Canceled { + e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) + } + case <-ctx.Done(): + e.log.Debugf("Exiting discovery goroutine for %s", e.URL.Host) + e.discoveryTicker.Stop() + return + } + } + }() +} + +func (e *Endpoint) initalDiscovery(ctx context.Context) { + err := e.discover(ctx) + if err != nil && err != context.Canceled { + e.log.Errorf("Discovery for %s: %s", e.URL.Host, err.Error()) + } + e.startDiscovery(ctx) +} + +func (e *Endpoint) init(ctx context.Context) error { + client, err := e.clientFactory.GetClient(ctx) + if err != nil { + return err + } + + // Initial load of custom field metadata + if e.customAttrEnabled { + fields, err := client.GetCustomFields(ctx) + if err != nil { + e.log.Warn("Could not load custom field metadata") + } else { + e.customFields = fields + } + } + + if e.Parent.ObjectDiscoveryInterval.Duration > 0 { + e.Parent.Log.Debug("Running initial discovery") + e.initalDiscovery(ctx) + } + e.initialized = true + return nil +} + +func (e 
*Endpoint) getMetricNameForId(id int32) string { + e.metricNameMux.RLock() + defer e.metricNameMux.RUnlock() + return e.metricNameLookup[id] +} + +func (e *Endpoint) reloadMetricNameMap(ctx context.Context) error { + e.metricNameMux.Lock() + defer e.metricNameMux.Unlock() + client, err := e.clientFactory.GetClient(ctx) + if err != nil { + return err + } + + mn, err := client.CounterInfoByName(ctx) + if err != nil { + return err + } + e.metricNameLookup = make(map[int32]string) + for name, m := range mn { + e.metricNameLookup[m.Key] = name + } + return nil +} + +func (e *Endpoint) getMetadata(ctx context.Context, obj *objectRef, sampling int32) (performance.MetricList, error) { + client, err := e.clientFactory.GetClient(ctx) + if err != nil { + return nil, err + } + + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + metrics, err := client.Perf.AvailableMetric(ctx1, obj.ref.Reference(), sampling) + if err != nil { + return nil, err + } + return metrics, nil +} + +func (e *Endpoint) getDatacenterName(ctx context.Context, client *Client, cache map[string]string, r types.ManagedObjectReference) string { + path := make([]string, 0) + returnVal := "" + here := r + done := false + for !done { + done = func() bool { + if name, ok := cache[here.Reference().String()]; ok { + // Populate cache for the entire chain of objects leading here. + returnVal = name + return true + } + path = append(path, here.Reference().String()) + o := object.NewCommon(client.Client.Client, r) + var result mo.ManagedEntity + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := o.Properties(ctx1, here, []string{"parent", "name"}, &result) + if err != nil { + e.Parent.Log.Warnf("Error while resolving parent. Assuming no parent exists. Error: %s", err.Error()) + return true + } + if result.Reference().Type == "Datacenter" { + // Populate cache for the entire chain of objects leading here. + returnVal = result.Name + return true + } + if result.Parent == nil { + e.Parent.Log.Debugf("No parent found for %s (ascending from %s)", here.Reference(), r.Reference()) + return true + } + here = result.Parent.Reference() + return false + }() + } + for _, s := range path { + cache[s] = returnVal + } + return returnVal +} + +func (e *Endpoint) discover(ctx context.Context) error { + e.busy.Lock() + defer e.busy.Unlock() + if ctx.Err() != nil { + return ctx.Err() + } + + err := e.reloadMetricNameMap(ctx) + if err != nil { + return err + } + + sw := NewStopwatch("discover", e.URL.Host) + + client, err := e.clientFactory.GetClient(ctx) + if err != nil { + return err + } + + e.log.Debugf("Discover new objects for %s", e.URL.Host) + dcNameCache := make(map[string]string) + + numRes := int64(0) + + // Populate resource objects, and endpoint instance info. 
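// Note: results for each kind are staged in the local newObjects map and only swapped
// into e.resourceKinds under collectMux at the end of discover(), so a concurrent
// Collect() never observes a partially rebuilt object map.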
+ newObjects := make(map[string]objectMap) + for k, res := range e.resourceKinds { + e.log.Debugf("Discovering resources for %s", res.name) + // Need to do this for all resource types even if they are not enabled + if res.enabled || k != "vm" { + rf := ResourceFilter{ + finder: &Finder{client}, + resType: res.vcName, + paths: res.paths, + excludePaths: res.excludePaths} + + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + objects, err := res.getObjects(ctx1, e, &rf) + cancel1() + if err != nil { + return err + } + + // Fill in datacenter names where available (no need to do it for Datacenters) + if res.name != "Datacenter" { + for k, obj := range objects { + if obj.parentRef != nil { + obj.dcname = e.getDatacenterName(ctx, client, dcNameCache, *obj.parentRef) + objects[k] = obj + } + } + } + + // No need to collect metric metadata if resource type is not enabled + if res.enabled { + if res.simple { + e.simpleMetadataSelect(ctx, client, res) + } else { + e.complexMetadataSelect(ctx, res, objects) + } + newObjects[k] = objects + + SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": res.name}, int64(len(objects))) + numRes += int64(len(objects)) + } + } + if err != nil { + e.log.Error(err) + } + } + + // Build lun2ds map + dss := newObjects["datastore"] + l2d := make(map[string]string) + for _, ds := range dss { + lunId := ds.altID + m := isolateLUN.FindStringSubmatch(lunId) + if m != nil { + l2d[m[1]] = ds.name + } + } + + // Load custom field metadata + var fields map[int32]string + if e.customAttrEnabled { + fields, err = client.GetCustomFields(ctx) + if err != nil { + e.log.Warn("Could not load custom field metadata") + fields = nil + } + } + + // Atomically swap maps + e.collectMux.Lock() + defer e.collectMux.Unlock() + + for k, v := range newObjects { + e.resourceKinds[k].objects = v + } + e.lun2ds = l2d + + if fields != nil { + e.customFields = fields + } + + sw.Stop() + SendInternalCounterWithTags("discovered_objects", e.URL.Host, map[string]string{"type": "instance-total"}, numRes) + return nil +} + +func (e *Endpoint) simpleMetadataSelect(ctx context.Context, client *Client, res *resourceKind) { + e.log.Debugf("Using fast metric metadata selection for %s", res.name) + m, err := client.CounterInfoByName(ctx) + if err != nil { + e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) + return + } + res.metrics = make(performance.MetricList, 0, len(res.include)) + for _, s := range res.include { + if pci, ok := m[s]; ok { + cnt := types.PerfMetricId{ + CounterId: pci.Key, + } + if res.collectInstances { + cnt.Instance = "*" + } else { + cnt.Instance = "" + } + res.metrics = append(res.metrics, cnt) + } else { + e.log.Warnf("Metric name %s is unknown. Will not be collected", s) + } + } +} + +func (e *Endpoint) complexMetadataSelect(ctx context.Context, res *resourceKind, objects objectMap) { + // We're only going to get metadata from maxMetadataSamples resources. If we have + // more resources than that, we pick maxMetadataSamples samples at random. 
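// Illustrative aside (not part of this patch): the partial shuffle below only permutes
// the first maxMetadataSamples positions of the slice; the randomness of which objects
// end up in those positions comes from Go's randomized map iteration order when
// sampledObjects is filled in. A sketch that instead samples uniformly from the whole
// slice, reusing the "math/rand" import already present in this file, could look like:
//
//	if n := len(sampledObjects); n > maxMetadataSamples {
//		for i := 0; i < maxMetadataSamples; i++ {
//			j := i + rand.Intn(n-i) // pick from the not-yet-selected tail
//			sampledObjects[i], sampledObjects[j] = sampledObjects[j], sampledObjects[i]
//		}
//		sampledObjects = sampledObjects[:maxMetadataSamples]
//	}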
+ sampledObjects := make([]*objectRef, len(objects)) + i := 0 + for _, obj := range objects { + sampledObjects[i] = obj + i++ + } + n := len(sampledObjects) + if n > maxMetadataSamples { + // Shuffle samples into the maxMetadataSamples positions + for i := 0; i < maxMetadataSamples; i++ { + j := int(rand.Int31n(int32(i + 1))) + t := sampledObjects[i] + sampledObjects[i] = sampledObjects[j] + sampledObjects[j] = t + } + sampledObjects = sampledObjects[0:maxMetadataSamples] + } + + instInfoMux := sync.Mutex{} + te := NewThrottledExecutor(e.Parent.DiscoverConcurrency) + for _, obj := range sampledObjects { + func(obj *objectRef) { + te.Run(ctx, func() { + metrics, err := e.getMetadata(ctx, obj, res.sampling) + if err != nil { + e.log.Errorf("Getting metric metadata. Discovery will be incomplete. Error: %s", err.Error()) + } + mMap := make(map[string]types.PerfMetricId) + for _, m := range metrics { + if m.Instance != "" && res.collectInstances { + m.Instance = "*" + } else { + m.Instance = "" + } + if res.filters.Match(e.getMetricNameForId(m.CounterId)) { + mMap[strconv.Itoa(int(m.CounterId))+"|"+m.Instance] = m + } + } + e.log.Debugf("Found %d metrics for %s", len(mMap), obj.name) + instInfoMux.Lock() + defer instInfoMux.Unlock() + if len(mMap) > len(res.metrics) { + res.metrics = make(performance.MetricList, len(mMap)) + i := 0 + for _, m := range mMap { + res.metrics[i] = m + i++ + } + } + }) + }(obj) + } + te.Wait() +} + +func getDatacenters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { + var resources []mo.Datacenter + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := filter.FindAll(ctx1, &resources) + if err != nil { + return nil, err + } + m := make(objectMap, len(resources)) + for _, r := range resources { + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: r.Parent, + dcname: r.Name, + customValues: e.loadCustomAttributes(&r.ManagedEntity), + } + } + return m, nil +} + +func getClusters(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { + var resources []mo.ClusterComputeResource + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := filter.FindAll(ctx1, &resources) + if err != nil { + return nil, err + } + cache := make(map[string]*types.ManagedObjectReference) + m := make(objectMap, len(resources)) + for _, r := range resources { + // Wrap in a function to make defer work correctly. + err := func() error { + // We're not interested in the immediate parent (a folder), but the data center. 
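// In the vSphere inventory a cluster's immediate Parent is the datacenter's "host"
// folder, and that folder's Parent is the Datacenter itself, e.g.:
//
//	ClusterComputeResource "DC0_C0" -> Folder "host" -> Datacenter "DC0"
//
// The lookup below reads the folder's "parent" property once per folder and caches it
// by the folder's MOID, so clusters that share a folder cost a single round trip. The
// resolved p is what ties a cluster to its datacenter as the parentRef of its objectRef
// (field mapping assumed here, mirroring getDatacenters above and getHosts below), e.g.:
//
//	m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{
//		name:         r.Name,
//		ref:          r.ExtensibleManagedObject.Reference(),
//		parentRef:    p,
//		customValues: e.loadCustomAttributes(&r.ManagedEntity),
//	}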
+ p, ok := cache[r.Parent.Value] + if !ok { + ctx2, cancel2 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel2() + client, err := e.clientFactory.GetClient(ctx2) + if err != nil { + return err + } + o := object.NewFolder(client.Client.Client, *r.Parent) + var folder mo.Folder + ctx3, cancel3 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel3() + err = o.Properties(ctx3, *r.Parent, []string{"parent"}, &folder) + if err != nil { + e.Parent.Log.Warnf("Error while getting folder parent: %s", err.Error()) + p = nil + } else { + pp := folder.Parent.Reference() + p = &pp + cache[r.Parent.Value] = p + } + } + return nil + }() + if err != nil { + return nil, err + } + } + return m, nil +} + +//noinspection GoUnusedParameter +func getHosts(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { + var resources []mo.HostSystem + err := filter.FindAll(ctx, &resources) + if err != nil { + return nil, err + } + m := make(objectMap) + for _, r := range resources { + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: r.Parent, + customValues: e.loadCustomAttributes(&r.ManagedEntity), + } + } + return m, nil +} + +func getVMs(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { + var resources []mo.VirtualMachine + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := filter.FindAll(ctx1, &resources) + if err != nil { + return nil, err + } + m := make(objectMap) + for _, r := range resources { + if r.Runtime.PowerState != "poweredOn" { + continue + } + guest := "unknown" + uuid := "" + lookup := make(map[string]string) + + // Extract host name + if r.Guest != nil && r.Guest.HostName != "" { + lookup["guesthostname"] = r.Guest.HostName + } + + // Collect network information + for _, net := range r.Guest.Net { + if net.DeviceConfigId == -1 { + continue + } + if net.IpConfig == nil || net.IpConfig.IpAddress == nil { + continue + } + ips := make(map[string][]string) + for _, ip := range net.IpConfig.IpAddress { + addr := ip.IpAddress + for _, ipType := range e.Parent.IpAddresses { + if !(ipType == "ipv4" && isIPv4.MatchString(addr) || + ipType == "ipv6" && isIPv6.MatchString(addr)) { + continue + } + + // By convention, we want the preferred addresses to appear first in the array. + if _, ok := ips[ipType]; !ok { + ips[ipType] = make([]string, 0) + } + if ip.State == "preferred" { + ips[ipType] = append([]string{addr}, ips[ipType]...) + } else { + ips[ipType] = append(ips[ipType], addr) + } + } + } + for ipType, ipList := range ips { + lookup["nic/"+strconv.Itoa(int(net.DeviceConfigId))+"/"+ipType] = strings.Join(ipList, ",") + } + } + + // Sometimes Config is unknown and returns a nil pointer + if r.Config != nil { + guest = cleanGuestID(r.Config.GuestId) + uuid = r.Config.Uuid + } + cvs := make(map[string]string) + if e.customAttrEnabled { + for _, cv := range r.Summary.CustomValue { + val := cv.(*types.CustomFieldStringValue) + if val.Value == "" { + continue + } + key, ok := e.customFields[val.Key] + if !ok { + e.log.Warnf("Metadata for custom field %d not found. 
Skipping", val.Key) + continue + } + if e.customAttrFilter.Match(key) { + cvs[key] = val.Value + } + } + } + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: r.Runtime.Host, + guest: guest, + altID: uuid, + customValues: e.loadCustomAttributes(&r.ManagedEntity), + lookup: lookup, + } + } + return m, nil +} + +func getDatastores(ctx context.Context, e *Endpoint, filter *ResourceFilter) (objectMap, error) { + var resources []mo.Datastore + ctx1, cancel1 := context.WithTimeout(ctx, e.Parent.Timeout.Duration) + defer cancel1() + err := filter.FindAll(ctx1, &resources) + if err != nil { + return nil, err + } + m := make(objectMap) + for _, r := range resources { + lunId := "" + if r.Info != nil { + info := r.Info.GetDatastoreInfo() + if info != nil { + lunId = info.Url + } + } + m[r.ExtensibleManagedObject.Reference().Value] = &objectRef{ + name: r.Name, + ref: r.ExtensibleManagedObject.Reference(), + parentRef: r.Parent, + altID: lunId, + customValues: e.loadCustomAttributes(&r.ManagedEntity), + } + } + return m, nil +} + +func (e *Endpoint) loadCustomAttributes(entity *mo.ManagedEntity) map[string]string { + if !e.customAttrEnabled { + return map[string]string{} + } + cvs := make(map[string]string) + for _, v := range entity.CustomValue { + cv, ok := v.(*types.CustomFieldStringValue) + if !ok { + e.Parent.Log.Warnf("Metadata for custom field %d not of string type. Skipping", cv.Key) + continue + } + key, ok := e.customFields[cv.Key] + if !ok { + e.Parent.Log.Warnf("Metadata for custom field %d not found. Skipping", cv.Key) + continue + } + if e.customAttrFilter.Match(key) { + cvs[key] = cv.Value + } + } + return cvs +} + +// Close shuts down an Endpoint and releases any resources associated with it. +func (e *Endpoint) Close() { + e.clientFactory.Close() +} + +// Collect runs a round of data collections as specified in the configuration. +func (e *Endpoint) Collect(ctx context.Context, acc telegraf.Accumulator) error { + + // If we never managed to do a discovery, collection will be a no-op. Therefore, + // we need to check that a connection is available, or the collection will + // silently fail. + if _, err := e.clientFactory.GetClient(ctx); err != nil { + return err + } + + e.collectMux.RLock() + defer e.collectMux.RUnlock() + + if ctx.Err() != nil { + return ctx.Err() + } + + // If discovery interval is disabled (0), discover on each collection cycle + if e.Parent.ObjectDiscoveryInterval.Duration == 0 { + err := e.discover(ctx) + if err != nil { + return err + } + } + var wg sync.WaitGroup + for k, res := range e.resourceKinds { + if res.enabled { + wg.Add(1) + go func(k string) { + defer wg.Done() + err := e.collectResource(ctx, k, acc) + if err != nil { + acc.AddError(err) + } + }(k) + } + } + wg.Wait() + + // Purge old timestamps from the cache + e.hwMarks.Purge() + return nil +} + +// Workaround to make sure pqs is a copy of the loop variable and won't change. +func submitChunkJob(ctx context.Context, te *ThrottledExecutor, job queryJob, pqs queryChunk) { + te.Run(ctx, func() { + job(pqs) + }) +} + +func (e *Endpoint) chunkify(ctx context.Context, res *resourceKind, now time.Time, latest time.Time, acc telegraf.Accumulator, job queryJob) { + te := NewThrottledExecutor(e.Parent.CollectConcurrency) + maxMetrics := e.Parent.MaxQueryMetrics + if maxMetrics < 1 { + maxMetrics = 1 + } + + // Workaround for vCenter weirdness. 
Cluster metrics seem to count multiple times + // when checking query size, so keep it at a low value. + // Revisit this when we better understand the reason why vCenter counts it this way! + if res.name == "cluster" && maxMetrics > 10 { + maxMetrics = 10 + } + + pqs := make(queryChunk, 0, e.Parent.MaxQueryObjects) + + for _, object := range res.objects { + timeBuckets := make(map[int64]*types.PerfQuerySpec, 0) + for metricIdx, metric := range res.metrics { + + // Determine time of last successful collection + metricName := e.getMetricNameForId(metric.CounterId) + if metricName == "" { + e.log.Info("Unable to find metric name for id %d. Skipping!", metric.CounterId) + continue + } + start, ok := e.hwMarks.Get(object.ref.Value, metricName) + if !ok { + start = latest.Add(time.Duration(-res.sampling) * time.Second * (metricLookback - 1)) + } + start = start.Truncate(20 * time.Second) // Truncate to maximum resolution + + // Create bucket if we don't already have it + bucket, ok := timeBuckets[start.Unix()] + if !ok { + bucket = &types.PerfQuerySpec{ + Entity: object.ref, + MaxSample: maxSampleConst, + MetricId: make([]types.PerfMetricId, 0), + IntervalId: res.sampling, + Format: "normal", + } + bucket.StartTime = &start + bucket.EndTime = &now + timeBuckets[start.Unix()] = bucket + } + + // Add this metric to the bucket + bucket.MetricId = append(bucket.MetricId, metric) + + // Bucket filled to capacity? (Only applies to non real time) + // OR if we're past the absolute maximum limit + if (!res.realTime && len(bucket.MetricId) >= maxMetrics) || len(bucket.MetricId) > 100000 { + e.log.Debugf("Submitting partial query: %d metrics (%d remaining) of type %s for %s. Total objects %d", + len(bucket.MetricId), len(res.metrics)-metricIdx, res.name, e.URL.Host, len(res.objects)) + + // Don't send work items if the context has been cancelled. + if ctx.Err() == context.Canceled { + return + } + + // Run collection job + delete(timeBuckets, start.Unix()) + submitChunkJob(ctx, te, job, queryChunk{*bucket}) + } + } + // Handle data in time bucket and submit job if we've reached the maximum number of object. + for _, bucket := range timeBuckets { + pqs = append(pqs, *bucket) + if (!res.realTime && len(pqs) > e.Parent.MaxQueryObjects) || len(pqs) > 100000 { + e.log.Debugf("Submitting final bucket job for %s: %d metrics", res.name, len(bucket.MetricId)) + submitChunkJob(ctx, te, job, pqs) + pqs = make(queryChunk, 0, e.Parent.MaxQueryObjects) + } + } + } + // Submit any jobs left in the queue + if len(pqs) > 0 { + e.log.Debugf("Submitting job for %s: %d objects", res.name, len(pqs)) + submitChunkJob(ctx, te, job, pqs) + } + + // Wait for background collection to finish + te.Wait() +} + +func (e *Endpoint) collectResource(ctx context.Context, resourceType string, acc telegraf.Accumulator) error { + res := e.resourceKinds[resourceType] + client, err := e.clientFactory.GetClient(ctx) + if err != nil { + return err + } + now, err := client.GetServerTime(ctx) + if err != nil { + return err + } + + // Estimate the interval at which we're invoked. Use local time (not server time) + // since this is about how we got invoked locally. 
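// Worked example of the estimate below (numbers hypothetical): for a real-time resource
// with res.sampling == 20, a raw gap of 58s between collections is padded by sampling/2
// to 68s and truncated to the nearest 20s multiple, giving estInterval = 60s; a raw gap
// of 5s pads to 15s, truncates to 0 and is floored back up to the 20s sampling interval.
// estInterval is later handed to alignSamples() as the bucket width used when averaging.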
+ localNow := time.Now() + estInterval := time.Minute + if !res.lastColl.IsZero() { + s := time.Duration(res.sampling) * time.Second + rawInterval := localNow.Sub(res.lastColl) + paddedInterval := rawInterval + time.Duration(res.sampling/2)*time.Second + estInterval = paddedInterval.Truncate(s) + if estInterval < s { + estInterval = s + } + e.log.Debugf("Raw interval %s, padded: %s, estimated: %s", rawInterval, paddedInterval, estInterval) + } + e.log.Debugf("Interval estimated to %s", estInterval) + res.lastColl = localNow + + latest := res.latestSample + if !latest.IsZero() { + elapsed := now.Sub(latest).Seconds() + 5.0 // Allow 5 second jitter. + e.log.Debugf("Latest: %s, elapsed: %f, resource: %s", latest, elapsed, resourceType) + if !res.realTime && elapsed < float64(res.sampling) { + // No new data would be available. We're outta here! + e.log.Debugf("Sampling period for %s of %d has not elapsed on %s", + resourceType, res.sampling, e.URL.Host) + return nil + } + } else { + latest = now.Add(time.Duration(-res.sampling) * time.Second) + } + + internalTags := map[string]string{"resourcetype": resourceType} + sw := NewStopwatchWithTags("gather_duration", e.URL.Host, internalTags) + + e.log.Debugf("Collecting metrics for %d objects of type %s for %s", + len(res.objects), resourceType, e.URL.Host) + + count := int64(0) + + var tsMux sync.Mutex + latestSample := time.Time{} + + // Divide workload into chunks and process them concurrently + e.chunkify(ctx, res, now, latest, acc, + func(chunk queryChunk) { + n, localLatest, err := e.collectChunk(ctx, chunk, res, acc, now, estInterval) + e.log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n) + if err != nil { + acc.AddError(errors.New("while collecting " + res.name + ": " + err.Error())) + return + } + e.Parent.Log.Debugf("CollectChunk for %s returned %d metrics", resourceType, n) + atomic.AddInt64(&count, int64(n)) + tsMux.Lock() + defer tsMux.Unlock() + if localLatest.After(latestSample) && !localLatest.IsZero() { + latestSample = localLatest + } + }) + + e.log.Debugf("Latest sample for %s set to %s", resourceType, latestSample) + if !latestSample.IsZero() { + res.latestSample = latestSample + } + sw.Stop() + SendInternalCounterWithTags("gather_count", e.URL.Host, internalTags, count) + return nil +} + +func (e *Endpoint) alignSamples(info []types.PerfSampleInfo, values []int64, interval time.Duration) ([]types.PerfSampleInfo, []float64) { + rInfo := make([]types.PerfSampleInfo, 0, len(info)) + rValues := make([]float64, 0, len(values)) + bi := 1.0 + var lastBucket time.Time + for idx := range info { + // According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted + // data coming back with missing values. Take care of that gracefully! + if idx >= len(values) { + e.log.Debugf("len(SampleInfo)>len(Value) %d > %d during alignment", len(info), len(values)) + break + } + v := float64(values[idx]) + if v < 0 { + continue + } + ts := info[idx].Timestamp + roundedTs := ts.Truncate(interval) + + // Are we still working on the same bucket? 
+ if roundedTs == lastBucket { + bi++ + p := len(rValues) - 1 + rValues[p] = ((bi-1)/bi)*rValues[p] + v/bi + } else { + rValues = append(rValues, v) + roundedInfo := types.PerfSampleInfo{ + Timestamp: roundedTs, + Interval: info[idx].Interval, + } + rInfo = append(rInfo, roundedInfo) + bi = 1.0 + lastBucket = roundedTs + } + } + return rInfo, rValues +} + +func (e *Endpoint) collectChunk(ctx context.Context, pqs queryChunk, res *resourceKind, acc telegraf.Accumulator, now time.Time, interval time.Duration) (int, time.Time, error) { + e.log.Debugf("Query for %s has %d QuerySpecs", res.name, len(pqs)) + latestSample := time.Time{} + count := 0 + resourceType := res.name + prefix := "vsphere" + e.Parent.Separator + resourceType + + client, err := e.clientFactory.GetClient(ctx) + if err != nil { + return count, latestSample, err + } + + metricInfo, err := client.CounterInfoByName(ctx) + if err != nil { + return count, latestSample, err + } + + ems, err := client.QueryMetrics(ctx, pqs) + if err != nil { + return count, latestSample, err + } + + e.log.Debugf("Query for %s returned metrics for %d objects", resourceType, len(ems)) + + // Iterate through results + for _, em := range ems { + moid := em.Entity.Reference().Value + instInfo, found := res.objects[moid] + if !found { + e.log.Errorf("MOID %s not found in cache. Skipping! (This should not happen!)", moid) + continue + } + buckets := make(map[string]metricEntry) + for _, v := range em.Value { + name := v.Name + t := map[string]string{ + "vcenter": e.URL.Host, + "source": instInfo.name, + "moid": moid, + } + + // Populate tags + objectRef, ok := res.objects[moid] + if !ok { + e.log.Errorf("MOID %s not found in cache. Skipping", moid) + continue + } + e.populateTags(objectRef, resourceType, res, t, &v) + + nValues := 0 + alignedInfo, alignedValues := e.alignSamples(em.SampleInfo, v.Value, interval) + + for idx, sample := range alignedInfo { + // According to the docs, SampleInfo and Value should have the same length, but we've seen corrupted + // data coming back with missing values. Take care of that gracefully! + if idx >= len(alignedValues) { + e.log.Debugf("Len(SampleInfo)>len(Value) %d > %d", len(alignedInfo), len(alignedValues)) + break + } + ts := sample.Timestamp + if ts.After(latestSample) { + latestSample = ts + } + nValues++ + + // Organize the metrics into a bucket per measurement. + mn, fn := e.makeMetricIdentifier(prefix, name) + bKey := mn + " " + v.Instance + " " + strconv.FormatInt(ts.UnixNano(), 10) + bucket, found := buckets[bKey] + if !found { + bucket = metricEntry{name: mn, ts: ts, fields: make(map[string]interface{}), tags: t} + buckets[bKey] = bucket + } + + // Percentage values must be scaled down by 100. + info, ok := metricInfo[name] + if !ok { + e.log.Errorf("Could not determine unit for %s. Skipping", name) + } + v := alignedValues[idx] + if info.UnitInfo.GetElementDescription().Key == "percent" { + bucket.fields[fn] = v / 100.0 + } else { + if e.Parent.UseIntSamples { + bucket.fields[fn] = int64(round(v)) + } else { + bucket.fields[fn] = v + } + } + count++ + + // Update hiwater marks + e.hwMarks.Put(moid, name, ts) + } + if nValues == 0 { + e.log.Debugf("Missing value for: %s, %s", name, objectRef.name) + continue + } + } + // We've iterated through all the metrics and collected buckets for each + // measurement name. Now emit them! 
+ for _, bucket := range buckets { + acc.AddFields(bucket.name, bucket.fields, bucket.tags, bucket.ts) + } + } + return count, latestSample, nil +} + +func (e *Endpoint) populateTags(objectRef *objectRef, resourceType string, resource *resourceKind, t map[string]string, v *performance.MetricSeries) { + // Map name of object. + if resource.pKey != "" { + t[resource.pKey] = objectRef.name + } + + if resourceType == "vm" && objectRef.altID != "" { + t["uuid"] = objectRef.altID + } + + // Map parent reference + parent, found := e.getParent(objectRef, resource) + if found { + t[resource.parentTag] = parent.name + if resourceType == "vm" { + if objectRef.guest != "" { + t["guest"] = objectRef.guest + } + if gh := objectRef.lookup["guesthostname"]; gh != "" { + t["guesthostname"] = gh + } + if c, ok := e.resourceKinds["cluster"].objects[parent.parentRef.Value]; ok { + t["clustername"] = c.name + } + } + } + + // Fill in Datacenter name + if objectRef.dcname != "" { + t["dcname"] = objectRef.dcname + } + + // Determine which point tag to map to the instance + name := v.Name + instance := "instance-total" + if v.Instance != "" { + instance = v.Instance + } + if strings.HasPrefix(name, "cpu.") { + t["cpu"] = instance + } else if strings.HasPrefix(name, "datastore.") { + t["lun"] = instance + if ds, ok := e.lun2ds[instance]; ok { + t["dsname"] = ds + } else { + t["dsname"] = instance + } + } else if strings.HasPrefix(name, "disk.") { + t["disk"] = cleanDiskTag(instance) + } else if strings.HasPrefix(name, "net.") { + t["interface"] = instance + + // Add IP addresses to NIC data. + if resourceType == "vm" && objectRef.lookup != nil { + key := "nic/" + t["interface"] + "/" + if ip, ok := objectRef.lookup[key+"ipv6"]; ok { + t["ipv6"] = ip + } + if ip, ok := objectRef.lookup[key+"ipv4"]; ok { + t["ipv4"] = ip + } + } + } else if strings.HasPrefix(name, "storageAdapter.") { + t["adapter"] = instance + } else if strings.HasPrefix(name, "storagePath.") { + t["path"] = instance + } else if strings.HasPrefix(name, "sys.resource") { + t["resource"] = instance + } else if strings.HasPrefix(name, "vflashModule.") { + t["module"] = instance + } else if strings.HasPrefix(name, "virtualDisk.") { + t["disk"] = instance + } else if v.Instance != "" { + // default + t["instance"] = v.Instance + } + + // Fill in custom values if they exist + if objectRef.customValues != nil { + for k, v := range objectRef.customValues { + if v != "" { + t[k] = v + } + } + } +} + +func (e *Endpoint) makeMetricIdentifier(prefix, metric string) (string, string) { + parts := strings.Split(metric, ".") + if len(parts) == 1 { + return prefix, parts[0] + } + return prefix + e.Parent.Separator + parts[0], strings.Join(parts[1:], e.Parent.Separator) +} + +func cleanGuestID(id string) string { + return strings.TrimSuffix(id, "Guest") +} + +func cleanDiskTag(disk string) string { + // Remove enclosing "<>" + return strings.TrimSuffix(strings.TrimPrefix(disk, "<"), ">") +} + +func round(x float64) float64 { + t := math.Trunc(x) + if math.Abs(x-t) >= 0.5 { + return t + math.Copysign(1, x) + } + return t +} diff --git a/plugins/inputs/vsphere/finder.go b/plugins/inputs/vsphere/finder.go new file mode 100644 index 000000000..e49bf80f3 --- /dev/null +++ b/plugins/inputs/vsphere/finder.go @@ -0,0 +1,275 @@ +package vsphere + +import ( + "context" + "reflect" + "strings" + + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/view" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +var childTypes 
map[string][]string + +var addFields map[string][]string + +var containers map[string]interface{} + +// Finder allows callers to find resources in vCenter given a query string. +type Finder struct { + client *Client +} + +// ResourceFilter is a convenience class holding a finder and a set of paths. It is useful when you need a +// self contained object capable of returning a certain set of resources. +type ResourceFilter struct { + finder *Finder + resType string + paths []string + excludePaths []string +} + +// FindAll returns the union of resources found given the supplied resource type and paths. +func (f *Finder) FindAll(ctx context.Context, resType string, paths, excludePaths []string, dst interface{}) error { + objs := make(map[string]types.ObjectContent) + for _, p := range paths { + if err := f.find(ctx, resType, p, objs); err != nil { + return err + } + } + if len(excludePaths) > 0 { + excludes := make(map[string]types.ObjectContent) + for _, p := range excludePaths { + if err := f.find(ctx, resType, p, excludes); err != nil { + return err + } + } + for k := range excludes { + delete(objs, k) + } + } + return objectContentToTypedArray(objs, dst) +} + +// Find returns the resources matching the specified path. +func (f *Finder) Find(ctx context.Context, resType, path string, dst interface{}) error { + objs := make(map[string]types.ObjectContent) + err := f.find(ctx, resType, path, objs) + if err != nil { + return err + } + return objectContentToTypedArray(objs, dst) +} + +func (f *Finder) find(ctx context.Context, resType, path string, objs map[string]types.ObjectContent) error { + p := strings.Split(path, "/") + flt := make([]property.Filter, len(p)-1) + for i := 1; i < len(p); i++ { + flt[i-1] = property.Filter{"name": p[i]} + } + err := f.descend(ctx, f.client.Client.ServiceContent.RootFolder, resType, flt, 0, objs) + if err != nil { + return err + } + f.client.log.Debugf("Find(%s, %s) returned %d objects", resType, path, len(objs)) + return nil +} + +func (f *Finder) descend(ctx context.Context, root types.ManagedObjectReference, resType string, + tokens []property.Filter, pos int, objs map[string]types.ObjectContent) error { + isLeaf := pos == len(tokens)-1 + + // No more tokens to match? + if pos >= len(tokens) { + return nil + } + + // Determine child types + + ct, ok := childTypes[root.Reference().Type] + if !ok { + // We don't know how to handle children of this type. Stop descending. + return nil + } + + m := view.NewManager(f.client.Client.Client) + v, err := m.CreateContainerView(ctx, root, ct, false) + if err != nil { + return err + } + defer v.Destroy(ctx) + var content []types.ObjectContent + + fields := []string{"name"} + recurse := tokens[pos]["name"] == "**" + + types := ct + if isLeaf { + if af, ok := addFields[resType]; ok { + fields = append(fields, af...) + } + if recurse { + // Special case: The last token is a recursive wildcard, so we can grab everything + // recursively in a single call. 
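// Worked example of the tokenization done in find() above (path taken from the plugin's
// sample config): "/*/vm/**" splits into the name filters "*", "vm" and "**". descend()
// matches any child of the root folder (in practice the datacenters) for "*", then the
// datacenter's "vm" folder by name, and then, because the final token is the recursive
// wildcard handled just below, fetches every object of the requested type underneath it
// with a single recursive ContainerView instead of walking the folder tree level by level.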
+ v2, err := m.CreateContainerView(ctx, root, []string{resType}, true) + if err != nil { + return err + } + defer v2.Destroy(ctx) + err = v2.Retrieve(ctx, []string{resType}, fields, &content) + if err != nil { + return err + } + for _, c := range content { + objs[c.Obj.String()] = c + } + return nil + } + types = []string{resType} // Only load wanted object type at leaf level + } + err = v.Retrieve(ctx, types, fields, &content) + if err != nil { + return err + } + + rerunAsLeaf := false + for _, c := range content { + if !matchName(tokens[pos], c.PropSet) { + continue + } + + // Already been here through another path? Skip! + if _, ok := objs[root.Reference().String()]; ok { + continue + } + + if c.Obj.Type == resType && isLeaf { + // We found what we're looking for. Consider it a leaf and stop descending + objs[c.Obj.String()] = c + continue + } + + // Deal with recursive wildcards (**) + var inc int + if recurse { + inc = 0 // By default, we stay on this token + if !isLeaf { + // Lookahead to next token. + if matchName(tokens[pos+1], c.PropSet) { + // Are we looking ahead at a leaf node that has the wanted type? + // Rerun the entire level as a leaf. This is needed since all properties aren't loaded + // when we're processing non-leaf nodes. + if pos == len(tokens)-2 { + if c.Obj.Type == resType { + rerunAsLeaf = true + continue + } + } else if _, ok := containers[c.Obj.Type]; ok { + // Tokens match and we're looking ahead at a container type that's not a leaf + // Consume this token and the next. + inc = 2 + } + } + } + } else { + // The normal case: Advance to next token before descending + inc = 1 + } + err := f.descend(ctx, c.Obj, resType, tokens, pos+inc, objs) + if err != nil { + return err + } + } + + if rerunAsLeaf { + // We're at a "pseudo leaf", i.e. we looked ahead a token and found that this level contains leaf nodes. + // Rerun the entire level as a leaf to get those nodes. This will only be executed when pos is one token + // before the last, to pos+1 will always point to a leaf token. + return f.descend(ctx, root, resType, tokens, pos+1, objs) + } + + return nil +} + +func objectContentToTypedArray(objs map[string]types.ObjectContent, dst interface{}) error { + rt := reflect.TypeOf(dst) + if rt == nil || rt.Kind() != reflect.Ptr { + panic("need pointer") + } + + rv := reflect.ValueOf(dst).Elem() + if !rv.CanSet() { + panic("cannot set dst") + } + for _, p := range objs { + v, err := mo.ObjectContentToType(p) + if err != nil { + return err + } + + vt := reflect.TypeOf(v) + + if !rv.Type().AssignableTo(vt) { + // For example: dst is []ManagedEntity, res is []HostSystem + if field, ok := vt.FieldByName(rt.Elem().Elem().Name()); ok && field.Anonymous { + rv.Set(reflect.Append(rv, reflect.ValueOf(v).FieldByIndex(field.Index))) + continue + } + } + + rv.Set(reflect.Append(rv, reflect.ValueOf(v))) + } + return nil +} + +// FindAll finds all resources matching the paths that were specified upon creation of +// the ResourceFilter. 
+func (r *ResourceFilter) FindAll(ctx context.Context, dst interface{}) error { + return r.finder.FindAll(ctx, r.resType, r.paths, r.excludePaths, dst) +} + +func matchName(f property.Filter, props []types.DynamicProperty) bool { + for _, prop := range props { + if prop.Name == "name" { + return f.MatchProperty(prop) + } + } + return false +} + +func init() { + childTypes = map[string][]string{ + "HostSystem": {"VirtualMachine"}, + "ComputeResource": {"HostSystem", "ResourcePool", "VirtualApp"}, + "ClusterComputeResource": {"HostSystem", "ResourcePool", "VirtualApp"}, + "Datacenter": {"Folder"}, + "Folder": { + "Folder", + "Datacenter", + "VirtualMachine", + "ComputeResource", + "ClusterComputeResource", + "Datastore", + }, + } + + addFields = map[string][]string{ + "HostSystem": {"parent", "summary.customValue", "customValue"}, + "VirtualMachine": {"runtime.host", "config.guestId", "config.uuid", "runtime.powerState", + "summary.customValue", "guest.net", "guest.hostName", "customValue"}, + "Datastore": {"parent", "info", "customValue"}, + "ClusterComputeResource": {"parent", "customValue"}, + "Datacenter": {"parent", "customValue"}, + } + + containers = map[string]interface{}{ + "HostSystem": nil, + "ComputeResource": nil, + "Datacenter": nil, + "ResourcePool": nil, + "Folder": nil, + "VirtualApp": nil, + } +} diff --git a/plugins/inputs/vsphere/selfhealth.go b/plugins/inputs/vsphere/selfhealth.go new file mode 100644 index 000000000..66069ca75 --- /dev/null +++ b/plugins/inputs/vsphere/selfhealth.go @@ -0,0 +1,53 @@ +package vsphere + +import ( + "time" + + "github.com/influxdata/telegraf/selfstat" +) + +// Stopwatch is a simple helper for recording timing information, +// such as gather times and discovery times. +type Stopwatch struct { + stat selfstat.Stat + start time.Time +} + +// NewStopwatch creates a new StopWatch and starts measuring time +// its creation. +func NewStopwatch(name, vCenter string) *Stopwatch { + return &Stopwatch{ + stat: selfstat.RegisterTiming("vsphere", name+"_ns", map[string]string{"vcenter": vCenter}), + start: time.Now(), + } +} + +// NewStopwatchWithTags creates a new StopWatch and starts measuring time +// its creation. Allows additional tags. +func NewStopwatchWithTags(name, vCenter string, tags map[string]string) *Stopwatch { + tags["vcenter"] = vCenter + return &Stopwatch{ + stat: selfstat.RegisterTiming("vsphere", name+"_ns", tags), + start: time.Now(), + } +} + +// Stop stops a Stopwatch and records the time. +func (s *Stopwatch) Stop() { + s.stat.Set(time.Since(s.start).Nanoseconds()) +} + +// SendInternalCounter is a convenience method for sending +// non-timing internal metrics. +func SendInternalCounter(name, vCenter string, value int64) { + s := selfstat.Register("vsphere", name, map[string]string{"vcenter": vCenter}) + s.Set(value) +} + +// SendInternalCounterWithTags is a convenience method for sending +// non-timing internal metrics. 
Allows additional tags +func SendInternalCounterWithTags(name, vCenter string, tags map[string]string, value int64) { + tags["vcenter"] = vCenter + s := selfstat.Register("vsphere", name, tags) + s.Set(value) +} diff --git a/plugins/inputs/vsphere/throttled_exec.go b/plugins/inputs/vsphere/throttled_exec.go new file mode 100644 index 000000000..ac95b496c --- /dev/null +++ b/plugins/inputs/vsphere/throttled_exec.go @@ -0,0 +1,45 @@ +package vsphere + +import ( + "context" + "sync" +) + +// ThrottledExecutor provides a simple mechanism for running jobs in separate +// goroutines while limit the number of concurrent jobs running at any given time. +type ThrottledExecutor struct { + limiter chan struct{} + wg sync.WaitGroup +} + +// NewThrottledExecutor creates a new ThrottlesExecutor with a specified maximum +// number of concurrent jobs +func NewThrottledExecutor(limit int) *ThrottledExecutor { + if limit == 0 { + panic("Limit must be > 0") + } + return &ThrottledExecutor{limiter: make(chan struct{}, limit)} +} + +// Run schedules a job for execution as soon as possible while respecting the +// maximum concurrency limit. +func (t *ThrottledExecutor) Run(ctx context.Context, job func()) { + t.wg.Add(1) + go func() { + defer t.wg.Done() + select { + case t.limiter <- struct{}{}: + defer func() { + <-t.limiter + }() + job() + case <-ctx.Done(): + return + } + }() +} + +// Wait blocks until all scheduled jobs have finished +func (t *ThrottledExecutor) Wait() { + t.wg.Wait() +} diff --git a/plugins/inputs/vsphere/tscache.go b/plugins/inputs/vsphere/tscache.go new file mode 100644 index 000000000..1be75d760 --- /dev/null +++ b/plugins/inputs/vsphere/tscache.go @@ -0,0 +1,67 @@ +package vsphere + +import ( + "log" + "sync" + "time" +) + +// TSCache is a cache of timestamps used to determine the validity of datapoints +type TSCache struct { + ttl time.Duration + table map[string]time.Time + mux sync.RWMutex +} + +// NewTSCache creates a new TSCache with a specified time-to-live after which timestamps are discarded. +func NewTSCache(ttl time.Duration) *TSCache { + return &TSCache{ + ttl: ttl, + table: make(map[string]time.Time), + } +} + +// Purge removes timestamps that are older than the time-to-live +func (t *TSCache) Purge() { + t.mux.Lock() + defer t.mux.Unlock() + n := 0 + for k, v := range t.table { + if time.Now().Sub(v) > t.ttl { + delete(t.table, k) + n++ + } + } + log.Printf("D! [inputs.vsphere] purged timestamp cache. %d deleted with %d remaining", n, len(t.table)) +} + +// IsNew returns true if the supplied timestamp for the supplied key is more recent than the +// timestamp we have on record. +func (t *TSCache) IsNew(key string, metricName string, tm time.Time) bool { + t.mux.RLock() + defer t.mux.RUnlock() + v, ok := t.table[makeKey(key, metricName)] + if !ok { + return true // We've never seen this before, so consider everything a new sample + } + return !tm.Before(v) +} + +// Get returns a timestamp (if present) +func (t *TSCache) Get(key string, metricName string) (time.Time, bool) { + t.mux.RLock() + defer t.mux.RUnlock() + ts, ok := t.table[makeKey(key, metricName)] + return ts, ok +} + +// Put updates the latest timestamp for the supplied key. 
+func (t *TSCache) Put(key string, metricName string, time time.Time) { + t.mux.Lock() + defer t.mux.Unlock() + t.table[makeKey(key, metricName)] = time +} + +func makeKey(resource string, metric string) string { + return resource + "|" + metric +} diff --git a/plugins/inputs/vsphere/vsphere.go b/plugins/inputs/vsphere/vsphere.go new file mode 100644 index 000000000..e9a75510f --- /dev/null +++ b/plugins/inputs/vsphere/vsphere.go @@ -0,0 +1,372 @@ +package vsphere + +import ( + "context" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" + "github.com/vmware/govmomi/vim25/soap" +) + +// VSphere is the top level type for the vSphere input plugin. It contains all the configuration +// and a list of connected vSphere endpoints +type VSphere struct { + Vcenters []string + Username string + Password string + DatacenterInstances bool + DatacenterMetricInclude []string + DatacenterMetricExclude []string + DatacenterInclude []string + DatacenterExclude []string + ClusterInstances bool + ClusterMetricInclude []string + ClusterMetricExclude []string + ClusterInclude []string + ClusterExclude []string + HostInstances bool + HostMetricInclude []string + HostMetricExclude []string + HostInclude []string + HostExclude []string + VMInstances bool `toml:"vm_instances"` + VMMetricInclude []string `toml:"vm_metric_include"` + VMMetricExclude []string `toml:"vm_metric_exclude"` + VMInclude []string `toml:"vm_include"` + VMExclude []string `toml:"vm_exclude"` + DatastoreInstances bool + DatastoreMetricInclude []string + DatastoreMetricExclude []string + DatastoreInclude []string + DatastoreExclude []string + Separator string + CustomAttributeInclude []string + CustomAttributeExclude []string + UseIntSamples bool + IpAddresses []string + + MaxQueryObjects int + MaxQueryMetrics int + CollectConcurrency int + DiscoverConcurrency int + ForceDiscoverOnInit bool + ObjectDiscoveryInterval internal.Duration + Timeout internal.Duration + + endpoints []*Endpoint + cancel context.CancelFunc + + // Mix in the TLS/SSL goodness from core + tls.ClientConfig + + Log telegraf.Logger +} + +var sampleConfig = ` + ## List of vCenter URLs to be monitored. These three lines must be uncommented + ## and edited for the plugin to work. 
+ vcenters = [ "https://vcenter.local/sdk" ] + username = "user@corp.local" + password = "secret" + + ## VMs + ## Typical VM metrics (if omitted or empty, all metrics are collected) + # vm_include = [ "/*/vm/**"] # Inventory path to VMs to collect (by default all are collected) + # vm_exclude = [] # Inventory paths to exclude + vm_metric_include = [ + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.run.summation", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.wait.summation", + "mem.active.average", + "mem.granted.average", + "mem.latency.average", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.usage.average", + "power.power.average", + "virtualDisk.numberReadAveraged.average", + "virtualDisk.numberWriteAveraged.average", + "virtualDisk.read.average", + "virtualDisk.readOIO.latest", + "virtualDisk.throughput.usage.average", + "virtualDisk.totalReadLatency.average", + "virtualDisk.totalWriteLatency.average", + "virtualDisk.write.average", + "virtualDisk.writeOIO.latest", + "sys.uptime.latest", + ] + # vm_metric_exclude = [] ## Nothing is excluded by default + # vm_instances = true ## true by default + + ## Hosts + ## Typical host metrics (if omitted or empty, all metrics are collected) + # host_include = [ "/*/host/**"] # Inventory path to hosts to collect (by default all are collected) + # host_exclude [] # Inventory paths to exclude + host_metric_include = [ + "cpu.coreUtilization.average", + "cpu.costop.summation", + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.swapwait.summation", + "cpu.usage.average", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.utilization.average", + "cpu.wait.summation", + "disk.deviceReadLatency.average", + "disk.deviceWriteLatency.average", + "disk.kernelReadLatency.average", + "disk.kernelWriteLatency.average", + "disk.numberReadAveraged.average", + "disk.numberWriteAveraged.average", + "disk.read.average", + "disk.totalReadLatency.average", + "disk.totalWriteLatency.average", + "disk.write.average", + "mem.active.average", + "mem.latency.average", + "mem.state.latest", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.totalCapacity.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.errorsRx.summation", + "net.errorsTx.summation", + "net.usage.average", + "power.power.average", + "storageAdapter.numberReadAveraged.average", + "storageAdapter.numberWriteAveraged.average", + "storageAdapter.read.average", + "storageAdapter.write.average", + "sys.uptime.latest", + ] + ## Collect IP addresses? 
Valid values are "ipv4" and "ipv6" + # ip_addresses = ["ipv6", "ipv4" ] + + # host_metric_exclude = [] ## Nothing excluded by default + # host_instances = true ## true by default + + + ## Clusters + # cluster_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) + # cluster_exclude = [] # Inventory paths to exclude + # cluster_metric_include = [] ## if omitted or empty, all metrics are collected + # cluster_metric_exclude = [] ## Nothing excluded by default + # cluster_instances = false ## false by default + + ## Datastores + # datastore_include = [ "/*/datastore/**"] # Inventory path to datastores to collect (by default all are collected) + # datastore_exclude = [] # Inventory paths to exclude + # datastore_metric_include = [] ## if omitted or empty, all metrics are collected + # datastore_metric_exclude = [] ## Nothing excluded by default + # datastore_instances = false ## false by default + + ## Datacenters + # datacenter_include = [ "/*/host/**"] # Inventory path to clusters to collect (by default all are collected) + # datacenter_exclude = [] # Inventory paths to exclude + datacenter_metric_include = [] ## if omitted or empty, all metrics are collected + datacenter_metric_exclude = [ "*" ] ## Datacenters are not collected by default. + # datacenter_instances = false ## false by default + + ## Plugin Settings + ## separator character to use for measurement and field names (default: "_") + # separator = "_" + + ## number of objects to retrieve per query for realtime resources (vms and hosts) + ## set to 64 for vCenter 5.5 and 6.0 (default: 256) + # max_query_objects = 256 + + ## number of metrics to retrieve per query for non-realtime resources (clusters and datastores) + ## set to 64 for vCenter 5.5 and 6.0 (default: 256) + # max_query_metrics = 256 + + ## number of go routines to use for collection and discovery of objects and metrics + # collect_concurrency = 1 + # discover_concurrency = 1 + + ## the interval before (re)discovering objects subject to metrics collection (default: 300s) + # object_discovery_interval = "300s" + + ## timeout applies to any of the api request made to vcenter + # timeout = "60s" + + ## When set to true, all samples are sent as integers. This makes the output + ## data types backwards compatible with Telegraf 1.9 or lower. Normally all + ## samples from vCenter, with the exception of percentages, are integer + ## values, but under some conditions, some averaging takes place internally in + ## the plugin. Setting this flag to "false" will send values as floats to + ## preserve the full precision when averaging takes place. + # use_int_samples = true + + ## Custom attributes from vCenter can be very useful for queries in order to slice the + ## metrics along different dimension and for forming ad-hoc relationships. They are disabled + ## by default, since they can add a considerable amount of tags to the resulting metrics. To + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include + ## to select the attributes you want to include. + ## By default, since they can add a considerable amount of tags to the resulting metrics. To + ## enable, simply set custom_attribute_exclude to [] (empty set) and use custom_attribute_include + ## to select the attributes you want to include. 
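  ## For example (attribute name invented), collecting only an "Owner" attribute would use:
  ##   custom_attribute_exclude = []
  ##   custom_attribute_include = [ "Owner" ]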
+ # custom_attribute_include = [] + # custom_attribute_exclude = ["*"] + + ## Optional SSL Config + # ssl_ca = "/path/to/cafile" + # ssl_cert = "/path/to/certfile" + # ssl_key = "/path/to/keyfile" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false +` + +// SampleConfig returns a set of default configuration to be used as a boilerplate when setting up +// Telegraf. +func (v *VSphere) SampleConfig() string { + return sampleConfig +} + +// Description returns a short textual description of the plugin +func (v *VSphere) Description() string { + return "Read metrics from VMware vCenter" +} + +// Start is called from telegraf core when a plugin is started and allows it to +// perform initialization tasks. +func (v *VSphere) Start(acc telegraf.Accumulator) error { + v.Log.Info("Starting plugin") + ctx, cancel := context.WithCancel(context.Background()) + v.cancel = cancel + + // Check for deprecated settings + if !v.ForceDiscoverOnInit { + v.Log.Warn("The 'force_discover_on_init' configuration parameter has been deprecated. Setting it to 'false' has no effect") + } + + // Create endpoints, one for each vCenter we're monitoring + v.endpoints = make([]*Endpoint, len(v.Vcenters)) + for i, rawURL := range v.Vcenters { + u, err := soap.ParseURL(rawURL) + if err != nil { + return err + } + ep, err := NewEndpoint(ctx, v, u, v.Log) + if err != nil { + return err + } + v.endpoints[i] = ep + } + return nil +} + +// Stop is called from telegraf core when a plugin is stopped and allows it to +// perform shutdown tasks. +func (v *VSphere) Stop() { + v.Log.Info("Stopping plugin") + v.cancel() + + // Wait for all endpoints to finish. No need to wait for + // Gather() to finish here, since it Stop() will only be called + // after the last Gather() has finished. We do, however, need to + // wait for any discovery to complete by trying to grab the + // "busy" mutex. + for _, ep := range v.endpoints { + v.Log.Debugf("Waiting for endpoint %q to finish", ep.URL.Host) + func() { + ep.busy.Lock() // Wait until discovery is finished + defer ep.busy.Unlock() + ep.Close() + }() + } +} + +// Gather is the main data collection function called by the Telegraf core. It performs all +// the data collection and writes all metrics into the Accumulator passed as an argument. +func (v *VSphere) Gather(acc telegraf.Accumulator) error { + var wg sync.WaitGroup + for _, ep := range v.endpoints { + wg.Add(1) + go func(endpoint *Endpoint) { + defer wg.Done() + err := endpoint.Collect(context.Background(), acc) + if err == context.Canceled { + + // No need to signal errors if we were merely canceled. 
+ err = nil + } + if err != nil { + acc.AddError(err) + } + }(ep) + } + + wg.Wait() + return nil +} + +func init() { + inputs.Add("vsphere", func() telegraf.Input { + return &VSphere{ + Vcenters: []string{}, + + DatacenterInstances: false, + DatacenterMetricInclude: nil, + DatacenterMetricExclude: nil, + DatacenterInclude: []string{"/*"}, + ClusterInstances: false, + ClusterMetricInclude: nil, + ClusterMetricExclude: nil, + ClusterInclude: []string{"/*/host/**"}, + HostInstances: true, + HostMetricInclude: nil, + HostMetricExclude: nil, + HostInclude: []string{"/*/host/**"}, + VMInstances: true, + VMMetricInclude: nil, + VMMetricExclude: nil, + VMInclude: []string{"/*/vm/**"}, + DatastoreInstances: false, + DatastoreMetricInclude: nil, + DatastoreMetricExclude: nil, + DatastoreInclude: []string{"/*/datastore/**"}, + Separator: "_", + CustomAttributeInclude: []string{}, + CustomAttributeExclude: []string{"*"}, + UseIntSamples: true, + IpAddresses: []string{}, + + MaxQueryObjects: 256, + MaxQueryMetrics: 256, + CollectConcurrency: 1, + DiscoverConcurrency: 1, + ForceDiscoverOnInit: true, + ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, + Timeout: internal.Duration{Duration: time.Second * 60}, + } + }) +} diff --git a/plugins/inputs/vsphere/vsphere_test.go b/plugins/inputs/vsphere/vsphere_test.go new file mode 100644 index 000000000..3c0a31312 --- /dev/null +++ b/plugins/inputs/vsphere/vsphere_test.go @@ -0,0 +1,466 @@ +package vsphere + +import ( + "context" + "crypto/tls" + "fmt" + "regexp" + "testing" + "time" + "unsafe" + + "github.com/influxdata/telegraf/internal" + itls "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/testutil" + "github.com/influxdata/toml" + "github.com/stretchr/testify/require" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/simulator" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +var configHeader = ` +[agent] + interval = "10s" + round_interval = true + metric_batch_size = 1000 + metric_buffer_limit = 10000 + collection_jitter = "0s" + flush_interval = "10s" + flush_jitter = "0s" + precision = "" + debug = false + quiet = false + logfile = "" + hostname = "" + omit_hostname = false +` + +func defaultVSphere() *VSphere { + return &VSphere{ + Log: testutil.Logger{}, + ClusterMetricInclude: []string{ + "cpu.usage.*", + "cpu.usagemhz.*", + "mem.usage.*", + "mem.active.*"}, + ClusterMetricExclude: nil, + ClusterInclude: []string{"/**"}, + HostMetricInclude: []string{ + "cpu.coreUtilization.average", + "cpu.costop.summation", + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.swapwait.summation", + "cpu.usage.average", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.utilization.average", + "cpu.wait.summation", + "disk.deviceReadLatency.average", + "disk.deviceWriteLatency.average", + "disk.kernelReadLatency.average", + "disk.kernelWriteLatency.average", + "disk.numberReadAveraged.average", + "disk.numberWriteAveraged.average", + "disk.read.average", + "disk.totalReadLatency.average", + "disk.totalWriteLatency.average", + "disk.write.average", + "mem.active.average", + "mem.latency.average", + "mem.state.latest", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.totalCapacity.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + 
"net.droppedRx.summation", + "net.droppedTx.summation", + "net.errorsRx.summation", + "net.errorsTx.summation", + "net.usage.average", + "power.power.average", + "storageAdapter.numberReadAveraged.average", + "storageAdapter.numberWriteAveraged.average", + "storageAdapter.read.average", + "storageAdapter.write.average", + "sys.uptime.latest"}, + HostMetricExclude: nil, + HostInclude: []string{"/**"}, + VMMetricInclude: []string{ + "cpu.demand.average", + "cpu.idle.summation", + "cpu.latency.average", + "cpu.readiness.average", + "cpu.ready.summation", + "cpu.run.summation", + "cpu.usagemhz.average", + "cpu.used.summation", + "cpu.wait.summation", + "mem.active.average", + "mem.granted.average", + "mem.latency.average", + "mem.swapin.average", + "mem.swapinRate.average", + "mem.swapout.average", + "mem.swapoutRate.average", + "mem.usage.average", + "mem.vmmemctl.average", + "net.bytesRx.average", + "net.bytesTx.average", + "net.droppedRx.summation", + "net.droppedTx.summation", + "net.usage.average", + "power.power.average", + "virtualDisk.numberReadAveraged.average", + "virtualDisk.numberWriteAveraged.average", + "virtualDisk.read.average", + "virtualDisk.readOIO.latest", + "virtualDisk.throughput.usage.average", + "virtualDisk.totalReadLatency.average", + "virtualDisk.totalWriteLatency.average", + "virtualDisk.write.average", + "virtualDisk.writeOIO.latest", + "sys.uptime.latest"}, + VMMetricExclude: nil, + VMInclude: []string{"/**"}, + DatastoreMetricInclude: []string{ + "disk.used.*", + "disk.provisioned.*"}, + DatastoreMetricExclude: nil, + DatastoreInclude: []string{"/**"}, + DatacenterMetricInclude: nil, + DatacenterMetricExclude: nil, + DatacenterInclude: []string{"/**"}, + ClientConfig: itls.ClientConfig{InsecureSkipVerify: true}, + + MaxQueryObjects: 256, + MaxQueryMetrics: 256, + ObjectDiscoveryInterval: internal.Duration{Duration: time.Second * 300}, + Timeout: internal.Duration{Duration: time.Second * 20}, + ForceDiscoverOnInit: true, + DiscoverConcurrency: 1, + CollectConcurrency: 1, + } +} + +func createSim(folders int) (*simulator.Model, *simulator.Server, error) { + model := simulator.VPX() + + model.Folder = folders + model.Datacenter = 2 + //model.App = 1 + + err := model.Create() + if err != nil { + return nil, nil, err + } + + model.Service.TLS = new(tls.Config) + + s := model.Service.NewServer() + return model, s, nil +} + +func testAlignUniform(t *testing.T, n int) { + now := time.Now().Truncate(60 * time.Second) + info := make([]types.PerfSampleInfo, n) + values := make([]int64, n) + for i := 0; i < n; i++ { + info[i] = types.PerfSampleInfo{ + Timestamp: now.Add(time.Duration(20*i) * time.Second), + Interval: 20, + } + values[i] = 1 + } + e := Endpoint{log: testutil.Logger{}} + newInfo, newValues := e.alignSamples(info, values, 60*time.Second) + require.Equal(t, n/3, len(newInfo), "Aligned infos have wrong size") + require.Equal(t, n/3, len(newValues), "Aligned values have wrong size") + for _, v := range newValues { + require.Equal(t, 1.0, v, "Aligned value should be 1") + } +} + +func TestAlignMetrics(t *testing.T) { + testAlignUniform(t, 3) + testAlignUniform(t, 30) + testAlignUniform(t, 333) + + // 20s to 60s of 1,2,3,1,2,3... 
(should average to 2) + n := 30 + now := time.Now().Truncate(60 * time.Second) + info := make([]types.PerfSampleInfo, n) + values := make([]int64, n) + for i := 0; i < n; i++ { + info[i] = types.PerfSampleInfo{ + Timestamp: now.Add(time.Duration(20*i) * time.Second), + Interval: 20, + } + values[i] = int64(i%3 + 1) + } + e := Endpoint{log: testutil.Logger{}} + newInfo, newValues := e.alignSamples(info, values, 60*time.Second) + require.Equal(t, n/3, len(newInfo), "Aligned infos have wrong size") + require.Equal(t, n/3, len(newValues), "Aligned values have wrong size") + for _, v := range newValues { + require.Equal(t, 2.0, v, "Aligned value should be 2") + } +} + +func TestParseConfig(t *testing.T) { + v := VSphere{} + c := v.SampleConfig() + p := regexp.MustCompile("\n#") + fmt.Printf("Source=%s", p.ReplaceAllLiteralString(c, "\n")) + c = configHeader + "\n[[inputs.vsphere]]\n" + p.ReplaceAllLiteralString(c, "\n") + fmt.Printf("Source=%s", c) + tab, err := toml.Parse([]byte(c)) + require.NoError(t, err) + require.NotNil(t, tab) +} + +func TestMaxQuery(t *testing.T) { + // Don't run test on 32-bit machines due to bug in simulator. + // https://github.com/vmware/govmomi/issues/1330 + var i int + if unsafe.Sizeof(i) < 8 { + return + } + m, s, err := createSim(0) + if err != nil { + t.Fatal(err) + } + defer m.Remove() + defer s.Close() + + v := defaultVSphere() + v.MaxQueryMetrics = 256 + ctx := context.Background() + c, err := NewClient(ctx, s.URL, v) + if err != nil { + t.Fatal(err) + } + require.Equal(t, 256, v.MaxQueryMetrics) + + om := object.NewOptionManager(c.Client.Client, *c.Client.Client.ServiceContent.Setting) + err = om.Update(ctx, []types.BaseOptionValue{&types.OptionValue{ + Key: "config.vpxd.stats.maxQueryMetrics", + Value: "42", + }}) + if err != nil { + t.Fatal(err) + } + + v.MaxQueryMetrics = 256 + ctx = context.Background() + c2, err := NewClient(ctx, s.URL, v) + if err != nil { + t.Fatal(err) + } + require.Equal(t, 42, v.MaxQueryMetrics) + c.close() + c2.close() +} + +func testLookupVM(ctx context.Context, t *testing.T, f *Finder, path string, expected int, expectedName string) { + poweredOn := types.VirtualMachinePowerState("poweredOn") + var vm []mo.VirtualMachine + err := f.Find(ctx, "VirtualMachine", path, &vm) + require.NoError(t, err) + require.Equal(t, expected, len(vm)) + if expectedName != "" { + require.Equal(t, expectedName, vm[0].Name) + } + for _, v := range vm { + require.Equal(t, poweredOn, v.Runtime.PowerState) + } +} + +func TestFinder(t *testing.T) { + // Don't run test on 32-bit machines due to bug in simulator. 
+ // https://github.com/vmware/govmomi/issues/1330 + var i int + if unsafe.Sizeof(i) < 8 { + return + } + + m, s, err := createSim(0) + if err != nil { + t.Fatal(err) + } + defer m.Remove() + defer s.Close() + + v := defaultVSphere() + ctx := context.Background() + + c, err := NewClient(ctx, s.URL, v) + + f := Finder{c} + + var dc []mo.Datacenter + err = f.Find(ctx, "Datacenter", "/DC0", &dc) + require.NoError(t, err) + require.Equal(t, 1, len(dc)) + require.Equal(t, "DC0", dc[0].Name) + + var host []mo.HostSystem + err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_H0/DC0_H0", &host) + require.NoError(t, err) + require.Equal(t, 1, len(host)) + require.Equal(t, "DC0_H0", host[0].Name) + + host = []mo.HostSystem{} + err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_C0/DC0_C0_H0", &host) + require.NoError(t, err) + require.Equal(t, 1, len(host)) + require.Equal(t, "DC0_C0_H0", host[0].Name) + + host = []mo.HostSystem{} + err = f.Find(ctx, "HostSystem", "/DC0/host/DC0_C0/*", &host) + require.NoError(t, err) + require.Equal(t, 3, len(host)) + + var vm []mo.VirtualMachine + testLookupVM(ctx, t, &f, "/DC0/vm/DC0_H0_VM0", 1, "") + testLookupVM(ctx, t, &f, "/DC0/vm/DC0_C0*", 2, "") + testLookupVM(ctx, t, &f, "/DC0/*/DC0_H0_VM0", 1, "DC0_H0_VM0") + testLookupVM(ctx, t, &f, "/DC0/*/DC0_H0_*", 2, "") + testLookupVM(ctx, t, &f, "/DC0/**/DC0_H0_VM*", 2, "") + testLookupVM(ctx, t, &f, "/DC0/**", 4, "") + testLookupVM(ctx, t, &f, "/DC1/**", 4, "") + testLookupVM(ctx, t, &f, "/**", 8, "") + testLookupVM(ctx, t, &f, "/**/vm/**", 8, "") + testLookupVM(ctx, t, &f, "/*/host/**/*DC*", 8, "") + testLookupVM(ctx, t, &f, "/*/host/**/*DC*VM*", 8, "") + testLookupVM(ctx, t, &f, "/*/host/**/*DC*/*/*DC*", 4, "") + + vm = []mo.VirtualMachine{} + err = f.FindAll(ctx, "VirtualMachine", []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, []string{}, &vm) + require.NoError(t, err) + require.Equal(t, 4, len(vm)) + + rf := ResourceFilter{ + finder: &f, + paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, + excludePaths: []string{"/DC0/vm/DC0_H0_VM0"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 3, len(vm)) + + rf = ResourceFilter{ + finder: &f, + paths: []string{"/DC0/vm/DC0_H0*", "/DC0/vm/DC0_C0*"}, + excludePaths: []string{"/**"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 0, len(vm)) + + rf = ResourceFilter{ + finder: &f, + paths: []string{"/**"}, + excludePaths: []string{"/**"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 0, len(vm)) + + rf = ResourceFilter{ + finder: &f, + paths: []string{"/**"}, + excludePaths: []string{"/this won't match anything"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 8, len(vm)) + + rf = ResourceFilter{ + finder: &f, + paths: []string{"/**"}, + excludePaths: []string{"/**/*VM0"}, + resType: "VirtualMachine", + } + vm = []mo.VirtualMachine{} + require.NoError(t, rf.FindAll(ctx, &vm)) + require.Equal(t, 4, len(vm)) +} + +func TestFolders(t *testing.T) { + // Don't run test on 32-bit machines due to bug in simulator. 
+ // https://github.com/vmware/govmomi/issues/1330 + var i int + if unsafe.Sizeof(i) < 8 { + return + } + + m, s, err := createSim(1) + if err != nil { + t.Fatal(err) + } + defer m.Remove() + defer s.Close() + + v := defaultVSphere() + ctx := context.Background() + + c, err := NewClient(ctx, s.URL, v) + + f := Finder{c} + + var folder []mo.Folder + err = f.Find(ctx, "Folder", "/F0", &folder) + require.NoError(t, err) + require.Equal(t, 1, len(folder)) + require.Equal(t, "F0", folder[0].Name) + + var dc []mo.Datacenter + err = f.Find(ctx, "Datacenter", "/F0/DC1", &dc) + require.NoError(t, err) + require.Equal(t, 1, len(dc)) + require.Equal(t, "DC1", dc[0].Name) + + testLookupVM(ctx, t, &f, "/F0/DC0/vm/**/F*", 0, "") + testLookupVM(ctx, t, &f, "/F0/DC1/vm/**/F*/*VM*", 4, "") + testLookupVM(ctx, t, &f, "/F0/DC1/vm/**/F*/**", 4, "") +} + +func TestAll(t *testing.T) { + // Don't run test on 32-bit machines due to bug in simulator. + // https://github.com/vmware/govmomi/issues/1330 + var i int + if unsafe.Sizeof(i) < 8 { + return + } + + m, s, err := createSim(0) + if err != nil { + t.Fatal(err) + } + defer m.Remove() + defer s.Close() + + var acc testutil.Accumulator + v := defaultVSphere() + v.Vcenters = []string{s.URL.String()} + v.Start(&acc) + defer v.Stop() + require.NoError(t, v.Gather(&acc)) + require.Equal(t, 0, len(acc.Errors), fmt.Sprintf("Errors found: %s", acc.Errors)) + require.True(t, len(acc.Metrics) > 0, "No metrics were collected") +} diff --git a/plugins/inputs/webhooks/README.md b/plugins/inputs/webhooks/README.md index 13141fc4b..c6c7daf35 100644 --- a/plugins/inputs/webhooks/README.md +++ b/plugins/inputs/webhooks/README.md @@ -13,7 +13,36 @@ $ cp config.conf.new /etc/telegraf/telegraf.conf $ sudo service telegraf start ``` -## Available webhooks + +### Configuration: + +```toml +[[inputs.webhooks]] + ## Address and port to host Webhook listener on + service_address = ":1619" + + [inputs.webhooks.filestack] + path = "/filestack" + + [inputs.webhooks.github] + path = "/github" + # secret = "" + + [inputs.webhooks.mandrill] + path = "/mandrill" + + [inputs.webhooks.rollbar] + path = "/rollbar" + + [inputs.webhooks.papertrail] + path = "/papertrail" + + [inputs.webhooks.particle] + path = "/particle" +``` + + +### Available webhooks - [Filestack](filestack/) - [Github](github/) @@ -23,7 +52,7 @@ $ sudo service telegraf start - [Particle](particle/) -## Adding new webhooks plugin +### Adding new webhooks plugin 1. Add your webhook plugin inside the `webhooks` folder 1. Your plugin must implement the `Webhook` interface diff --git a/plugins/inputs/webhooks/filestack/README.md b/plugins/inputs/webhooks/filestack/README.md index 585e6f202..7af2a780d 100644 --- a/plugins/inputs/webhooks/filestack/README.md +++ b/plugins/inputs/webhooks/filestack/README.md @@ -1,6 +1,6 @@ # Filestack webhook -You should configure your Filestack's Webhooks to point at the `webhooks` service. To do this go to `filestack.com/`, select your app and click `Credentials > Webhooks`. In the resulting page, set the `URL` to `http://:1619/filestack`, and click on `Add`. +You should configure your Filestack's Webhooks to point at the `webhooks` service. To do this go to [filestack.com](https://www.filestack.com/), select your app and click `Credentials > Webhooks`. In the resulting page, set the `URL` to `http://:1619/filestack`, and click on `Add`. 
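The webhooks README above notes that a new plugin must implement the `Webhook` interface. As a reference point, here is a minimal sketch of such a handler, assuming the interface has the `Register(router *mux.Router, acc telegraf.Accumulator)` shape used by the existing handlers; the `ExampleWebhook` name, `/example`-style path, and measurement name are illustrative only and not part of this change.

```go
package example

import (
	"net/http"
	"time"

	"github.com/gorilla/mux"
	"github.com/influxdata/telegraf"
)

// ExampleWebhook is a hypothetical webhooks handler used only to illustrate
// the plugin shape; it is not part of Telegraf.
type ExampleWebhook struct {
	Path string
	acc  telegraf.Accumulator
}

// Register wires the handler into the shared webhooks router, mirroring the
// pattern of the existing handlers (github, rollbar, particle, ...).
func (wh *ExampleWebhook) Register(router *mux.Router, acc telegraf.Accumulator) {
	router.HandleFunc(wh.Path, wh.eventHandler).Methods("POST")
	wh.acc = acc
}

// eventHandler records one metric per received POST and acknowledges it.
func (wh *ExampleWebhook) eventHandler(w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	tags := map[string]string{"path": wh.Path}
	fields := map[string]interface{}{"hits": 1}
	wh.acc.AddFields("example_webhook", fields, tags, time.Now())

	w.WriteHeader(http.StatusOK)
}
```

Hooking such a handler up would then follow the same steps as the existing entries: add a config section for its path and register it in `webhooks.go`.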
## Events diff --git a/plugins/inputs/webhooks/github/README.md b/plugins/inputs/webhooks/github/README.md index 5115d287c..4a4e64c73 100644 --- a/plugins/inputs/webhooks/github/README.md +++ b/plugins/inputs/webhooks/github/README.md @@ -78,7 +78,7 @@ The tag values and field values show the place on the incoming JSON object where * 'issues' = `event.repository.open_issues_count` int * 'commit' = `event.deployment.sha` string * 'task' = `event.deployment.task` string -* 'environment' = `event.deployment.evnironment` string +* 'environment' = `event.deployment.environment` string * 'description' = `event.deployment.description` string #### [`deployment_status` event](https://developer.github.com/v3/activity/events/types/#deploymentstatusevent) @@ -96,7 +96,7 @@ The tag values and field values show the place on the incoming JSON object where * 'issues' = `event.repository.open_issues_count` int * 'commit' = `event.deployment.sha` string * 'task' = `event.deployment.task` string -* 'environment' = `event.deployment.evnironment` string +* 'environment' = `event.deployment.environment` string * 'description' = `event.deployment.description` string * 'depState' = `event.deployment_status.state` string * 'depDescription' = `event.deployment_status.description` string diff --git a/plugins/inputs/webhooks/mandrill/README.md b/plugins/inputs/webhooks/mandrill/README.md index 2fb4914e1..9c4f3a58c 100644 --- a/plugins/inputs/webhooks/mandrill/README.md +++ b/plugins/inputs/webhooks/mandrill/README.md @@ -1,6 +1,6 @@ # mandrill webhook -You should configure your Mandrill's Webhooks to point at the `webhooks` service. To do this go to `mandrillapp.com/` and click `Settings > Webhooks`. In the resulting page, click on `Add a Webhook`, select all events, and set the `URL` to `http://:1619/mandrill`, and click on `Create Webhook`. +You should configure your Mandrill's Webhooks to point at the `webhooks` service. To do this go to [mandrillapp.com](https://mandrillapp.com) and click `Settings > Webhooks`. In the resulting page, click on `Add a Webhook`, select all events, and set the `URL` to `http://:1619/mandrill`, and click on `Create Webhook`. ## Events diff --git a/plugins/inputs/webhooks/particle/README.md b/plugins/inputs/webhooks/particle/README.md index 4e3426da5..4dc83b347 100644 --- a/plugins/inputs/webhooks/particle/README.md +++ b/plugins/inputs/webhooks/particle/README.md @@ -1,7 +1,7 @@ # particle webhooks -You should configure your Particle.io's Webhooks to point at the `webhooks` service. To do this go to `(https://console.particle.io/)[https://console.particle.io]` and click `Integrations > New Integration > Webhook`. In the resulting page set `URL` to `http://:1619/particle`, and under `Advanced Settings` click on `JSON` and add: +You should configure your Particle.io's Webhooks to point at the `webhooks` service. To do this go to [https://console.particle.io](https://console.particle.io/) and click `Integrations > New Integration > Webhook`. In the resulting page set `URL` to `http://:1619/particle`, and under `Advanced Settings` click on `JSON` and add: ``` { @@ -31,7 +31,7 @@ String data = String::format("{ \"tags\" : { ``` Escaping the "" is required in the source file. -The number of tag values and field values is not restrictied so you can send as many values per webhook call as you'd like. +The number of tag values and field values is not restricted so you can send as many values per webhook call as you'd like. 
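Since the particle instructions above rely on the shared `webhooks` listener, a quick local sanity check is a plain HTTP POST against the configured route. This is a sketch only: it assumes the default `:1619` service_address from the sample configuration, and the JSON body is a placeholder, not the schema the particle handler actually parses.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Purely illustrative payload; the real particle handler expects the JSON
	// shape described in its README, not these placeholder fields.
	body := []byte(`{"event": "example", "data": "{}", "published_at": "2020-01-01T00:00:00Z"}`)

	// :1619 matches the service_address shown in the sample webhooks configuration.
	resp, err := http.Post("http://localhost:1619/particle", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("listener not reachable:", err)
		return
	}
	defer resp.Body.Close()

	// Any HTTP response confirms the listener is up and the route is registered;
	// a non-2xx status usually just means the body did not match the expected schema.
	fmt.Println("status:", resp.Status)
}
```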
You will need to enable JSON messages in the Webhooks setup of Particle.io, and make sure to check the "include default data" box as well. diff --git a/plugins/inputs/webhooks/rollbar/README.md b/plugins/inputs/webhooks/rollbar/README.md index b3f1bfeaa..471dc9fd0 100644 --- a/plugins/inputs/webhooks/rollbar/README.md +++ b/plugins/inputs/webhooks/rollbar/README.md @@ -1,6 +1,6 @@ # rollbar webhooks -You should configure your Rollbar's Webhooks to point at the `webhooks` service. To do this go to `rollbar.com/` and click `Settings > Notifications > Webhook`. In the resulting page set `URL` to `http://:1619/rollbar`, and click on `Enable Webhook Integration`. +You should configure your Rollbar's Webhooks to point at the `webhooks` service. To do this go to [rollbar.com](https://rollbar.com/) and click `Settings > Notifications > Webhook`. In the resulting page set `URL` to `http://:1619/rollbar`, and click on `Enable Webhook Integration`. ## Events diff --git a/plugins/inputs/webhooks/webhooks.go b/plugins/inputs/webhooks/webhooks.go index fa31ec490..4baaf6ffb 100644 --- a/plugins/inputs/webhooks/webhooks.go +++ b/plugins/inputs/webhooks/webhooks.go @@ -67,7 +67,7 @@ func (wb *Webhooks) SampleConfig() string { [inputs.webhooks.particle] path = "/particle" - ` +` } func (wb *Webhooks) Description() string { diff --git a/plugins/inputs/win_perf_counters/README.md b/plugins/inputs/win_perf_counters/README.md index cf2ba4d64..11496baff 100644 --- a/plugins/inputs/win_perf_counters/README.md +++ b/plugins/inputs/win_perf_counters/README.md @@ -174,7 +174,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ### Generic Queries ``` - +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # Processor usage, alternative to native, reports on a per core. ObjectName = "Processor" @@ -218,6 +218,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ### Active Directory Domain Controller ``` +[[inputs.win_perf_counters]] + [inputs.win_perf_counters.tags] + monitorgroup = "ActiveDirectory" [[inputs.win_perf_counters.object]] ObjectName = "DirectoryServices" Instances = ["*"] @@ -243,6 +246,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ### DFS Namespace + Domain Controllers ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # AD, DFS N, Useful if the server hosts a DFS Namespace or is a Domain Controller ObjectName = "DFS Namespace Service Referrals" @@ -253,9 +257,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. #WarnOnMissing = false # Print out when the performance counter is missing, either of object, counter or instance. ``` - ### DFS Replication + Domain Controllers ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # AD, DFS R, Useful if the server hosts a DFS Replication folder or is a Domain Controller ObjectName = "DFS Replication Service Volumes" @@ -266,9 +270,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. #WarnOnMissing = false # Print out when the performance counter is missing, either of object, counter or instance. 
``` - ### DNS Server + Domain Controllers ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] ObjectName = "DNS" Counters = ["Dynamic Update Received","Dynamic Update Rejected","Recursive Queries","Recursive Queries Failure","Secure Update Failure","Secure Update Received","TCP Query Received","TCP Response Sent","UDP Query Received","UDP Response Sent","Total Query Received","Total Response Sent"] @@ -279,6 +283,7 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. ### IIS / ASP.NET ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # HTTP Service request queues in the Kernel before being handed over to User Mode. ObjectName = "HTTP Service Request Queues" @@ -320,9 +325,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). ``` - ### Process ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # Process metrics, in this case for IIS only ObjectName = "Process" @@ -332,9 +337,9 @@ if any of the combinations of ObjectName/Instances/Counters are invalid. #IncludeTotal=false #Set to true to include _Total instance when querying for all (*). ``` - ### .NET Monitoring ``` +[[inputs.win_perf_counters]] [[inputs.win_perf_counters.object]] # .NET CLR Exceptions, in this case for IIS only ObjectName = ".NET CLR Exceptions" diff --git a/plugins/inputs/win_perf_counters/pdh.go b/plugins/inputs/win_perf_counters/pdh.go index 6a8dff10b..3a24761b9 100644 --- a/plugins/inputs/win_perf_counters/pdh.go +++ b/plugins/inputs/win_perf_counters/pdh.go @@ -214,7 +214,7 @@ func init() { // // To view all (internationalized...) counters on a system, there are three non-programmatic ways: perfmon utility, // the typeperf command, and the the registry editor. perfmon.exe is perhaps the easiest way, because it's basically a -// full implemention of the pdh.dll API, except with a GUI and all that. The registry setting also provides an +// full implementation of the pdh.dll API, except with a GUI and all that. 
The registry setting also provides an // interface to the available counters, and can be found at the following key: // // HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Perflib\CurrentLanguage diff --git a/plugins/inputs/win_perf_counters/performance_query.go b/plugins/inputs/win_perf_counters/performance_query.go index ce247a495..a59f96b84 100644 --- a/plugins/inputs/win_perf_counters/performance_query.go +++ b/plugins/inputs/win_perf_counters/performance_query.go @@ -74,7 +74,7 @@ func (m *PerformanceQueryImpl) Open() error { // Close closes the counterPath, releases associated counter handles and frees resources func (m *PerformanceQueryImpl) Close() error { if m.query == 0 { - return errors.New("uninitialised query") + return errors.New("uninitialized query") } if ret := PdhCloseQuery(m.query); ret != ERROR_SUCCESS { @@ -87,7 +87,7 @@ func (m *PerformanceQueryImpl) Close() error { func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) { var counterHandle PDH_HCOUNTER if m.query == 0 { - return 0, errors.New("uninitialised query") + return 0, errors.New("uninitialized query") } if ret := PdhAddCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS { @@ -99,7 +99,7 @@ func (m *PerformanceQueryImpl) AddCounterToQuery(counterPath string) (PDH_HCOUNT func (m *PerformanceQueryImpl) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) { var counterHandle PDH_HCOUNTER if m.query == 0 { - return 0, errors.New("uninitialised query") + return 0, errors.New("uninitialized query") } if ret := PdhAddEnglishCounter(m.query, counterPath, 0, &counterHandle); ret != ERROR_SUCCESS { return 0, NewPdhError(ret) @@ -184,7 +184,7 @@ func (m *PerformanceQueryImpl) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN func (m *PerformanceQueryImpl) CollectData() error { var ret uint32 if m.query == 0 { - return errors.New("uninitialised query") + return errors.New("uninitialized query") } if ret = PdhCollectQueryData(m.query); ret != ERROR_SUCCESS { @@ -195,7 +195,7 @@ func (m *PerformanceQueryImpl) CollectData() error { func (m *PerformanceQueryImpl) CollectDataWithTime() (time.Time, error) { if m.query == 0 { - return time.Now(), errors.New("uninitialised query") + return time.Now(), errors.New("uninitialized query") } ret, mtime := PdhCollectQueryDataWithTime(m.query) if ret != ERROR_SUCCESS { diff --git a/plugins/inputs/win_perf_counters/win_perf_counters.go b/plugins/inputs/win_perf_counters/win_perf_counters.go index d2ace5231..bd130a3fd 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters.go @@ -5,8 +5,6 @@ package win_perf_counters import ( "errors" "fmt" - "log" - "regexp" "strings" "time" @@ -37,9 +35,12 @@ var sampleConfig = ` ObjectName = "Processor" Instances = ["*"] Counters = [ - "%% Idle Time", "%% Interrupt Time", - "%% Privileged Time", "%% User Time", - "%% Processor Time" + "% Idle Time", + "% Interrupt Time", + "% Privileged Time", + "% User Time", + "% Processor Time", + "% DPC Time", ] Measurement = "win_cpu" # Set to true to include _Total instance when querying for all (*). 
@@ -52,14 +53,56 @@ var sampleConfig = ` ObjectName = "LogicalDisk" Instances = ["*"] Counters = [ - "%% Idle Time", "%% Disk Time","%% Disk Read Time", - "%% Disk Write Time", "%% User Time", "Current Disk Queue Length" + "% Idle Time", + "% Disk Time", + "% Disk Read Time", + "% Disk Write Time", + "% User Time", + "% Free Space", + "Current Disk Queue Length", + "Free Megabytes", ] Measurement = "win_disk" + [[inputs.win_perf_counters.object]] + ObjectName = "PhysicalDisk" + Instances = ["*"] + Counters = [ + "Disk Read Bytes/sec", + "Disk Write Bytes/sec", + "Current Disk Queue Length", + "Disk Reads/sec", + "Disk Writes/sec", + "% Disk Time", + "% Disk Read Time", + "% Disk Write Time", + ] + Measurement = "win_diskio" + + [[inputs.win_perf_counters.object]] + ObjectName = "Network Interface" + Instances = ["*"] + Counters = [ + "Bytes Received/sec", + "Bytes Sent/sec", + "Packets Received/sec", + "Packets Sent/sec", + "Packets Received Discarded", + "Packets Outbound Discarded", + "Packets Received Errors", + "Packets Outbound Errors", + ] + Measurement = "win_net" + + [[inputs.win_perf_counters.object]] ObjectName = "System" - Counters = ["Context Switches/sec","System Calls/sec"] + Counters = [ + "Context Switches/sec", + "System Calls/sec", + "Processor Queue Length", + "System Up Time", + ] Instances = ["------"] Measurement = "win_system" @@ -68,12 +111,30 @@ var sampleConfig = ` # such as from the Memory object. ObjectName = "Memory" Counters = [ - "Available Bytes", "Cache Faults/sec", "Demand Zero Faults/sec", - "Page Faults/sec", "Pages/sec", "Transition Faults/sec", - "Pool Nonpaged Bytes", "Pool Paged Bytes" + "Available Bytes", + "Cache Faults/sec", + "Demand Zero Faults/sec", + "Page Faults/sec", + "Pages/sec", + "Transition Faults/sec", + "Pool Nonpaged Bytes", + "Pool Paged Bytes", + "Standby Cache Reserve Bytes", + "Standby Cache Normal Priority Bytes", + "Standby Cache Core Bytes", ] Instances = ["------"] # Use 6 x - to remove the Instance bit from the counterPath. Measurement = "win_mem" + + [[inputs.win_perf_counters.object]] + # Example query where the Instance portion must be removed to get data back, + # such as from the Paging File object. + ObjectName = "Paging File" + Counters = [ + "% Usage", + ] + Instances = ["_Total"] + Measurement = "win_swap" ` type Win_PerfCounters struct { @@ -85,6 +146,8 @@ type Win_PerfCounters struct { CountersRefreshInterval internal.Duration UseWildcardsExpansion bool + Log telegraf.Logger + lastRefreshed time.Time counters []*counter query PerformanceQuery @@ -119,28 +182,57 @@ type instanceGrouping struct { var sanitizedChars = strings.NewReplacer("/sec", "_persec", "/Sec", "_persec", " ", "_", "%", "Percent", `\`, "") -//General Counter path pattern is: \\computer\object(parent/instance#index)\counter -//parent/instance#index part is skipped in single instance objects (e.g. Memory): \\computer\object\counter +// extractCounterInfoFromCounterPath gets object name, instance name (if available) and counter name from counter path +// General Counter path pattern is: \\computer\object(parent/instance#index)\counter +// parent/instance#index part is skipped in single instance objects (e.g. 
Memory): \\computer\object\counter +func extractCounterInfoFromCounterPath(counterPath string) (object string, instance string, counter string, err error) { -var counterPathRE = regexp.MustCompile(`.*\\(.*)\\(.*)`) -var objectInstanceRE = regexp.MustCompile(`(.*)\((.*)\)`) + rightObjectBorderIndex := -1 + leftObjectBorderIndex := -1 + leftCounterBorderIndex := -1 + rightInstanceBorderIndex := -1 + leftInstanceBorderIndex := -1 + bracketLevel := 0 -//extractObjectInstanceCounterFromQuery gets object name, instance name (if available) and counter name from counter path -func extractObjectInstanceCounterFromQuery(query string) (object string, instance string, counter string, err error) { - pathParts := counterPathRE.FindAllStringSubmatch(query, -1) - if pathParts == nil || len(pathParts[0]) != 3 { - err = errors.New("Could not extract counter info from: " + query) + for i := len(counterPath) - 1; i >= 0; i-- { + switch counterPath[i] { + case '\\': + if bracketLevel == 0 { + if leftCounterBorderIndex == -1 { + leftCounterBorderIndex = i + } else if leftObjectBorderIndex == -1 { + leftObjectBorderIndex = i + } + } + case '(': + bracketLevel-- + if leftInstanceBorderIndex == -1 && bracketLevel == 0 && leftObjectBorderIndex == -1 && leftCounterBorderIndex > -1 { + leftInstanceBorderIndex = i + rightObjectBorderIndex = i + } + case ')': + if rightInstanceBorderIndex == -1 && bracketLevel == 0 && leftCounterBorderIndex > -1 { + rightInstanceBorderIndex = i + } + bracketLevel++ + } + } + if rightObjectBorderIndex == -1 { + rightObjectBorderIndex = leftCounterBorderIndex + } + if rightObjectBorderIndex == -1 || leftObjectBorderIndex == -1 { + err = errors.New("cannot parse object from: " + counterPath) return } - counter = pathParts[0][2] - //try to get instance name - objectInstanceParts := objectInstanceRE.FindAllStringSubmatch(pathParts[0][1], -1) - if objectInstanceParts == nil || len(objectInstanceParts[0]) != 3 { - object = pathParts[0][1] - } else { - object = objectInstanceParts[0][1] - instance = objectInstanceParts[0][2] + + if leftInstanceBorderIndex > -1 && rightInstanceBorderIndex > -1 { + instance = counterPath[leftInstanceBorderIndex+1 : rightInstanceBorderIndex] + } else if (leftInstanceBorderIndex == -1 && rightInstanceBorderIndex > -1) || (leftInstanceBorderIndex > -1 && rightInstanceBorderIndex == -1) { + err = errors.New("cannot parse instance from: " + counterPath) + return } + object = counterPath[leftObjectBorderIndex+1 : rightObjectBorderIndex] + counter = counterPath[leftCounterBorderIndex+1:] return } @@ -184,7 +276,7 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan var err error counterHandle, err := m.query.AddCounterToQuery(counterPath) - objectName, instance, counterName, err = extractObjectInstanceCounterFromQuery(counterPath) + objectName, instance, counterName, err = extractCounterInfoFromCounterPath(counterPath) if err != nil { return err } @@ -198,7 +290,7 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan m.counters = append(m.counters, newItem) if m.PrintValid { - log.Printf("Valid: %s\n", counterPath) + m.Log.Infof("Valid: %s", counterPath) } } } else { @@ -206,7 +298,7 @@ func (m *Win_PerfCounters) AddItem(counterPath string, objectName string, instan includeTotal, counterHandle} m.counters = append(m.counters, newItem) if m.PrintValid { - log.Printf("Valid: %s\n", counterPath) + m.Log.Infof("Valid: %s", counterPath) } } @@ -232,7 +324,7 @@ func (m *Win_PerfCounters) ParseConfig() error { if 
err != nil { if PerfObject.FailOnMissing || PerfObject.WarnOnMissing { - log.Printf("Invalid counterPath: '%s'. Error: %s\n", counterPath, err.Error()) + m.Log.Errorf("Invalid counterPath: '%s'. Error: %s\n", counterPath, err.Error()) } if PerfObject.FailOnMissing { return err @@ -366,7 +458,8 @@ func addCounterMeasurement(metric *counter, instanceName string, value float64, func isKnownCounterDataError(err error) bool { if pdhErr, ok := err.(*PdhError); ok && (pdhErr.ErrorCode == PDH_INVALID_DATA || pdhErr.ErrorCode == PDH_CALC_NEGATIVE_VALUE || - pdhErr.ErrorCode == PDH_CSTATUS_INVALID_DATA) { + pdhErr.ErrorCode == PDH_CSTATUS_INVALID_DATA || + pdhErr.ErrorCode == PDH_NO_DATA) { return true } return false diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go index 546dfa143..822943949 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_integration_test.go @@ -27,15 +27,15 @@ func TestWinPerformanceQueryImpl(t *testing.T) { _, err = query.AddCounterToQuery("") require.Error(t, err, "uninitialized query must return errors") - assert.True(t, strings.Contains(err.Error(), "uninitialised")) + assert.True(t, strings.Contains(err.Error(), "uninitialized")) _, err = query.AddEnglishCounterToQuery("") require.Error(t, err, "uninitialized query must return errors") - assert.True(t, strings.Contains(err.Error(), "uninitialised")) + assert.True(t, strings.Contains(err.Error(), "uninitialized")) err = query.CollectData() require.Error(t, err, "uninitialized query must return errors") - assert.True(t, strings.Contains(err.Error(), "uninitialised")) + assert.True(t, strings.Contains(err.Error(), "uninitialized")) err = query.Open() require.NoError(t, err) diff --git a/plugins/inputs/win_perf_counters/win_perf_counters_test.go b/plugins/inputs/win_perf_counters/win_perf_counters_test.go index 07e1941a9..a11f0ace8 100644 --- a/plugins/inputs/win_perf_counters/win_perf_counters_test.go +++ b/plugins/inputs/win_perf_counters/win_perf_counters_test.go @@ -5,18 +5,20 @@ package win_perf_counters import ( "errors" "fmt" + "testing" + "time" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" - "time" ) type testCounter struct { handle PDH_HCOUNTER path string value float64 + status uint32 // allows for tests against specific pdh_error codes, rather than assuming all cases of "value == 0" to indicate error conditions } type FakePerformanceQuery struct { counters map[string]testCounter @@ -28,7 +30,7 @@ type FakePerformanceQuery struct { var MetricTime = time.Date(2018, 5, 28, 12, 0, 0, 0, time.UTC) func (m *testCounter) ToCounterValue() *CounterValue { - _, inst, _, _ := extractObjectInstanceCounterFromQuery(m.path) + _, inst, _, _ := extractCounterInfoFromCounterPath(m.path) if inst == "" { inst = "--" } @@ -48,7 +50,7 @@ func (m *FakePerformanceQuery) Open() error { func (m *FakePerformanceQuery) Close() error { if !m.openCalled { - return errors.New("CloSe: uninitialised query") + return errors.New("CloSe: uninitialized query") } m.openCalled = false return nil @@ -56,7 +58,7 @@ func (m *FakePerformanceQuery) Close() error { func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNTER, error) { if !m.openCalled { - return 0, errors.New("AddCounterToQuery: uninitialised query") + 
return 0, errors.New("AddCounterToQuery: uninitialized query") } if c, ok := m.counters[counterPath]; ok { return c.handle, nil @@ -67,7 +69,7 @@ func (m *FakePerformanceQuery) AddCounterToQuery(counterPath string) (PDH_HCOUNT func (m *FakePerformanceQuery) AddEnglishCounterToQuery(counterPath string) (PDH_HCOUNTER, error) { if !m.openCalled { - return 0, errors.New("AddEnglishCounterToQuery: uninitialised query") + return 0, errors.New("AddEnglishCounterToQuery: uninitialized query") } if c, ok := m.counters[counterPath]; ok { return c.handle, nil @@ -95,21 +97,14 @@ func (m *FakePerformanceQuery) ExpandWildCardPath(counterPath string) ([]string, func (m *FakePerformanceQuery) GetFormattedCounterValueDouble(counterHandle PDH_HCOUNTER) (float64, error) { if !m.openCalled { - return 0, errors.New("GetFormattedCounterValueDouble: uninitialised query") + return 0, errors.New("GetFormattedCounterValueDouble: uninitialized query") } for _, counter := range m.counters { if counter.handle == counterHandle { - if counter.value > 0 { - return counter.value, nil - } else { - if counter.value == 0 { - return 0, NewPdhError(PDH_INVALID_DATA) - } else if counter.value == -1 { - return 0, NewPdhError(PDH_CALC_NEGATIVE_VALUE) - } else { - return 0, NewPdhError(PDH_ACCESS_DENIED) - } + if counter.status > 0 { + return 0, NewPdhError(counter.status) } + return counter.value, nil } } return 0, fmt.Errorf("GetFormattedCounterValueDouble: invalid handle: %d", counterHandle) @@ -134,7 +129,7 @@ func (m *FakePerformanceQuery) findCounterByHandle(counterHandle PDH_HCOUNTER) * func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUNTER) ([]CounterValue, error) { if !m.openCalled { - return nil, errors.New("GetFormattedCounterArrayDouble: uninitialised query") + return nil, errors.New("GetFormattedCounterArrayDouble: uninitialized query") } for _, c := range m.counters { if c.handle == hCounter { @@ -143,17 +138,10 @@ func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN for _, p := range e { counter := m.findCounterByPath(p) if counter != nil { - if counter.value > 0 { - counters = append(counters, *counter.ToCounterValue()) - } else { - if counter.value == 0 { - return nil, NewPdhError(PDH_INVALID_DATA) - } else if counter.value == -1 { - return nil, NewPdhError(PDH_CALC_NEGATIVE_VALUE) - } else { - return nil, NewPdhError(PDH_ACCESS_DENIED) - } + if counter.status > 0 { + return nil, NewPdhError(counter.status) } + counters = append(counters, *counter.ToCounterValue()) } else { return nil, fmt.Errorf("GetFormattedCounterArrayDouble: invalid counter : %s", p) } @@ -169,14 +157,14 @@ func (m *FakePerformanceQuery) GetFormattedCounterArrayDouble(hCounter PDH_HCOUN func (m *FakePerformanceQuery) CollectData() error { if !m.openCalled { - return errors.New("CollectData: uninitialised query") + return errors.New("CollectData: uninitialized query") } return nil } func (m *FakePerformanceQuery) CollectDataWithTime() (time.Time, error) { if !m.openCalled { - return time.Now(), errors.New("CollectData: uninitialised query") + return time.Now(), errors.New("CollectData: uninitialized query") } return MetricTime, nil } @@ -199,28 +187,77 @@ func createPerfObject(measurement string, object string, instances []string, cou return perfobjects } -func createCounterMap(counterPaths []string, values []float64) map[string]testCounter { +func createCounterMap(counterPaths []string, values []float64, status []uint32) map[string]testCounter { counters := make(map[string]testCounter) 
for i, cp := range counterPaths { counters[cp] = testCounter{ PDH_HCOUNTER(i), cp, values[i], + status[i], } } return counters } +var counterPathsAndRes = map[string][]string{ + "\\O\\CT": {"O", "", "CT"}, + "\\O\\CT(i)": {"O", "", "CT(i)"}, + "\\O\\CT(d:\\f\\i)": {"O", "", "CT(d:\\f\\i)"}, + "\\\\CM\\O\\CT": {"O", "", "CT"}, + "\\O(I)\\CT": {"O", "I", "CT"}, + "\\O(I)\\CT(i)": {"O", "I", "CT(i)"}, + "\\O(I)\\CT(i)x": {"O", "I", "CT(i)x"}, + "\\O(I)\\CT(d:\\f\\i)": {"O", "I", "CT(d:\\f\\i)"}, + "\\\\CM\\O(I)\\CT": {"O", "I", "CT"}, + "\\O(d:\\f\\I)\\CT": {"O", "d:\\f\\I", "CT"}, + "\\O(d:\\f\\I(d))\\CT": {"O", "d:\\f\\I(d)", "CT"}, + "\\O(d:\\f\\I(d)x)\\CT": {"O", "d:\\f\\I(d)x", "CT"}, + "\\O(d:\\f\\I)\\CT(i)": {"O", "d:\\f\\I", "CT(i)"}, + "\\O(d:\\f\\I)\\CT(d:\\f\\i)": {"O", "d:\\f\\I", "CT(d:\\f\\i)"}, + "\\\\CM\\O(d:\\f\\I)\\CT": {"O", "d:\\f\\I", "CT"}, + "\\\\CM\\O(d:\\f\\I)\\CT(d:\\f\\i)": {"O", "d:\\f\\I", "CT(d:\\f\\i)"}, + "\\O(I(info))\\CT": {"O", "I(info)", "CT"}, + "\\\\CM\\O(I(info))\\CT": {"O", "I(info)", "CT"}, +} + +var invalidCounterPaths = []string{ + "\\O(I\\C", + "\\OI)\\C", + "\\O(I\\C", + "\\O/C", + "\\O(I/C", + "\\O(I/C)", + "\\O(I\\)C", + "\\O(I\\C)", +} + +func TestCounterPathParsing(t *testing.T) { + for path, vals := range counterPathsAndRes { + o, i, c, err := extractCounterInfoFromCounterPath(path) + require.NoError(t, err) + require.True(t, assert.ObjectsAreEqual(vals, []string{o, i, c}), "arrays: %#v and %#v are not equal", vals, []string{o, i, c}) + } + for _, path := range invalidCounterPaths { + _, _, _, err := extractCounterInfoFromCounterPath(path) + require.Error(t, err) + } +} + func TestAddItemSimple(t *testing.T) { var err error cps1 := []string{"\\O(I)\\C"} - m := Win_PerfCounters{PrintValid: false, Object: nil, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1}), - expandPaths: map[string][]string{ - cps1[0]: cps1, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: nil, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}), + expandPaths: map[string][]string{ + cps1[0]: cps1, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.AddItem(cps1[0], "O", "I", "c", "test", false) @@ -232,13 +269,18 @@ func TestAddItemSimple(t *testing.T) { func TestAddItemInvalidCountPath(t *testing.T) { var err error cps1 := []string{"\\O\\C"} - m := Win_PerfCounters{PrintValid: false, Object: nil, UseWildcardsExpansion: true, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1}), - expandPaths: map[string][]string{ - cps1[0]: {"\\O/C"}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: nil, + UseWildcardsExpansion: true, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1}, []uint32{0}), + expandPaths: map[string][]string{ + cps1[0]: {"\\O/C"}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.AddItem("\\O\\C", "O", "------", "C", "test", false) @@ -251,16 +293,20 @@ func TestParseConfigBasic(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3, 
1.4}), - expandPaths: map[string][]string{ - cps1[0]: {cps1[0]}, - cps1[1]: {cps1[1]}, - cps1[2]: {cps1[2]}, - cps1[3]: {cps1[3]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + cps1[2]: {cps1[2]}, + cps1[3]: {cps1[3]}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -285,14 +331,19 @@ func TestParseConfigNoInstance(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"------"}, []string{"C1", "C2"}, false, false) cps1 := []string{"\\O\\C1", "\\O\\C2"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: false, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2}), - expandPaths: map[string][]string{ - cps1[0]: {cps1[0]}, - cps1[1]: {cps1[1]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + UseWildcardsExpansion: false, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1, 1.2}, []uint32{0, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -317,15 +368,19 @@ func TestParseConfigInvalidCounterError(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, true, false) cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}), - expandPaths: map[string][]string{ - cps1[0]: {cps1[0]}, - cps1[1]: {cps1[1]}, - cps1[2]: {cps1[2]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + cps1[2]: {cps1[2]}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -348,15 +403,19 @@ func TestParseConfigInvalidCounterNoError(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"I1", "I2"}, []string{"C1", "C2"}, false, false) cps1 := []string{"\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}), - expandPaths: map[string][]string{ - cps1[0]: {cps1[0]}, - cps1[1]: {cps1[1]}, - cps1[2]: {cps1[2]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.1, 1.2, 1.3}, []uint32{0, 0, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + cps1[2]: {cps1[2]}, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -380,13 +439,18 @@ func TestParseConfigTotalExpansion(t *testing.T) { var err error perfObjects := 
createPerfObject("m", "O", []string{"*"}, []string{"*"}, true, true) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"} - m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}), - expandPaths: map[string][]string{ - "\\O(*)\\*": cps1, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UseWildcardsExpansion: true, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), + expandPaths: map[string][]string{ + "\\O(*)\\*": cps1, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -397,13 +461,18 @@ func TestParseConfigTotalExpansion(t *testing.T) { perfObjects[0].IncludeTotal = false - m = Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}), - expandPaths: map[string][]string{ - "\\O(*)\\*": cps1, - }, - vistaAndNewer: true, - }} + m = Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UseWildcardsExpansion: true, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), + expandPaths: map[string][]string{ + "\\O(*)\\*": cps1, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -417,13 +486,18 @@ func TestParseConfigExpand(t *testing.T) { var err error perfObjects := createPerfObject("m", "O", []string{"*"}, []string{"*"}, false, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} - m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}), - expandPaths: map[string][]string{ - "\\O(*)\\*": cps1, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UseWildcardsExpansion: true, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), + expandPaths: map[string][]string{ + "\\O(*)\\*": cps1, + }, + vistaAndNewer: true, + }} err = m.query.Open() require.NoError(t, err) err = m.ParseConfig() @@ -441,13 +515,17 @@ func TestSimpleGather(t *testing.T) { measurement := "test" perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap([]string{cp1}, []float64{1.2}), - expandPaths: map[string][]string{ - cp1: {cp1}, - }, - vistaAndNewer: false, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}), + expandPaths: map[string][]string{ + cp1: {cp1}, + }, + vistaAndNewer: false, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.NoError(t, err) @@ -472,6 +550,52 @@ func TestSimpleGather(t 
*testing.T) { acc1.AssertContainsTaggedFields(t, measurement, fields1, tags1) } +func TestSimpleGatherNoData(t *testing.T) { + var err error + if testing.Short() { + t.Skip("Skipping long taking test in short mode") + } + measurement := "test" + perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) + cp1 := "\\O(I)\\C" + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{PDH_NO_DATA}), + expandPaths: map[string][]string{ + cp1: {cp1}, + }, + vistaAndNewer: false, + }} + var acc1 testutil.Accumulator + err = m.Gather(&acc1) + // this "PDH_NO_DATA" error should not be returned to caller, but checked, and handled + require.NoError(t, err) + + // fields would contain if the error was ignored, and we simply added garbage + fields1 := map[string]interface{}{ + "C": float32(1.2), + } + // tags would contain if the error was ignored, and we simply added garbage + tags1 := map[string]string{ + "instance": "I", + "objectname": "O", + } + acc1.AssertDoesNotContainsTaggedFields(t, measurement, fields1, tags1) + + m.UseWildcardsExpansion = true + m.counters = nil + m.lastRefreshed = time.Time{} + + var acc2 testutil.Accumulator + + err = m.Gather(&acc2) + require.NoError(t, err) + acc1.AssertDoesNotContainsTaggedFields(t, measurement, fields1, tags1) +} + func TestSimpleGatherWithTimestamp(t *testing.T) { var err error if testing.Short() { @@ -480,13 +604,18 @@ func TestSimpleGatherWithTimestamp(t *testing.T) { measurement := "test" perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" - m := Win_PerfCounters{PrintValid: false, UsePerfCounterTime: true, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap([]string{cp1}, []float64{1.2}), - expandPaths: map[string][]string{ - cp1: {cp1}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UsePerfCounterTime: true, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap([]string{cp1}, []float64{1.2}, []uint32{0}), + expandPaths: map[string][]string{ + cp1: {cp1}, + }, + vistaAndNewer: true, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.NoError(t, err) @@ -504,22 +633,28 @@ func TestSimpleGatherWithTimestamp(t *testing.T) { func TestGatherError(t *testing.T) { var err error + expected_error := "error while getting value for counter \\O(I)\\C: The information passed is not valid.\r\n" if testing.Short() { t.Skip("Skipping long taking test in short mode") } measurement := "test" perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C"}, false, false) cp1 := "\\O(I)\\C" - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap([]string{cp1}, []float64{-2}), - expandPaths: map[string][]string{ - cp1: {cp1}, - }, - vistaAndNewer: false, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap([]string{cp1}, []float64{-2}, []uint32{PDH_PLA_VALIDATION_WARNING}), + expandPaths: map[string][]string{ + cp1: {cp1}, + }, + vistaAndNewer: false, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.Error(t, err) + require.Equal(t, expected_error, err.Error()) m.UseWildcardsExpansion = true m.counters = nil 
@@ -529,6 +664,7 @@ func TestGatherError(t *testing.T) { err = m.Gather(&acc2) require.Error(t, err) + require.Equal(t, expected_error, err.Error()) } func TestGatherInvalidDataIgnore(t *testing.T) { @@ -539,21 +675,26 @@ func TestGatherInvalidDataIgnore(t *testing.T) { measurement := "test" perfObjects := createPerfObject(measurement, "O", []string{"I"}, []string{"C1", "C2", "C3"}, false, false) cps1 := []string{"\\O(I)\\C1", "\\O(I)\\C2", "\\O(I)\\C3"} - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(cps1, []float64{1.2, -1, 0}), - expandPaths: map[string][]string{ - cps1[0]: {cps1[0]}, - cps1[1]: {cps1[1]}, - cps1[2]: {cps1[2]}, - }, - vistaAndNewer: false, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(cps1, []float64{1.2, 1, 0}, []uint32{0, PDH_INVALID_DATA, 0}), + expandPaths: map[string][]string{ + cps1[0]: {cps1[0]}, + cps1[1]: {cps1[1]}, + cps1[2]: {cps1[2]}, + }, + vistaAndNewer: false, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.NoError(t, err) fields1 := map[string]interface{}{ "C1": float32(1.2), + "C3": float32(0), } tags1 := map[string]string{ "instance": "I", @@ -581,13 +722,20 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"*"}, true, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} fpm := &FakePerformanceQuery{ - counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}), + counters: createCounterMap(append(cps1, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 0}, []uint32{0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\*": cps1, }, vistaAndNewer: true, } - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: true, query: fpm, CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + UseWildcardsExpansion: true, + query: fpm, + CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}, + } var acc1 testutil.Accumulator err = m.Gather(&acc1) assert.Len(t, m.counters, 4) @@ -615,7 +763,7 @@ func TestGatherRefreshingWithExpansion(t *testing.T) { acc1.AssertContainsTaggedFields(t, measurement, fields2, tags2) cps2 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I3)\\C1", "\\O(I3)\\C2"} fpm = &FakePerformanceQuery{ - counters: createCounterMap(append(cps2, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 0}), + counters: createCounterMap(append(cps2, "\\O(*)\\*"), []float64{1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 0}, []uint32{0, 0, 0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\*": cps2, }, @@ -666,14 +814,20 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, false) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2"} fpm := &FakePerformanceQuery{ - counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}), + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\C1": {cps1[0], cps1[2]}, "\\O(*)\\C2": {cps1[1], 
cps1[3]}, }, vistaAndNewer: true, } - m := Win_PerfCounters{PrintValid: false, Object: perfObjects, UseWildcardsExpansion: false, query: fpm, CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + Object: perfObjects, + UseWildcardsExpansion: false, + query: fpm, + CountersRefreshInterval: internal.Duration{Duration: time.Second * 10}} var acc1 testutil.Accumulator err = m.Gather(&acc1) assert.Len(t, m.counters, 2) @@ -702,7 +856,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { //test finding new instance cps2 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I3)\\C1", "\\O(I3)\\C2"} fpm = &FakePerformanceQuery{ - counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps2...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}), + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps2...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}, []uint32{0, 0, 0, 0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\C1": {cps2[0], cps2[2], cps2[4]}, "\\O(*)\\C2": {cps2[1], cps2[3], cps2[5]}, @@ -735,7 +889,7 @@ func TestGatherRefreshingWithoutExpansion(t *testing.T) { perfObjects = createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2", "C3"}, true, false) cps3 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(I1)\\C3", "\\O(I2)\\C1", "\\O(I2)\\C2", "\\O(I2)\\C3"} fpm = &FakePerformanceQuery{ - counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2", "\\O(*)\\C3"}, cps3...), []float64{0, 0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}), + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2", "\\O(*)\\C3"}, cps3...), []float64{0, 0, 0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6}, []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0}), expandPaths: map[string][]string{ "\\O(*)\\C1": {cps3[0], cps3[3]}, "\\O(*)\\C2": {cps3[1], cps3[4]}, @@ -783,14 +937,19 @@ func TestGatherTotalNoExpansion(t *testing.T) { measurement := "m" perfObjects := createPerfObject(measurement, "O", []string{"*"}, []string{"C1", "C2"}, true, true) cps1 := []string{"\\O(I1)\\C1", "\\O(I1)\\C2", "\\O(_Total)\\C1", "\\O(_Total)\\C2"} - m := Win_PerfCounters{PrintValid: false, UseWildcardsExpansion: false, Object: perfObjects, query: &FakePerformanceQuery{ - counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}), - expandPaths: map[string][]string{ - "\\O(*)\\C1": {cps1[0], cps1[2]}, - "\\O(*)\\C2": {cps1[1], cps1[3]}, - }, - vistaAndNewer: true, - }} + m := Win_PerfCounters{ + Log: testutil.Logger{}, + PrintValid: false, + UseWildcardsExpansion: false, + Object: perfObjects, + query: &FakePerformanceQuery{ + counters: createCounterMap(append([]string{"\\O(*)\\C1", "\\O(*)\\C2"}, cps1...), []float64{0, 0, 1.1, 1.2, 1.3, 1.4}, []uint32{0, 0, 0, 0, 0, 0}), + expandPaths: map[string][]string{ + "\\O(*)\\C1": {cps1[0], cps1[2]}, + "\\O(*)\\C2": {cps1[1], cps1[3]}, + }, + vistaAndNewer: true, + }} var acc1 testutil.Accumulator err = m.Gather(&acc1) require.NoError(t, err) diff --git a/plugins/inputs/win_services/README.md b/plugins/inputs/win_services/README.md index 4aa9e6b86..eef641718 100644 --- a/plugins/inputs/win_services/README.md +++ b/plugins/inputs/win_services/README.md @@ -1,7 +1,9 @@ -# Telegraf Plugin: win_services -Input plugin to report Windows services info. +# Windows Services Input Plugin + +Reports information about Windows service status. 
+ +Monitoring some services may require running Telegraf with administrator privileges. -It requires that Telegraf must be running under the administrator privileges. ### Configuration: ```toml @@ -25,7 +27,7 @@ The `state` field can have the following values: - 3 - stop pending - 4 - running - 5 - continue pending -- 6 - pause pending +- 6 - pause pending - 7 - paused The `startup_mode` field can have the following values: @@ -33,7 +35,7 @@ The `startup_mode` field can have the following values: - 1 - system start - 2 - auto start - 3 - demand start -- 4 - disabled +- 4 - disabled ### Tags: @@ -43,14 +45,13 @@ The `startup_mode` field can have the following values: ### Example Output: ``` -* Plugin: inputs.win_services, Collection 1 -> win_services,host=WIN2008R2H401,display_name=Server,service_name=LanmanServer state=4i,startup_mode=2i 1500040669000000000 -> win_services,display_name=Remote\ Desktop\ Services,service_name=TermService,host=WIN2008R2H401 state=1i,startup_mode=3i 1500040669000000000 +win_services,host=WIN2008R2H401,display_name=Server,service_name=LanmanServer state=4i,startup_mode=2i 1500040669000000000 +win_services,display_name=Remote\ Desktop\ Services,service_name=TermService,host=WIN2008R2H401 state=1i,startup_mode=3i 1500040669000000000 ``` ### TICK Scripts A sample TICK script for a notification about a not running service. -It sends a notification whenever any service changes its state to be not _running_ and when it changes that state back to _running_. +It sends a notification whenever any service changes its state to be not _running_ and when it changes that state back to _running_. The notification is sent via an HTTP POST call. ``` diff --git a/plugins/inputs/win_services/win_services.go b/plugins/inputs/win_services/win_services.go index 8e56a96d0..6ac1bde68 100644 --- a/plugins/inputs/win_services/win_services.go +++ b/plugins/inputs/win_services/win_services.go @@ -4,32 +4,51 @@ package win_services import ( "fmt" + "os" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/inputs" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" ) -//WinService provides interface for svc.Service +type ServiceErr struct { + Message string + Service string + Err error +} + +func (e *ServiceErr) Error() string { + return fmt.Sprintf("%s: '%s': %v", e.Message, e.Service, e.Err) +} + +func IsPermission(err error) bool { + if err, ok := err.(*ServiceErr); ok { + return os.IsPermission(err.Err) + } + return false +} + +// WinService provides interface for svc.Service type WinService interface { Close() error Config() (mgr.Config, error) Query() (svc.Status, error) } -//WinServiceManagerProvider sets interface for acquiring manager instance, like mgr.Mgr -type WinServiceManagerProvider interface { +// ManagerProvider sets interface for acquiring manager instance, like mgr.Mgr +type ManagerProvider interface { Connect() (WinServiceManager, error) } -//WinServiceManager provides interface for mgr.Mgr +// WinServiceManager provides interface for mgr.Mgr type WinServiceManager interface { Disconnect() error OpenService(name string) (WinService, error) ListServices() ([]string, error) } -//WinSvcMgr is wrapper for mgr.Mgr implementing WinServiceManager interface +// WinSvcMgr is wrapper for mgr.Mgr implementing WinServiceManager interface type WinSvcMgr struct { realMgr *mgr.Mgr } @@ -45,7 +64,7 @@ func (m *WinSvcMgr) ListServices() ([]string, error) { return m.realMgr.ListServices() } -//MgProvider is an implementation of WinServiceManagerProvider 
interface returning WinSvcMgr +// MgProvider is an implementation of WinServiceManagerProvider interface returning WinSvcMgr type MgProvider struct { } @@ -70,8 +89,10 @@ var description = "Input plugin to report Windows services info." //WinServices is an implementation if telegraf.Input interface, providing info about Windows Services type WinServices struct { + Log telegraf.Logger + ServiceNames []string `toml:"service_names"` - mgrProvider WinServiceManagerProvider + mgrProvider ManagerProvider } type ServiceInfo struct { @@ -79,7 +100,6 @@ type ServiceInfo struct { DisplayName string State int StartUpMode int - Error error } func (m *WinServices) Description() string { @@ -91,93 +111,102 @@ func (m *WinServices) SampleConfig() string { } func (m *WinServices) Gather(acc telegraf.Accumulator) error { + scmgr, err := m.mgrProvider.Connect() + if err != nil { + return fmt.Errorf("Could not open service manager: %s", err) + } + defer scmgr.Disconnect() - serviceInfos, err := listServices(m.mgrProvider, m.ServiceNames) - + serviceNames, err := listServices(scmgr, m.ServiceNames) if err != nil { return err } - for _, service := range serviceInfos { - if service.Error == nil { - fields := make(map[string]interface{}) - tags := make(map[string]string) - - //display name could be empty, but still valid service - if len(service.DisplayName) > 0 { - tags["display_name"] = service.DisplayName + for _, srvName := range serviceNames { + service, err := collectServiceInfo(scmgr, srvName) + if err != nil { + if IsPermission(err) { + m.Log.Debug(err.Error()) + } else { + m.Log.Error(err.Error()) } - tags["service_name"] = service.ServiceName - - fields["state"] = service.State - fields["startup_mode"] = service.StartUpMode - - acc.AddFields("win_services", fields, tags) - } else { - acc.AddError(service.Error) + continue } + + tags := map[string]string{ + "service_name": service.ServiceName, + } + //display name could be empty, but still valid service + if len(service.DisplayName) > 0 { + tags["display_name"] = service.DisplayName + } + + fields := map[string]interface{}{ + "state": service.State, + "startup_mode": service.StartUpMode, + } + acc.AddFields("win_services", fields, tags) } return nil } -//listServices gathers info about given services. If userServices is empty, it return info about all services on current Windows host. Any a critical error is returned. -func listServices(mgrProv WinServiceManagerProvider, userServices []string) ([]ServiceInfo, error) { - scmgr, err := mgrProv.Connect() +// listServices returns a list of services to gather. 
+func listServices(scmgr WinServiceManager, userServices []string) ([]string, error) { + if len(userServices) != 0 { + return userServices, nil + } + + names, err := scmgr.ListServices() if err != nil { - return nil, fmt.Errorf("Could not open service manager: %s", err) + return nil, fmt.Errorf("Could not list services: %s", err) } - defer scmgr.Disconnect() - - var serviceNames []string - if len(userServices) == 0 { - //Listing service names from system - serviceNames, err = scmgr.ListServices() - if err != nil { - return nil, fmt.Errorf("Could not list services: %s", err) - } - } else { - serviceNames = userServices - } - serviceInfos := make([]ServiceInfo, len(serviceNames)) - - for i, srvName := range serviceNames { - serviceInfos[i] = collectServiceInfo(scmgr, srvName) - } - - return serviceInfos, nil + return names, nil } -//collectServiceInfo gathers info about a service from WindowsAPI -func collectServiceInfo(scmgr WinServiceManager, serviceName string) (serviceInfo ServiceInfo) { - - serviceInfo.ServiceName = serviceName +// collectServiceInfo gathers info about a service. +func collectServiceInfo(scmgr WinServiceManager, serviceName string) (*ServiceInfo, error) { srv, err := scmgr.OpenService(serviceName) if err != nil { - serviceInfo.Error = fmt.Errorf("Could not open service '%s': %s", serviceName, err) - return + return nil, &ServiceErr{ + Message: "could not open service", + Service: serviceName, + Err: err, + } } defer srv.Close() srvStatus, err := srv.Query() - if err == nil { - serviceInfo.State = int(srvStatus.State) - } else { - serviceInfo.Error = fmt.Errorf("Could not query service '%s': %s", serviceName, err) - //finish collecting info on first found error - return + if err != nil { + return nil, &ServiceErr{ + Message: "could not query service", + Service: serviceName, + Err: err, + } } srvCfg, err := srv.Config() - if err == nil { - serviceInfo.DisplayName = srvCfg.DisplayName - serviceInfo.StartUpMode = int(srvCfg.StartType) - } else { - serviceInfo.Error = fmt.Errorf("Could not get config of service '%s': %s", serviceName, err) + if err != nil { + return nil, &ServiceErr{ + Message: "could not get config of service", + Service: serviceName, + Err: err, + } } - return + + serviceInfo := &ServiceInfo{ + ServiceName: serviceName, + DisplayName: srvCfg.DisplayName, + StartUpMode: int(srvCfg.StartType), + State: int(srvStatus.State), + } + return serviceInfo, nil } func init() { - inputs.Add("win_services", func() telegraf.Input { return &WinServices{mgrProvider: &MgProvider{}} }) + inputs.Add("win_services", func() telegraf.Input { + return &WinServices{ + mgrProvider: &MgProvider{}, + } + }) } diff --git a/plugins/inputs/win_services/win_services_integration_test.go b/plugins/inputs/win_services/win_services_integration_test.go index 201746514..0c375c3dd 100644 --- a/plugins/inputs/win_services/win_services_integration_test.go +++ b/plugins/inputs/win_services/win_services_integration_test.go @@ -4,11 +4,10 @@ package win_services import ( - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/sys/windows/svc/mgr" "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" ) var InvalidServices = []string{"XYZ1@", "ZYZ@", "SDF_@#"} @@ -18,98 +17,43 @@ func TestList(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } - services, err := listServices(&MgProvider{}, KnownServices) + provider := &MgProvider{} + 
scmgr, err := provider.Connect() require.NoError(t, err) - assert.Len(t, services, 2, "Different number of services") - assert.Equal(t, services[0].ServiceName, KnownServices[0]) - assert.Nil(t, services[0].Error) - assert.Equal(t, services[1].ServiceName, KnownServices[1]) - assert.Nil(t, services[1].Error) + defer scmgr.Disconnect() + + services, err := listServices(scmgr, KnownServices) + require.NoError(t, err) + require.Len(t, services, 2, "Different number of services") + require.Equal(t, services[0], KnownServices[0]) + require.Equal(t, services[1], KnownServices[1]) } func TestEmptyList(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } - services, err := listServices(&MgProvider{}, []string{}) + provider := &MgProvider{} + scmgr, err := provider.Connect() require.NoError(t, err) - assert.Condition(t, func() bool { return len(services) > 20 }, "Too few service") -} + defer scmgr.Disconnect() -func TestListEr(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - services, err := listServices(&MgProvider{}, InvalidServices) + services, err := listServices(scmgr, []string{}) require.NoError(t, err) - assert.Len(t, services, 3, "Different number of services") - for i := 0; i < 3; i++ { - assert.Equal(t, services[i].ServiceName, InvalidServices[i]) - assert.NotNil(t, services[i].Error) - } -} - -func TestGather(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - ws := &WinServices{KnownServices, &MgProvider{}} - assert.Len(t, ws.ServiceNames, 2, "Different number of services") - var acc testutil.Accumulator - require.NoError(t, ws.Gather(&acc)) - assert.Len(t, acc.Errors, 0, "There should be no errors after gather") - - for i := 0; i < 2; i++ { - fields := make(map[string]interface{}) - tags := make(map[string]string) - si := getServiceInfo(KnownServices[i]) - fields["state"] = int(si.State) - fields["startup_mode"] = int(si.StartUpMode) - tags["service_name"] = si.ServiceName - tags["display_name"] = si.DisplayName - acc.AssertContainsTaggedFields(t, "win_services", fields, tags) - } + require.Condition(t, func() bool { return len(services) > 20 }, "Too few service") } func TestGatherErrors(t *testing.T) { if testing.Short() { t.Skip("Skipping integration test in short mode") } - ws := &WinServices{InvalidServices, &MgProvider{}} - assert.Len(t, ws.ServiceNames, 3, "Different number of services") + ws := &WinServices{ + Log: testutil.Logger{}, + ServiceNames: InvalidServices, + mgrProvider: &MgProvider{}, + } + require.Len(t, ws.ServiceNames, 3, "Different number of services") var acc testutil.Accumulator require.NoError(t, ws.Gather(&acc)) - assert.Len(t, acc.Errors, 3, "There should be 3 errors after gather") -} - -func getServiceInfo(srvName string) *ServiceInfo { - - scmgr, err := mgr.Connect() - if err != nil { - return nil - } - defer scmgr.Disconnect() - - srv, err := scmgr.OpenService(srvName) - if err != nil { - return nil - } - var si ServiceInfo - si.ServiceName = srvName - srvStatus, err := srv.Query() - if err == nil { - si.State = int(srvStatus.State) - } else { - si.Error = err - } - - srvCfg, err := srv.Config() - if err == nil { - si.DisplayName = srvCfg.DisplayName - si.StartUpMode = int(srvCfg.StartType) - } else { - si.Error = err - } - srv.Close() - return &si + require.Len(t, acc.Errors, 3, "There should be 3 errors after gather") } diff --git a/plugins/inputs/win_services/win_services_test.go 
b/plugins/inputs/win_services/win_services_test.go index 3c05e85c5..e33ab2ddc 100644 --- a/plugins/inputs/win_services/win_services_test.go +++ b/plugins/inputs/win_services/win_services_test.go @@ -3,14 +3,17 @@ package win_services import ( + "bytes" "errors" "fmt" + "log" + "testing" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/mgr" - "testing" ) //testData is DD wrapper for unit testing of WinServices @@ -84,14 +87,31 @@ func (m *FakeWinSvc) Config() (mgr.Config, error) { if m.testData.serviceConfigError != nil { return mgr.Config{}, m.testData.serviceConfigError } else { - return mgr.Config{0, uint32(m.testData.startUpMode), 0, "", "", 0, nil, m.testData.serviceName, m.testData.displayName, "", ""}, nil + return mgr.Config{ + ServiceType: 0, + StartType: uint32(m.testData.startUpMode), + ErrorControl: 0, + BinaryPathName: "", + LoadOrderGroup: "", + TagId: 0, + Dependencies: nil, + ServiceStartName: m.testData.serviceName, + DisplayName: m.testData.displayName, + Password: "", + Description: "", + }, nil } } func (m *FakeWinSvc) Query() (svc.Status, error) { if m.testData.serviceQueryError != nil { return svc.Status{}, m.testData.serviceQueryError } else { - return svc.Status{svc.State(m.testData.state), 0, 0, 0}, nil + return svc.Status{ + State: svc.State(m.testData.state), + Accepts: 0, + CheckPoint: 0, + WaitHint: 0, + }, nil } } @@ -110,47 +130,51 @@ var testErrors = []testData{ func TestBasicInfo(t *testing.T) { - winServices := &WinServices{nil, &FakeMgProvider{testErrors[0]}} + winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}} assert.NotEmpty(t, winServices.SampleConfig()) assert.NotEmpty(t, winServices.Description()) } func TestMgrErrors(t *testing.T) { //mgr.connect error - winServices := &WinServices{nil, &FakeMgProvider{testErrors[0]}} + winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[0]}} var acc1 testutil.Accumulator err := winServices.Gather(&acc1) require.Error(t, err) assert.Contains(t, err.Error(), testErrors[0].mgrConnectError.Error()) ////mgr.listServices error - winServices = &WinServices{nil, &FakeMgProvider{testErrors[1]}} + winServices = &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[1]}} var acc2 testutil.Accumulator err = winServices.Gather(&acc2) require.Error(t, err) assert.Contains(t, err.Error(), testErrors[1].mgrListServicesError.Error()) ////mgr.listServices error 2 - winServices = &WinServices{[]string{"Fake service 1"}, &FakeMgProvider{testErrors[3]}} + winServices = &WinServices{testutil.Logger{}, []string{"Fake service 1"}, &FakeMgProvider{testErrors[3]}} var acc3 testutil.Accumulator - err = winServices.Gather(&acc3) - require.NoError(t, err) - assert.Len(t, acc3.Errors, 1) + buf := &bytes.Buffer{} + log.SetOutput(buf) + require.NoError(t, winServices.Gather(&acc3)) + + require.Contains(t, buf.String(), testErrors[2].services[0].serviceOpenError.Error()) } func TestServiceErrors(t *testing.T) { - winServices := &WinServices{nil, &FakeMgProvider{testErrors[2]}} + winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testErrors[2]}} var acc1 testutil.Accumulator - require.NoError(t, winServices.Gather(&acc1)) - assert.Len(t, acc1.Errors, 3) - //open service error - assert.Contains(t, acc1.Errors[0].Error(), testErrors[2].services[0].serviceOpenError.Error()) - //query service error - assert.Contains(t, 
acc1.Errors[1].Error(), testErrors[2].services[1].serviceQueryError.Error()) - //config service error - assert.Contains(t, acc1.Errors[2].Error(), testErrors[2].services[2].serviceConfigError.Error()) + buf := &bytes.Buffer{} + log.SetOutput(buf) + require.NoError(t, winServices.Gather(&acc1)) + + //open service error + require.Contains(t, buf.String(), testErrors[2].services[0].serviceOpenError.Error()) + //query service error + require.Contains(t, buf.String(), testErrors[2].services[1].serviceQueryError.Error()) + //config service error + require.Contains(t, buf.String(), testErrors[2].services[2].serviceConfigError.Error()) } var testSimpleData = []testData{ @@ -161,7 +185,7 @@ var testSimpleData = []testData{ } func TestGather2(t *testing.T) { - winServices := &WinServices{nil, &FakeMgProvider{testSimpleData[0]}} + winServices := &WinServices{testutil.Logger{}, nil, &FakeMgProvider{testSimpleData[0]}} var acc1 testutil.Accumulator require.NoError(t, winServices.Gather(&acc1)) assert.Len(t, acc1.Errors, 0, "There should be no errors after gather") @@ -175,5 +199,4 @@ func TestGather2(t *testing.T) { tags["display_name"] = s.displayName acc1.AssertContainsTaggedFields(t, "win_services", fields, tags) } - } diff --git a/plugins/inputs/wireguard/README.md b/plugins/inputs/wireguard/README.md new file mode 100644 index 000000000..57e16ba49 --- /dev/null +++ b/plugins/inputs/wireguard/README.md @@ -0,0 +1,73 @@ +# Wireguard Input Plugin + +The Wireguard input plugin collects statistics on the local Wireguard server +using the [`wgctrl`](https://github.com/WireGuard/wgctrl-go) library. It +reports gauge metrics for Wireguard interface device(s) and its peers. + +### Configuration + +```toml +# Collect Wireguard server interface and peer statistics +[[inputs.wireguard]] + ## Optional list of Wireguard device/interface names to query. + ## If omitted, all Wireguard interfaces are queried. + # devices = ["wg0"] +``` + +### Metrics + +- `wireguard_device` + - tags: + - `name` (interface device name, e.g. `wg0`) + - `type` (Wireguard tunnel type, e.g. `linux_kernel` or `userspace`) + - fields: + - `listen_port` (int, UDP port on which the interface is listening) + - `firewall_mark` (int, device's current firewall mark) + - `peers` (int, number of peers associated with the device) + +- `wireguard_peer` + - tags: + - `device` (associated interface device name, e.g. `wg0`) + - `public_key` (peer public key, e.g. `NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE=`) + - fields: + - `persistent_keepalive_interval_ns` (int, keepalive interval in nanoseconds; 0 if unset) + - `protocol_version` (int, Wireguard protocol version number) + - `allowed_ips` (int, number of allowed IPs for this peer) + - `last_handshake_time_ns` (int, Unix timestamp of the last handshake for this peer in nanoseconds) + - `rx_bytes` (int, number of bytes received from this peer) + - `tx_bytes` (int, number of bytes transmitted to this peer) + +### Troubleshooting + +#### Error: `operation not permitted` + +When the kernelspace implementation of Wireguard is in use (as opposed to its +userspace implementations), Telegraf communicates with the module over netlink. +This requires Telegraf to either run as root, or for the Telegraf binary to +have the `CAP_NET_ADMIN` capability. + +To add this capability to the Telegraf binary (to allow this communication under +the default user `telegraf`): + +```bash +$ sudo setcap CAP_NET_ADMIN+epi $(which telegraf) +``` + +N.B.: This capability is a filesystem attribute on the binary itself. 
The +attribute needs to be re-applied if the Telegraf binary is rotated (e.g. +on installation of new a Telegraf version from the system package manager). + +#### Error: `error enumerating Wireguard devices` + +This usually happens when the device names specified in config are invalid. +Ensure that `sudo wg show` succeeds, and that the device names in config match +those printed by this command. + +### Example Output + +``` +wireguard_device,host=WGVPN,name=wg0,type=linux_kernel firewall_mark=51820i,listen_port=58216i 1582513589000000000 +wireguard_device,host=WGVPN,name=wg0,type=linux_kernel peers=1i 1582513589000000000 +wireguard_peer,device=wg0,host=WGVPN,public_key=NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE= allowed_ips=2i,persistent_keepalive_interval_ns=60000000000i,protocol_version=1i 1582513589000000000 +wireguard_peer,device=wg0,host=WGVPN,public_key=NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE= last_handshake_time_ns=1582513584530013376i,rx_bytes=6484i,tx_bytes=13540i 1582513589000000000 +``` diff --git a/plugins/inputs/wireguard/wireguard.go b/plugins/inputs/wireguard/wireguard.go new file mode 100644 index 000000000..ded332837 --- /dev/null +++ b/plugins/inputs/wireguard/wireguard.go @@ -0,0 +1,139 @@ +package wireguard + +import ( + "fmt" + "log" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" + "golang.zx2c4.com/wireguard/wgctrl" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +const ( + measurementDevice = "wireguard_device" + measurementPeer = "wireguard_peer" +) + +var ( + deviceTypeNames = map[wgtypes.DeviceType]string{ + wgtypes.Unknown: "unknown", + wgtypes.LinuxKernel: "linux_kernel", + wgtypes.Userspace: "userspace", + } +) + +// Wireguard is an input that enumerates all Wireguard interfaces/devices on +// the host, and reports gauge metrics for the device itself and its peers. +type Wireguard struct { + Devices []string `toml:"devices"` + + client *wgctrl.Client +} + +func (wg *Wireguard) Description() string { + return "Collect Wireguard server interface and peer statistics" +} + +func (wg *Wireguard) SampleConfig() string { + return ` + ## Optional list of Wireguard device/interface names to query. + ## If omitted, all Wireguard interfaces are queried. + # devices = ["wg0"] +` +} + +func (wg *Wireguard) Init() error { + var err error + + wg.client, err = wgctrl.New() + + return err +} + +func (wg *Wireguard) Gather(acc telegraf.Accumulator) error { + devices, err := wg.enumerateDevices() + if err != nil { + return fmt.Errorf("error enumerating Wireguard devices: %v", err) + } + + for _, device := range devices { + wg.gatherDeviceMetrics(acc, device) + + for _, peer := range device.Peers { + wg.gatherDevicePeerMetrics(acc, device, peer) + } + } + + return nil +} + +func (wg *Wireguard) enumerateDevices() ([]*wgtypes.Device, error) { + var devices []*wgtypes.Device + + // If no device names are specified, defer to the library to enumerate + // all of them + if len(wg.Devices) == 0 { + return wg.client.Devices() + } + + // Otherwise, explicitly populate only device names specified in config + for _, name := range wg.Devices { + dev, err := wg.client.Device(name) + if err != nil { + log.Printf("W! 
[inputs.wireguard] No Wireguard device found with name %s", name) + continue + } + + devices = append(devices, dev) + } + + return devices, nil +} + +func (wg *Wireguard) gatherDeviceMetrics(acc telegraf.Accumulator, device *wgtypes.Device) { + fields := map[string]interface{}{ + "listen_port": device.ListenPort, + "firewall_mark": device.FirewallMark, + } + + gauges := map[string]interface{}{ + "peers": len(device.Peers), + } + + tags := map[string]string{ + "name": device.Name, + "type": deviceTypeNames[device.Type], + } + + acc.AddFields(measurementDevice, fields, tags) + acc.AddGauge(measurementDevice, gauges, tags) +} + +func (wg *Wireguard) gatherDevicePeerMetrics(acc telegraf.Accumulator, device *wgtypes.Device, peer wgtypes.Peer) { + fields := map[string]interface{}{ + "persistent_keepalive_interval_ns": peer.PersistentKeepaliveInterval.Nanoseconds(), + "protocol_version": peer.ProtocolVersion, + "allowed_ips": len(peer.AllowedIPs), + } + + gauges := map[string]interface{}{ + "last_handshake_time_ns": peer.LastHandshakeTime.UnixNano(), + "rx_bytes": peer.ReceiveBytes, + "tx_bytes": peer.TransmitBytes, + } + + tags := map[string]string{ + "device": device.Name, + "public_key": peer.PublicKey.String(), + } + + acc.AddFields(measurementPeer, fields, tags) + acc.AddGauge(measurementPeer, gauges, tags) +} + +func init() { + inputs.Add("wireguard", func() telegraf.Input { + return &Wireguard{} + }) +} diff --git a/plugins/inputs/wireguard/wireguard_test.go b/plugins/inputs/wireguard/wireguard_test.go new file mode 100644 index 000000000..0cfdba75d --- /dev/null +++ b/plugins/inputs/wireguard/wireguard_test.go @@ -0,0 +1,84 @@ +package wireguard + +import ( + "net" + "testing" + "time" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +func TestWireguard_gatherDeviceMetrics(t *testing.T) { + var acc testutil.Accumulator + + wg := &Wireguard{} + device := &wgtypes.Device{ + Name: "wg0", + Type: wgtypes.LinuxKernel, + ListenPort: 1, + FirewallMark: 2, + Peers: []wgtypes.Peer{{}, {}}, + } + + expectFields := map[string]interface{}{ + "listen_port": 1, + "firewall_mark": 2, + } + expectGauges := map[string]interface{}{ + "peers": 2, + } + expectTags := map[string]string{ + "name": "wg0", + "type": "linux_kernel", + } + + wg.gatherDeviceMetrics(&acc, device) + + assert.Equal(t, 3, acc.NFields()) + acc.AssertDoesNotContainMeasurement(t, measurementPeer) + acc.AssertContainsTaggedFields(t, measurementDevice, expectFields, expectTags) + acc.AssertContainsTaggedFields(t, measurementDevice, expectGauges, expectTags) +} + +func TestWireguard_gatherDevicePeerMetrics(t *testing.T) { + var acc testutil.Accumulator + pubkey, _ := wgtypes.ParseKey("NZTRIrv/ClTcQoNAnChEot+WL7OH7uEGQmx8oAN9rWE=") + + wg := &Wireguard{} + device := &wgtypes.Device{ + Name: "wg0", + } + peer := wgtypes.Peer{ + PublicKey: pubkey, + PersistentKeepaliveInterval: 1 * time.Minute, + LastHandshakeTime: time.Unix(100, 0), + ReceiveBytes: int64(40), + TransmitBytes: int64(60), + AllowedIPs: []net.IPNet{{}, {}}, + ProtocolVersion: 0, + } + + expectFields := map[string]interface{}{ + "persistent_keepalive_interval_ns": int64(60000000000), + "protocol_version": 0, + "allowed_ips": 2, + } + expectGauges := map[string]interface{}{ + "last_handshake_time_ns": int64(100000000000), + "rx_bytes": int64(40), + "tx_bytes": int64(60), + } + expectTags := map[string]string{ + "device": "wg0", + "public_key": pubkey.String(), + } + + 
wg.gatherDevicePeerMetrics(&acc, device, peer) + + assert.Equal(t, 6, acc.NFields()) + acc.AssertDoesNotContainMeasurement(t, measurementDevice) + acc.AssertContainsTaggedFields(t, measurementPeer, expectFields, expectTags) + acc.AssertContainsTaggedFields(t, measurementPeer, expectGauges, expectTags) +} diff --git a/plugins/inputs/wireless/README.md b/plugins/inputs/wireless/README.md new file mode 100644 index 000000000..6be7bd383 --- /dev/null +++ b/plugins/inputs/wireless/README.md @@ -0,0 +1,38 @@ +# Wireless Input Plugin + +The wireless plugin gathers metrics about wireless link quality by reading the `/proc/net/wireless` file. This plugin currently supports Linux only. + +### Configuration: + +```toml +# Monitor wifi signal strength and quality +[[inputs.wireless]] + ## Sets 'proc' directory path + ## If not specified, then default is /proc + # host_proc = "/proc" +``` + +### Metrics: + +- wireless + - tags: + - interface (wireless interface) + - fields: + - status (int64, gauge) - current state of the device; this is device-dependent information + - link (int64, percentage, gauge) - general quality of the reception + - level (int64, dBm, gauge) - signal strength at the receiver + - noise (int64, dBm, gauge) - silence level (no packet) at the receiver + - nwid (int64, packets, counter) - number of discarded packets due to invalid network id + - crypt (int64, packets, counter) - number of packets unable to be decrypted + - frag (int64, packets, counter) - fragmented packets + - retry (int64, packets, counter) - cumulative retry counts + - misc (int64, packets, counter) - packets dropped for an unspecified reason + - missed_beacon (int64, packets, counter) - missed beacon packets + +### Example Output: + +This section shows example output in Line Protocol format. + +``` +wireless,host=example.localdomain,interface=wlan0 misc=0i,frag=0i,link=60i,level=-50i,noise=-256i,nwid=0i,crypt=0i,retry=1525i,missed_beacon=0i,status=0i 1519843022000000000 +``` diff --git a/plugins/inputs/wireless/wireless.go b/plugins/inputs/wireless/wireless.go new file mode 100644 index 000000000..911d7fb09 --- /dev/null +++ b/plugins/inputs/wireless/wireless.go @@ -0,0 +1,34 @@ +package wireless + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// Wireless is used to store configuration values. +type Wireless struct { + HostProc string `toml:"host_proc"` + Log telegraf.Logger `toml:"-"` +} + +var sampleConfig = ` + ## Sets 'proc' directory path + ## If not specified, then default is /proc + # host_proc = "/proc" +` + +// Description returns information about the plugin. +func (w *Wireless) Description() string { + return "Monitor wifi signal strength and quality" +} + +// SampleConfig displays configuration instructions. 
+func (w *Wireless) SampleConfig() string { + return sampleConfig +} + +func init() { + inputs.Add("wireless", func() telegraf.Input { + return &Wireless{} + }) +} diff --git a/plugins/inputs/wireless/wireless_linux.go b/plugins/inputs/wireless/wireless_linux.go new file mode 100644 index 000000000..75890a790 --- /dev/null +++ b/plugins/inputs/wireless/wireless_linux.go @@ -0,0 +1,144 @@ +// +build linux + +package wireless + +import ( + "bytes" + "io/ioutil" + "log" + "os" + "path" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +// default host proc path +const defaultHostProc = "/proc" + +// env host proc variable name +const envProc = "HOST_PROC" + +// length of wireless interface fields +const interfaceFieldLength = 10 + +var newLineByte = []byte("\n") + +type wirelessInterface struct { + Interface string + Status int64 + Link int64 + Level int64 + Noise int64 + Nwid int64 + Crypt int64 + Frag int64 + Retry int64 + Misc int64 + Beacon int64 +} + +// Gather collects the wireless information. +func (w *Wireless) Gather(acc telegraf.Accumulator) error { + // load proc path, get default value if config value and env variable are empty + w.loadPath() + + wirelessPath := path.Join(w.HostProc, "net", "wireless") + table, err := ioutil.ReadFile(wirelessPath) + if err != nil { + return err + } + + interfaces, err := loadWirelessTable(table) + if err != nil { + return err + } + for _, w := range interfaces { + tags := map[string]string{ + "interface": w.Interface, + } + fieldsG := map[string]interface{}{ + "status": w.Status, + "link": w.Link, + "level": w.Level, + "noise": w.Noise, + } + fieldsC := map[string]interface{}{ + "nwid": w.Nwid, + "crypt": w.Crypt, + "frag": w.Frag, + "retry": w.Retry, + "misc": w.Misc, + "beacon": w.Beacon, + } + acc.AddGauge("wireless", fieldsG, tags) + acc.AddCounter("wireless", fieldsC, tags) + } + + return nil +} + +func loadWirelessTable(table []byte) ([]*wirelessInterface, error) { + var w []*wirelessInterface + lines := bytes.Split(table, newLineByte) + + // iterate over interfaces + for i := 2; i < len(lines); i = i + 1 { + if len(lines[i]) == 0 { + continue + } + values := make([]int64, 0, interfaceFieldLength) + fields := strings.Fields(string(lines[i])) + for j := 1; j < len(fields); j = j + 1 { + v, err := strconv.ParseInt(strings.Trim(fields[j], "."), 10, 64) + if err != nil { + return nil, err + } + values = append(values, v) + } + if len(values) != interfaceFieldLength { + log.Printf("E! 
[input.wireless] invalid length of interface values") + continue + } + w = append(w, &wirelessInterface{ + Interface: strings.Trim(fields[0], ":"), + Status: values[0], + Link: values[1], + Level: values[2], + Noise: values[3], + Nwid: values[4], + Crypt: values[5], + Frag: values[6], + Retry: values[7], + Misc: values[8], + Beacon: values[9], + }) + } + return w, nil +} + +// loadPath can be used to read path firstly from config +// if it is empty then try read from env variable +func (w *Wireless) loadPath() { + if w.HostProc == "" { + w.HostProc = proc(envProc, defaultHostProc) + } +} + +// proc can be used to read file paths from env +func proc(env, path string) string { + // try to read full file path + if p := os.Getenv(env); p != "" { + return p + } + // return default path + return path +} + +func init() { + inputs.Add("wireless", func() telegraf.Input { + return &Wireless{} + }) +} diff --git a/plugins/inputs/wireless/wireless_notlinux.go b/plugins/inputs/wireless/wireless_notlinux.go new file mode 100644 index 000000000..4769acc97 --- /dev/null +++ b/plugins/inputs/wireless/wireless_notlinux.go @@ -0,0 +1,23 @@ +// +build !linux + +package wireless + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/inputs" +) + +func (w *Wireless) Init() error { + w.Log.Warn("Current platform is not supported") + return nil +} + +func (w *Wireless) Gather(acc telegraf.Accumulator) error { + return nil +} + +func init() { + inputs.Add("wireless", func() telegraf.Input { + return &Wireless{} + }) +} diff --git a/plugins/inputs/wireless/wireless_test.go b/plugins/inputs/wireless/wireless_test.go new file mode 100644 index 000000000..6c562887e --- /dev/null +++ b/plugins/inputs/wireless/wireless_test.go @@ -0,0 +1,52 @@ +// +build linux + +package wireless + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var testInput = []byte(`Inter-| sta-| Quality | Discarded packets | Missed | WE + face | tus | link level noise | nwid crypt frag retry misc | beacon | 22 + wlan0: 0000 60. -50. -256 0 0 0 1525 0 0 + wlan1: 0000 70. -39. -256 0 0 0 12096 191188 0`) + +func TestLoadWirelessTable(t *testing.T) { + expectedMetrics := []*wirelessInterface{ + { + Interface: "wlan0", + Status: int64(0000), + Link: int64(60), + Level: int64(-50), + Noise: int64(-256), + Nwid: int64(0), + Crypt: int64(0), + Frag: int64(0), + Retry: int64(1525), + Misc: int64(0), + Beacon: int64(0), + }, + { + Interface: "wlan1", + Status: int64(0000), + Link: int64(70), + Level: int64(-39), + Noise: int64(-256), + Nwid: int64(0), + Crypt: int64(0), + Frag: int64(0), + Retry: int64(12096), + Misc: int64(191188), + Beacon: int64(0), + }, + } + metrics, err := loadWirelessTable(testInput) + if err != nil { + t.Fatal(err) + } + + as := assert.New(t) + as.Equal(metrics, expectedMetrics) +} diff --git a/plugins/inputs/x509_cert/README.md b/plugins/inputs/x509_cert/README.md new file mode 100644 index 000000000..074bbc58c --- /dev/null +++ b/plugins/inputs/x509_cert/README.md @@ -0,0 +1,62 @@ +# X509 Cert Input Plugin + +This plugin provides information about X509 certificate accessible via local +file or network connection. 
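+
+If a configured source produces no metrics, it can help to first inspect that
+source outside of Telegraf. A minimal sanity check with the `openssl` CLI
+(assuming it is installed; the file path and host below are the same examples
+used in the configuration section) might look like:
+
+```bash
+# Local PEM file: print the certificate's expiry date.
+openssl x509 -noout -enddate -in /etc/ssl/certs/ssl-cert-snakeoil.pem
+
+# Remote endpoint: fetch the certificate chain presented over TLS.
+openssl s_client -connect example.org:443 -servername example.org </dev/null
+```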
+ + +### Configuration + +```toml +# Reads metrics from a SSL certificate +[[inputs.x509_cert]] + ## List certificate sources + sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "https://example.org:443"] + + ## Timeout for SSL connection + # timeout = "5s" + + ## Pass a different name into the TLS request (Server Name Indication) + ## example: server_name = "myhost.example.org" + # server_name = "myhost.example.org" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +``` + + +### Metrics + +- x509_cert + - tags: + - source - source of the certificate + - organization + - organizational_unit + - country + - province + - locality + - verification + - serial_number + - signature_algorithm + - public_key_algorithm + - issuer_common_name + - issuer_serial_number + - san + - fields: + - verification_code (int) + - verification_error (string) + - expiry (int, seconds) + - age (int, seconds) + - startdate (int, seconds) + - enddate (int, seconds) + + +### Example output + +``` +x509_cert,common_name=ubuntu,source=/etc/ssl/certs/ssl-cert-snakeoil.pem,verification=valid age=7693222i,enddate=1871249033i,expiry=307666777i,startdate=1555889033i,verification_code=0i 1563582256000000000 +x509_cert,common_name=www.example.org,country=US,locality=Los\ Angeles,organization=Internet\ Corporation\ for\ Assigned\ Names\ and\ Numbers,organizational_unit=Technology,province=California,source=https://example.org:443,verification=invalid age=20219055i,enddate=1606910400i,expiry=43328144i,startdate=1543363200i,verification_code=1i,verification_error="x509: certificate signed by unknown authority" 1563582256000000000 +x509_cert,common_name=DigiCert\ SHA2\ Secure\ Server\ CA,country=US,organization=DigiCert\ Inc,source=https://example.org:443,verification=valid age=200838255i,enddate=1678276800i,expiry=114694544i,startdate=1362744000i,verification_code=0i 1563582256000000000 +x509_cert,common_name=DigiCert\ Global\ Root\ CA,country=US,organization=DigiCert\ Inc,organizational_unit=www.digicert.com,source=https://example.org:443,verification=valid age=400465455i,enddate=1952035200i,expiry=388452944i,startdate=1163116800i,verification_code=0i 1563582256000000000 +``` diff --git a/plugins/inputs/x509_cert/dev/telegraf.conf b/plugins/inputs/x509_cert/dev/telegraf.conf new file mode 100644 index 000000000..7545997a4 --- /dev/null +++ b/plugins/inputs/x509_cert/dev/telegraf.conf @@ -0,0 +1,4 @@ +[[inputs.x509_cert]] + sources = ["https://expired.badssl.com:443", "https://wrong.host.badssl.com:443"] + +[[outputs.file]] diff --git a/plugins/inputs/x509_cert/x509_cert.go b/plugins/inputs/x509_cert/x509_cert.go new file mode 100644 index 000000000..89744351f --- /dev/null +++ b/plugins/inputs/x509_cert/x509_cert.go @@ -0,0 +1,269 @@ +// Package x509_cert reports metrics from an SSL certificate. 
+package x509_cert + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "net" + "net/url" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + _tls "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/inputs" +) + +const sampleConfig = ` + ## List certificate sources + sources = ["/etc/ssl/certs/ssl-cert-snakeoil.pem", "tcp://example.org:443"] + + ## Timeout for SSL connection + # timeout = "5s" + + ## Pass a different name into the TLS request (Server Name Indication) + ## example: server_name = "myhost.example.org" + # server_name = "" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" +` +const description = "Reads metrics from a SSL certificate" + +// X509Cert holds the configuration of the plugin. +type X509Cert struct { + Sources []string `toml:"sources"` + Timeout internal.Duration `toml:"timeout"` + ServerName string `toml:"server_name"` + tlsCfg *tls.Config + _tls.ClientConfig +} + +// Description returns description of the plugin. +func (c *X509Cert) Description() string { + return description +} + +// SampleConfig returns configuration sample for the plugin. +func (c *X509Cert) SampleConfig() string { + return sampleConfig +} + +func (c *X509Cert) locationToURL(location string) (*url.URL, error) { + if strings.HasPrefix(location, "/") { + location = "file://" + location + } + + u, err := url.Parse(location) + if err != nil { + return nil, fmt.Errorf("failed to parse cert location - %s", err.Error()) + } + + return u, nil +} + +func (c *X509Cert) getCert(u *url.URL, timeout time.Duration) ([]*x509.Certificate, error) { + switch u.Scheme { + case "https": + u.Scheme = "tcp" + fallthrough + case "udp", "udp4", "udp6": + fallthrough + case "tcp", "tcp4", "tcp6": + ipConn, err := net.DialTimeout(u.Scheme, u.Host, timeout) + if err != nil { + return nil, err + } + defer ipConn.Close() + + if c.ServerName == "" { + c.tlsCfg.ServerName = u.Hostname() + } else { + c.tlsCfg.ServerName = c.ServerName + } + + c.tlsCfg.InsecureSkipVerify = true + conn := tls.Client(ipConn, c.tlsCfg) + defer conn.Close() + + hsErr := conn.Handshake() + if hsErr != nil { + return nil, hsErr + } + + certs := conn.ConnectionState().PeerCertificates + + return certs, nil + case "file": + content, err := ioutil.ReadFile(u.Path) + if err != nil { + return nil, err + } + var certs []*x509.Certificate + for { + block, rest := pem.Decode(bytes.TrimSpace(content)) + if block == nil { + return nil, fmt.Errorf("failed to parse certificate PEM") + } + + if block.Type == "CERTIFICATE" { + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + certs = append(certs, cert) + } + if rest == nil || len(rest) == 0 { + break + } + content = rest + } + return certs, nil + default: + return nil, fmt.Errorf("unsupported scheme '%s' in location %s", u.Scheme, u.String()) + } +} + +func getFields(cert *x509.Certificate, now time.Time) map[string]interface{} { + age := int(now.Sub(cert.NotBefore).Seconds()) + expiry := int(cert.NotAfter.Sub(now).Seconds()) + startdate := cert.NotBefore.Unix() + enddate := cert.NotAfter.Unix() + + fields := map[string]interface{}{ + "age": age, + "expiry": expiry, + "startdate": startdate, + "enddate": enddate, + } + + return fields +} + +func getTags(cert *x509.Certificate, location string) map[string]string { + tags := map[string]string{ + "source": 
location, + "common_name": cert.Subject.CommonName, + "serial_number": cert.SerialNumber.Text(16), + "signature_algorithm": cert.SignatureAlgorithm.String(), + "public_key_algorithm": cert.PublicKeyAlgorithm.String(), + } + + if len(cert.Subject.Organization) > 0 { + tags["organization"] = cert.Subject.Organization[0] + } + if len(cert.Subject.OrganizationalUnit) > 0 { + tags["organizational_unit"] = cert.Subject.OrganizationalUnit[0] + } + if len(cert.Subject.Country) > 0 { + tags["country"] = cert.Subject.Country[0] + } + if len(cert.Subject.Province) > 0 { + tags["province"] = cert.Subject.Province[0] + } + if len(cert.Subject.Locality) > 0 { + tags["locality"] = cert.Subject.Locality[0] + } + + tags["issuer_common_name"] = cert.Issuer.CommonName + tags["issuer_serial_number"] = cert.Issuer.SerialNumber + + san := append(cert.DNSNames, cert.EmailAddresses...) + for _, ip := range cert.IPAddresses { + san = append(san, ip.String()) + } + for _, uri := range cert.URIs { + san = append(san, uri.String()) + } + tags["san"] = strings.Join(san, ",") + + return tags +} + +// Gather adds metrics into the accumulator. +func (c *X509Cert) Gather(acc telegraf.Accumulator) error { + now := time.Now() + + for _, location := range c.Sources { + u, err := c.locationToURL(location) + if err != nil { + acc.AddError(err) + return nil + } + + certs, err := c.getCert(u, c.Timeout.Duration*time.Second) + if err != nil { + acc.AddError(fmt.Errorf("cannot get SSL cert '%s': %s", location, err.Error())) + } + + for i, cert := range certs { + fields := getFields(cert, now) + tags := getTags(cert, location) + + // The first certificate is the leaf/end-entity certificate which needs DNS + // name validation against the URL hostname. + opts := x509.VerifyOptions{ + Intermediates: x509.NewCertPool(), + } + if i == 0 { + if c.ServerName == "" { + opts.DNSName = u.Hostname() + } else { + opts.DNSName = c.ServerName + } + for j, cert := range certs { + if j != 0 { + opts.Intermediates.AddCert(cert) + } + } + } + if c.tlsCfg.RootCAs != nil { + opts.Roots = c.tlsCfg.RootCAs + } + + _, err = cert.Verify(opts) + if err == nil { + tags["verification"] = "valid" + fields["verification_code"] = 0 + } else { + tags["verification"] = "invalid" + fields["verification_code"] = 1 + fields["verification_error"] = err.Error() + } + + acc.AddFields("x509_cert", fields, tags) + } + } + + return nil +} + +func (c *X509Cert) Init() error { + tlsCfg, err := c.ClientConfig.TLSConfig() + if err != nil { + return err + } + if tlsCfg == nil { + tlsCfg = &tls.Config{} + } + + c.tlsCfg = tlsCfg + + return nil +} + +func init() { + inputs.Add("x509_cert", func() telegraf.Input { + return &X509Cert{ + Sources: []string{}, + Timeout: internal.Duration{Duration: 5}, + } + }) +} diff --git a/plugins/inputs/x509_cert/x509_cert_test.go b/plugins/inputs/x509_cert/x509_cert_test.go new file mode 100644 index 000000000..fa90a90eb --- /dev/null +++ b/plugins/inputs/x509_cert/x509_cert_test.go @@ -0,0 +1,344 @@ +package x509_cert + +import ( + "crypto/tls" + "encoding/base64" + "fmt" + "io/ioutil" + "math/big" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" +) + +var pki = testutil.NewPKI("../../../testutil/pki") + +// Make sure X509Cert implements telegraf.Input +var _ telegraf.Input = &X509Cert{} + +func TestGatherRemote(t *testing.T) { + if testing.Short() { + 
t.Skip("Skipping network-dependent test in short mode.") + } + + tmpfile, err := ioutil.TempFile("", "example") + if err != nil { + t.Fatal(err) + } + + defer os.Remove(tmpfile.Name()) + + if _, err := tmpfile.Write([]byte(pki.ReadServerCert())); err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + server string + timeout time.Duration + close bool + unset bool + noshake bool + error bool + }{ + {name: "wrong port", server: ":99999", error: true}, + {name: "no server", timeout: 5}, + {name: "successful https", server: "https://example.org:443", timeout: 5}, + {name: "successful file", server: "file://" + tmpfile.Name(), timeout: 5}, + {name: "unsupported scheme", server: "foo://", timeout: 5, error: true}, + {name: "no certificate", timeout: 5, unset: true, error: true}, + {name: "closed connection", close: true, error: true}, + {name: "no handshake", timeout: 5, noshake: true, error: true}, + } + + pair, err := tls.X509KeyPair([]byte(pki.ReadServerCert()), []byte(pki.ReadServerKey())) + if err != nil { + t.Fatal(err) + } + + config := &tls.Config{ + InsecureSkipVerify: true, + Certificates: []tls.Certificate{pair}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.unset { + config.Certificates = nil + config.GetCertificate = func(i *tls.ClientHelloInfo) (*tls.Certificate, error) { + return nil, nil + } + } + + ln, err := tls.Listen("tcp", ":0", config) + if err != nil { + t.Fatal(err) + } + defer ln.Close() + + go func() { + sconn, err := ln.Accept() + if err != nil { + return + } + if test.close { + sconn.Close() + } + + serverConfig := config.Clone() + + srv := tls.Server(sconn, serverConfig) + if test.noshake { + srv.Close() + } + if err := srv.Handshake(); err != nil { + return + } + }() + + if test.server == "" { + test.server = "tcp://" + ln.Addr().String() + } + + sc := X509Cert{ + Sources: []string{test.server}, + Timeout: internal.Duration{Duration: test.timeout}, + } + sc.Init() + + sc.InsecureSkipVerify = true + testErr := false + + acc := testutil.Accumulator{} + err = sc.Gather(&acc) + if len(acc.Errors) > 0 { + testErr = true + } + + if testErr != test.error { + t.Errorf("%s", err) + } + }) + } +} + +func TestGatherLocal(t *testing.T) { + wrongCert := fmt.Sprintf("-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----\n", base64.StdEncoding.EncodeToString([]byte("test"))) + + tests := []struct { + name string + mode os.FileMode + content string + error bool + }{ + {name: "permission denied", mode: 0001, error: true}, + {name: "not a certificate", mode: 0640, content: "test", error: true}, + {name: "wrong certificate", mode: 0640, content: wrongCert, error: true}, + {name: "correct certificate", mode: 0640, content: pki.ReadServerCert()}, + {name: "correct certificate and extra trailing space", mode: 0640, content: pki.ReadServerCert() + " "}, + {name: "correct certificate and extra leading space", mode: 0640, content: " " + pki.ReadServerCert()}, + {name: "correct multiple certificates", mode: 0640, content: pki.ReadServerCert() + pki.ReadCACert()}, + {name: "correct multiple certificates and key", mode: 0640, content: pki.ReadServerCert() + pki.ReadCACert() + pki.ReadServerKey()}, + {name: "correct certificate and wrong certificate", mode: 0640, content: pki.ReadServerCert() + "\n" + wrongCert, error: true}, + {name: "correct certificate and not a certificate", mode: 0640, content: pki.ReadServerCert() + "\ntest", error: true}, + {name: "correct multiple certificates and extra trailing space", mode: 0640, content: 
pki.ReadServerCert() + pki.ReadServerCert() + " "}, + {name: "correct multiple certificates and extra leading space", mode: 0640, content: " " + pki.ReadServerCert() + pki.ReadServerCert()}, + {name: "correct multiple certificates and extra middle space", mode: 0640, content: pki.ReadServerCert() + " " + pki.ReadServerCert()}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + f, err := ioutil.TempFile("", "x509_cert") + if err != nil { + t.Fatal(err) + } + + _, err = f.Write([]byte(test.content)) + if err != nil { + t.Fatal(err) + } + + err = f.Chmod(test.mode) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + defer os.Remove(f.Name()) + + sc := X509Cert{ + Sources: []string{f.Name()}, + } + sc.Init() + + error := false + + acc := testutil.Accumulator{} + err = sc.Gather(&acc) + if len(acc.Errors) > 0 { + error = true + } + + if error != test.error { + t.Errorf("%s", err) + } + }) + } +} + +func TestTags(t *testing.T) { + cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) + + f, err := ioutil.TempFile("", "x509_cert") + if err != nil { + t.Fatal(err) + } + + _, err = f.Write([]byte(cert)) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + defer os.Remove(f.Name()) + + sc := X509Cert{ + Sources: []string{f.Name()}, + } + sc.Init() + + acc := testutil.Accumulator{} + err = sc.Gather(&acc) + require.NoError(t, err) + + assert.True(t, acc.HasMeasurement("x509_cert")) + + assert.True(t, acc.HasTag("x509_cert", "common_name")) + assert.Equal(t, "server.localdomain", acc.TagValue("x509_cert", "common_name")) + + assert.True(t, acc.HasTag("x509_cert", "signature_algorithm")) + assert.Equal(t, "SHA256-RSA", acc.TagValue("x509_cert", "signature_algorithm")) + + assert.True(t, acc.HasTag("x509_cert", "public_key_algorithm")) + assert.Equal(t, "RSA", acc.TagValue("x509_cert", "public_key_algorithm")) + + assert.True(t, acc.HasTag("x509_cert", "issuer_common_name")) + assert.Equal(t, "Telegraf Test CA", acc.TagValue("x509_cert", "issuer_common_name")) + + assert.True(t, acc.HasTag("x509_cert", "san")) + assert.Equal(t, "localhost,127.0.0.1", acc.TagValue("x509_cert", "san")) + + assert.True(t, acc.HasTag("x509_cert", "serial_number")) + serialNumber := new(big.Int) + _, validSerialNumber := serialNumber.SetString(acc.TagValue("x509_cert", "serial_number"), 16) + if !validSerialNumber { + t.Errorf("Expected a valid Hex serial number but got %s", acc.TagValue("x509_cert", "serial_number")) + } + assert.Equal(t, big.NewInt(1), serialNumber) +} + +func TestGatherChain(t *testing.T) { + cert := fmt.Sprintf("%s\n%s", pki.ReadServerCert(), pki.ReadCACert()) + + tests := []struct { + name string + content string + error bool + }{ + {name: "chain certificate", content: cert}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + f, err := ioutil.TempFile("", "x509_cert") + if err != nil { + t.Fatal(err) + } + + _, err = f.Write([]byte(test.content)) + if err != nil { + t.Fatal(err) + } + + err = f.Close() + if err != nil { + t.Fatal(err) + } + + defer os.Remove(f.Name()) + + sc := X509Cert{ + Sources: []string{f.Name()}, + } + sc.Init() + + error := false + + acc := testutil.Accumulator{} + err = sc.Gather(&acc) + if err != nil { + error = true + } + + if error != test.error { + t.Errorf("%s", err) + } + }) + } + +} + +func TestStrings(t *testing.T) { + sc := X509Cert{} + sc.Init() + + tests := []struct { + name string + method string + returned 
string + expected string + }{ + {name: "description", method: "Description", returned: sc.Description(), expected: description}, + {name: "sample config", method: "SampleConfig", returned: sc.SampleConfig(), expected: sampleConfig}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.returned != test.expected { + t.Errorf("Expected method %s to return '%s', found '%s'.", test.method, test.expected, test.returned) + } + }) + } +} + +func TestGatherCert(t *testing.T) { + if testing.Short() { + t.Skip("Skipping integration test in short mode") + } + + m := &X509Cert{ + Sources: []string{"https://www.influxdata.com:443"}, + } + m.Init() + + var acc testutil.Accumulator + err := m.Gather(&acc) + require.NoError(t, err) + + assert.True(t, acc.HasMeasurement("x509_cert")) +} diff --git a/plugins/inputs/zfs/README.md b/plugins/inputs/zfs/README.md index b60711e30..e22156bc6 100644 --- a/plugins/inputs/zfs/README.md +++ b/plugins/inputs/zfs/README.md @@ -268,7 +268,7 @@ A short description for some of the metrics. `arcstats_evict_l2_ineligible` We evicted something which cannot be stored in the l2. Reasons could be: - - We have multiple pools, we evicted something from a pool whithout an l2 device. + - We have multiple pools, we evicted something from a pool without an l2 device. - The zfs property secondary cache. `arcstats_c` Arc target size, this is the size the system thinks the arc should have. diff --git a/plugins/inputs/zfs/zfs_freebsd.go b/plugins/inputs/zfs/zfs_freebsd.go index 63bbdd6e6..51c20682e 100644 --- a/plugins/inputs/zfs/zfs_freebsd.go +++ b/plugins/inputs/zfs/zfs_freebsd.go @@ -30,7 +30,11 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) { if z.PoolMetrics { for _, line := range lines { col := strings.Split(line, "\t") - tags := map[string]string{"pool": col[0], "health": col[8]} + if len(col) != 8 { + continue + } + + tags := map[string]string{"pool": col[0], "health": col[1]} fields := map[string]interface{}{} if tags["health"] == "UNAVAIL" { @@ -39,19 +43,19 @@ func (z *Zfs) gatherPoolStats(acc telegraf.Accumulator) (string, error) { } else { - size, err := strconv.ParseInt(col[1], 10, 64) + size, err := strconv.ParseInt(col[2], 10, 64) if err != nil { return "", fmt.Errorf("Error parsing size: %s", err) } fields["size"] = size - alloc, err := strconv.ParseInt(col[2], 10, 64) + alloc, err := strconv.ParseInt(col[3], 10, 64) if err != nil { return "", fmt.Errorf("Error parsing allocation: %s", err) } fields["allocated"] = alloc - free, err := strconv.ParseInt(col[3], 10, 64) + free, err := strconv.ParseInt(col[4], 10, 64) if err != nil { return "", fmt.Errorf("Error parsing free: %s", err) } @@ -130,7 +134,7 @@ func run(command string, args ...string) ([]string, error) { } func zpool() ([]string, error) { - return run("zpool", []string{"list", "-Hp"}...) + return run("zpool", []string{"list", "-Hp", "-o", "name,health,size,alloc,free,fragmentation,capacity,dedupratio"}...) 
} func sysctl(metric string) ([]string, error) { diff --git a/plugins/inputs/zfs/zfs_freebsd_test.go b/plugins/inputs/zfs/zfs_freebsd_test.go index 60b95a39d..87f21f672 100644 --- a/plugins/inputs/zfs/zfs_freebsd_test.go +++ b/plugins/inputs/zfs/zfs_freebsd_test.go @@ -10,21 +10,21 @@ import ( "github.com/stretchr/testify/require" ) -// $ zpool list -Hp +// $ zpool list -Hp -o name,health,size,alloc,free,fragmentation,capacity,dedupratio var zpool_output = []string{ - "freenas-boot 30601641984 2022177280 28579464704 - - 6 1.00x ONLINE -", - "red1 8933531975680 1126164848640 7807367127040 - 8% 12 1.83x ONLINE /mnt", - "temp1 2989297238016 1626309320704 1362987917312 - 38% 54 1.28x ONLINE /mnt", - "temp2 2989297238016 626958278656 2362338959360 - 12% 20 1.00x ONLINE /mnt", + "freenas-boot ONLINE 30601641984 2022177280 28579464704 - 6 1.00x", + "red1 ONLINE 8933531975680 1126164848640 7807367127040 8% 12 1.83x", + "temp1 ONLINE 2989297238016 1626309320704 1362987917312 38% 54 1.28x", + "temp2 ONLINE 2989297238016 626958278656 2362338959360 12% 20 1.00x", } func mock_zpool() ([]string, error) { return zpool_output, nil } -// $ zpool list -Hp +// $ zpool list -Hp -o name,health,size,alloc,free,fragmentation,capacity,dedupratio var zpool_output_unavail = []string{ - "temp2 - - - - - - - UNAVAIL -", + "temp2 UNAVAIL - - - - - -", } func mock_zpool_unavail() ([]string, error) { @@ -155,7 +155,7 @@ func TestZfsGeneratesMetrics(t *testing.T) { err = z.Gather(&acc) require.NoError(t, err) - //four pool, vdev_cache_stats and zfetchstatus metrics + //four pool, vdev_cache_stats and zfetchstats metrics intMetrics = getKstatMetricsVdevAndZfetch() acc.AssertContainsTaggedFields(t, "zfs", intMetrics, tags) diff --git a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go index ddc0d4918..ea25b49a0 100644 --- a/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go +++ b/plugins/inputs/zipkin/cmd/stress_test_write/stress_test_write.go @@ -55,7 +55,7 @@ func main() { zipkin.HTTPBatchInterval(time.Duration(BatchTimeInterval)*time.Second)) defer collector.Close() if err != nil { - log.Fatalf("Error intializing zipkin http collector: %v\n", err) + log.Fatalf("Error initializing zipkin http collector: %v\n", err) } tracer, err := zipkin.NewTracer( diff --git a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go index 60bf1b51a..dde89570b 100644 --- a/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go +++ b/plugins/inputs/zipkin/cmd/thrift_serialize/thrift_serialize.go @@ -5,7 +5,7 @@ vice versa. To convert from json to thrift, the json is unmarshalled, converted to zipkincore.Span structures, and marshalled into thrift binary protocol. The json must be in an array format (even if it only has one object), -because the tool automatically tries to unmarshall the json into an array of structs. +because the tool automatically tries to unmarshal the json into an array of structs. To convert from thrift to json, the opposite process must happen. 
The thrift binary data must be read into an array of diff --git a/plugins/inputs/zipkin/codec/codec_test.go b/plugins/inputs/zipkin/codec/codec_test.go index c3a9fbd73..3525f30c2 100644 --- a/plugins/inputs/zipkin/codec/codec_test.go +++ b/plugins/inputs/zipkin/codec/codec_test.go @@ -382,7 +382,7 @@ func TestNewBinaryAnnotations(t *testing.T) { name: "myservice", }, want: []trace.BinaryAnnotation{ - trace.BinaryAnnotation{ + { Host: "myhost", ServiceName: "myservice", Key: "mykey", @@ -424,7 +424,7 @@ func TestNewAnnotations(t *testing.T) { name: "myservice", }, want: []trace.Annotation{ - trace.Annotation{ + { Host: "myhost", ServiceName: "myservice", Timestamp: time.Unix(0, 0).UTC(), diff --git a/plugins/inputs/zipkin/codec/thrift/thrift_test.go b/plugins/inputs/zipkin/codec/thrift/thrift_test.go index 000ac628c..798fc269e 100644 --- a/plugins/inputs/zipkin/codec/thrift/thrift_test.go +++ b/plugins/inputs/zipkin/codec/thrift/thrift_test.go @@ -113,7 +113,7 @@ func TestUnmarshalThrift(t *testing.T) { Duration: addr(53106), Annotations: []*zipkincore.Annotation{}, BinaryAnnotations: []*zipkincore.BinaryAnnotation{ - &zipkincore.BinaryAnnotation{ + { Key: "lc", AnnotationType: zipkincore.AnnotationType_STRING, Value: []byte("trivial"), @@ -133,7 +133,7 @@ func TestUnmarshalThrift(t *testing.T) { Duration: addr(50410), Annotations: []*zipkincore.Annotation{}, BinaryAnnotations: []*zipkincore.BinaryAnnotation{ - &zipkincore.BinaryAnnotation{ + { Key: "lc", AnnotationType: zipkincore.AnnotationType_STRING, Value: []byte("trivial"), @@ -151,7 +151,7 @@ func TestUnmarshalThrift(t *testing.T) { Timestamp: addr(1498688360851318), Duration: addr(103680), Annotations: []*zipkincore.Annotation{ - &zipkincore.Annotation{ + { Timestamp: 1498688360851325, Value: "Starting child #0", Host: &zipkincore.Endpoint{ @@ -159,7 +159,7 @@ func TestUnmarshalThrift(t *testing.T) { ServiceName: "trivial", }, }, - &zipkincore.Annotation{ + { Timestamp: 1498688360904545, Value: "Starting child #1", Host: &zipkincore.Endpoint{ @@ -167,7 +167,7 @@ func TestUnmarshalThrift(t *testing.T) { ServiceName: "trivial", }, }, - &zipkincore.Annotation{ + { Timestamp: 1498688360954992, Value: "A Log", Host: &zipkincore.Endpoint{ @@ -177,7 +177,7 @@ func TestUnmarshalThrift(t *testing.T) { }, }, BinaryAnnotations: []*zipkincore.BinaryAnnotation{ - &zipkincore.BinaryAnnotation{ + { Key: "lc", AnnotationType: zipkincore.AnnotationType_STRING, Value: []byte("trivial"), diff --git a/plugins/inputs/zipkin/convert_test.go b/plugins/inputs/zipkin/convert_test.go index 5085deecb..23a951594 100644 --- a/plugins/inputs/zipkin/convert_test.go +++ b/plugins/inputs/zipkin/convert_test.go @@ -108,7 +108,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, }, want: []testutil.Metric{ - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "8090652509916334619", @@ -121,8 +121,9 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851331000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "8090652509916334619", @@ -138,8 +139,9 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851331000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "103618986556047333", @@ -152,8 +154,9 
@@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360904552000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "103618986556047333", @@ -169,8 +172,9 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360904552000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "22964302721410078", @@ -183,8 +187,9 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "service_name": "trivial", @@ -199,8 +204,9 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "service_name": "trivial", @@ -215,8 +221,9 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "parent_id": "22964302721410078", @@ -231,8 +238,9 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "trace_id": "2505404965370368069", @@ -248,6 +256,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, }, wantErr: false, @@ -283,7 +292,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { }, }, want: []testutil.Metric{ - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "6802735349851856000", @@ -296,8 +305,9 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(1) * time.Nanosecond).Nanoseconds(), }, Time: time.Unix(1, 0).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "cs", @@ -312,6 +322,7 @@ func TestLineProtocolConverter_Record(t *testing.T) { "duration_ns": (time.Duration(1) * time.Nanosecond).Nanoseconds(), }, Time: time.Unix(1, 0).UTC(), + Type: telegraf.Untyped, }, }, }, diff --git a/plugins/inputs/zipkin/zipkin.go b/plugins/inputs/zipkin/zipkin.go index 18a63dccd..4224fea3d 100644 --- a/plugins/inputs/zipkin/zipkin.go +++ b/plugins/inputs/zipkin/zipkin.go @@ -3,7 +3,6 @@ package zipkin import ( "context" "fmt" - "log" "net" "net/http" "strconv" @@ -60,6 +59,8 @@ type Zipkin struct { Port int Path string + Log telegraf.Logger + address string handler Handler server *http.Server @@ -105,7 +106,7 @@ func (z *Zipkin) Start(acc telegraf.Accumulator) error { } z.address = ln.Addr().String() - log.Printf("I! 
Started the zipkin listener on %s", z.address) + z.Log.Infof("Started the zipkin listener on %s", z.address) go func() { wg.Add(1) diff --git a/plugins/inputs/zipkin/zipkin_test.go b/plugins/inputs/zipkin/zipkin_test.go index b71e5bf4e..77bef853b 100644 --- a/plugins/inputs/zipkin/zipkin_test.go +++ b/plugins/inputs/zipkin/zipkin_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/testutil" ) @@ -27,7 +28,7 @@ func TestZipkinPlugin(t *testing.T) { datafile: "testdata/threespans.dat", contentType: "application/x-thrift", want: []testutil.Metric{ - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "7047c59776af8a1b", @@ -40,8 +41,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851331000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "7047c59776af8a1b", @@ -57,8 +59,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(53106) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851331000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "17020eb55a8bfe5", @@ -71,8 +74,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360904552000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "17020eb55a8bfe5", @@ -88,8 +92,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(50410) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360904552000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "5195e96239641e", @@ -102,8 +107,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "service_name": "trivial", @@ -118,8 +124,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "service_name": "trivial", @@ -134,8 +141,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "parent_id": "5195e96239641e", @@ -150,8 +158,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "trace_id": "22c4fc8ab3669045", @@ -167,6 +176,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(103680) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1498688360851318000).UTC(), + Type: telegraf.Untyped, }, }, wantErr: false, @@ -176,7 +186,7 @@ func TestZipkinPlugin(t *testing.T) { datafile: "testdata/distributed_trace_sample.dat", contentType: 
"application/x-thrift", want: []testutil.Metric{ - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "id": "5e682bc21ce99c80", @@ -189,8 +199,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "cs", @@ -205,8 +216,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "cr", @@ -221,6 +233,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": (time.Duration(1) * time.Microsecond).Nanoseconds(), }, Time: time.Unix(0, 1433330263415871*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, }, }, @@ -240,7 +253,9 @@ func TestZipkinPlugin(t *testing.T) { }, Fields: map[string]interface{}{ "duration_ns": int64(3000000), - }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + }, + Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -257,6 +272,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -273,6 +289,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -290,6 +307,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -307,6 +325,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -324,6 +343,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(3000000), }, Time: time.Unix(0, 1503031538791000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -338,6 +358,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -354,6 +375,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -370,6 +392,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -387,6 +410,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -404,6 +428,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -421,6 +446,7 @@ func TestZipkinPlugin(t *testing.T) { 
"duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -438,6 +464,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -455,6 +482,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(10000000), }, Time: time.Unix(0, 1503031538786000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -469,6 +497,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, { Measurement: "zipkin", @@ -485,8 +514,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "ss", @@ -501,8 +531,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "Demo2Application", @@ -518,8 +549,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "hi", @@ -535,8 +567,9 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, - testutil.Metric{ + { Measurement: "zipkin", Tags: map[string]string{ "annotation": "192.168.0.8:test:8010", @@ -552,6 +585,7 @@ func TestZipkinPlugin(t *testing.T) { "duration_ns": int64(23393000), }, Time: time.Unix(0, 1503031538778000*int64(time.Microsecond)).UTC(), + Type: telegraf.Untyped, }, }, }, @@ -562,6 +596,7 @@ func TestZipkinPlugin(t *testing.T) { DefaultNetwork = "tcp4" z := &Zipkin{ + Log: testutil.Logger{}, Path: "/api/v1/spans", Port: 0, } diff --git a/plugins/inputs/zookeeper/README.md b/plugins/inputs/zookeeper/README.md index d54caae44..23009c519 100644 --- a/plugins/inputs/zookeeper/README.md +++ b/plugins/inputs/zookeeper/README.md @@ -1,7 +1,7 @@ ## Zookeeper Input Plugin The zookeeper plugin collects variables outputted from the 'mntr' command -[Zookeeper Admin](https://zookeeper.apache.org/doc/trunk/zookeeperAdmin.html). +[Zookeeper Admin](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html). 
### Configuration @@ -19,7 +19,7 @@ The zookeeper plugin collects variables outputted from the 'mntr' command # timeout = "5s" ## Optional TLS Config - # enable_ssl = true + # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" diff --git a/plugins/inputs/zookeeper/zookeeper.go b/plugins/inputs/zookeeper/zookeeper.go index ad990f28c..9c9a2fa77 100644 --- a/plugins/inputs/zookeeper/zookeeper.go +++ b/plugins/inputs/zookeeper/zookeeper.go @@ -17,6 +17,8 @@ import ( "github.com/influxdata/telegraf/plugins/inputs" ) +var zookeeperFormatRE = regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`) + // Zookeeper is a zookeeper plugin type Zookeeper struct { Servers []string @@ -136,9 +138,7 @@ func (z *Zookeeper) gatherServer(ctx context.Context, address string, acc telegr fields := make(map[string]interface{}) for scanner.Scan() { line := scanner.Text() - - re := regexp.MustCompile(`^zk_(\w+)\s+([\w\.\-]+)`) - parts := re.FindStringSubmatch(string(line)) + parts := zookeeperFormatRE.FindStringSubmatch(string(line)) if len(parts) != 3 { return fmt.Errorf("unexpected line in mntr response: %q", line) diff --git a/plugins/outputs/all/all.go b/plugins/outputs/all/all.go index 037807c22..7d37c2208 100644 --- a/plugins/outputs/all/all.go +++ b/plugins/outputs/all/all.go @@ -4,27 +4,36 @@ import ( _ "github.com/influxdata/telegraf/plugins/outputs/amon" _ "github.com/influxdata/telegraf/plugins/outputs/amqp" _ "github.com/influxdata/telegraf/plugins/outputs/application_insights" + _ "github.com/influxdata/telegraf/plugins/outputs/azure_monitor" + _ "github.com/influxdata/telegraf/plugins/outputs/cloud_pubsub" _ "github.com/influxdata/telegraf/plugins/outputs/cloudwatch" _ "github.com/influxdata/telegraf/plugins/outputs/cratedb" _ "github.com/influxdata/telegraf/plugins/outputs/datadog" _ "github.com/influxdata/telegraf/plugins/outputs/discard" _ "github.com/influxdata/telegraf/plugins/outputs/elasticsearch" + _ "github.com/influxdata/telegraf/plugins/outputs/exec" _ "github.com/influxdata/telegraf/plugins/outputs/file" _ "github.com/influxdata/telegraf/plugins/outputs/graphite" _ "github.com/influxdata/telegraf/plugins/outputs/graylog" + _ "github.com/influxdata/telegraf/plugins/outputs/health" _ "github.com/influxdata/telegraf/plugins/outputs/http" _ "github.com/influxdata/telegraf/plugins/outputs/influxdb" + _ "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" _ "github.com/influxdata/telegraf/plugins/outputs/instrumental" _ "github.com/influxdata/telegraf/plugins/outputs/kafka" _ "github.com/influxdata/telegraf/plugins/outputs/kinesis" _ "github.com/influxdata/telegraf/plugins/outputs/librato" _ "github.com/influxdata/telegraf/plugins/outputs/mqtt" _ "github.com/influxdata/telegraf/plugins/outputs/nats" + _ "github.com/influxdata/telegraf/plugins/outputs/newrelic" _ "github.com/influxdata/telegraf/plugins/outputs/nsq" _ "github.com/influxdata/telegraf/plugins/outputs/opentsdb" _ "github.com/influxdata/telegraf/plugins/outputs/prometheus_client" _ "github.com/influxdata/telegraf/plugins/outputs/riemann" _ "github.com/influxdata/telegraf/plugins/outputs/riemann_legacy" _ "github.com/influxdata/telegraf/plugins/outputs/socket_writer" + _ "github.com/influxdata/telegraf/plugins/outputs/stackdriver" + _ "github.com/influxdata/telegraf/plugins/outputs/syslog" + _ "github.com/influxdata/telegraf/plugins/outputs/warp10" _ "github.com/influxdata/telegraf/plugins/outputs/wavefront" ) diff --git a/plugins/outputs/amqp/README.md 
b/plugins/outputs/amqp/README.md index 6002311f6..04715f8e3 100644 --- a/plugins/outputs/amqp/README.md +++ b/plugins/outputs/amqp/README.md @@ -1,6 +1,6 @@ # AMQP Output Plugin -This plugin writes to a AMQP 0-9-1 Exchange, a promenent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). +This plugin writes to a AMQP 0-9-1 Exchange, a prominent implementation of this protocol being [RabbitMQ](https://www.rabbitmq.com/). This plugin does not bind the exchange to a queue. @@ -33,14 +33,14 @@ For an introduction to AMQP see: # exchange_type = "topic" ## If true, exchange will be passively declared. - # exchange_declare_passive = false + # exchange_passive = false - ## If true, exchange will be created as a durable exchange. - # exchange_durable = true + ## Exchange durability can be either "transient" or "durable". + # exchange_durability = "durable" ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## Authentication credentials for the PLAIN auth_method. # username = "" @@ -92,6 +92,14 @@ For an introduction to AMQP see: ## Recommended to set to true. # use_batch_format = false + ## Content encoding for message payloads, can be set to "gzip" to or + ## "identity" to apply no encoding. + ## + ## Please note that when use_batch_format = false each amqp message contains only + ## a single metric, it is recommended to use compression with batch format + ## for best results. + # content_encoding = "identity" + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/amqp/amqp.go b/plugins/outputs/amqp/amqp.go index a69db1e6d..b00480d5a 100644 --- a/plugins/outputs/amqp/amqp.go +++ b/plugins/outputs/amqp/amqp.go @@ -12,7 +12,6 @@ import ( "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - "github.com/streadway/amqp" ) @@ -55,6 +54,7 @@ type AMQP struct { Headers map[string]string `toml:"headers"` Timeout internal.Duration `toml:"timeout"` UseBatchFormat bool `toml:"use_batch_format"` + ContentEncoding string `toml:"content_encoding"` tls.ClientConfig serializer serializers.Serializer @@ -62,6 +62,7 @@ type AMQP struct { client Client config *ClientConfig sentMessages int + encoder internal.ContentEncoder } type Client interface { @@ -91,14 +92,14 @@ var sampleConfig = ` # exchange_type = "topic" ## If true, exchange will be passively declared. - # exchange_declare_passive = false + # exchange_passive = false - ## If true, exchange will be created as a durable exchange. - # exchange_durable = true + ## Exchange durability can be either "transient" or "durable". + # exchange_durability = "durable" ## Additional exchange arguments. # exchange_arguments = { } - # exchange_arguments = {"hash_propery" = "timestamp"} + # exchange_arguments = {"hash_property" = "timestamp"} ## Authentication credentials for the PLAIN auth_method. # username = "" @@ -150,6 +151,14 @@ var sampleConfig = ` ## Recommended to set to true. # use_batch_format = false + ## Content encoding for message payloads, can be set to "gzip" to or + ## "identity" to apply no encoding. + ## + ## Please note that when use_batch_format = false each amqp message contains only + ## a single metric, it is recommended to use compression with batch format + ## for best results. 
+ # content_encoding = "identity" + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -178,11 +187,16 @@ func (q *AMQP) Connect() error { q.config = config } - client, err := q.connect(q.config) + var err error + q.encoder, err = internal.NewContentEncoder(q.ContentEncoding) + if err != nil { + return err + } + + q.client, err = q.connect(q.config) if err != nil { return err } - q.client = client return nil } @@ -206,8 +220,8 @@ func (q *AMQP) routingKey(metric telegraf.Metric) string { func (q *AMQP) Write(metrics []telegraf.Metric) error { batches := make(map[string][]telegraf.Metric) - if q.ExchangeType == "direct" || q.ExchangeType == "header" { - // Since the routing_key is ignored for these exchange types send as a + if q.ExchangeType == "header" { + // Since the routing_key is ignored for this exchange type send as a // single batch. batches[""] = metrics } else { @@ -228,6 +242,11 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { return err } + body, err = q.encoder.Encode(body) + if err != nil { + return err + } + err = q.publish(key, body) if err != nil { // If this is the first attempt to publish and the connection is @@ -249,6 +268,7 @@ func (q *AMQP) Write(metrics []telegraf.Metric) error { if q.sentMessages >= q.MaxMessages && q.MaxMessages > 0 { log.Printf("D! Output [amqp] sent MaxMessages; closing connection") + q.client.Close() q.client = nil } @@ -281,7 +301,8 @@ func (q *AMQP) serialize(metrics []telegraf.Metric) ([]byte, error) { for _, metric := range metrics { octets, err := q.serializer.Serialize(metric) if err != nil { - return nil, err + log.Printf("D! [outputs.amqp] Could not serialize metric: %v", err) + continue } _, err = buf.Write(octets) if err != nil { @@ -298,6 +319,7 @@ func (q *AMQP) makeClientConfig() (*ClientConfig, error) { exchange: q.Exchange, exchangeType: q.ExchangeType, exchangePassive: q.ExchangePassive, + encoding: q.ContentEncoding, timeout: q.Timeout.Duration, } diff --git a/plugins/outputs/amqp/client.go b/plugins/outputs/amqp/client.go index ba4e45162..8c230b706 100644 --- a/plugins/outputs/amqp/client.go +++ b/plugins/outputs/amqp/client.go @@ -19,6 +19,7 @@ type ClientConfig struct { exchangePassive bool exchangeDurable bool exchangeArguments amqp.Table + encoding string headers amqp.Table deliveryMode uint8 tlsConfig *tls.Config @@ -55,7 +56,7 @@ func Connect(config *ClientConfig) (*client, error) { log.Printf("D! Output [amqp] connected to %q", broker) break } - log.Printf("D! Output [amqp] error connecting to %q", broker) + log.Printf("D! 
Output [amqp] error connecting to %q - %s", broker, err.Error()) } if client.conn == nil { @@ -77,6 +78,10 @@ func Connect(config *ClientConfig) (*client, error) { } func (c *client) DeclareExchange() error { + if c.config.exchange == "" { + return nil + } + var err error if c.config.exchangePassive { err = c.channel.ExchangeDeclarePassive( @@ -114,10 +119,11 @@ func (c *client) Publish(key string, body []byte) error { false, // mandatory false, // immediate amqp.Publishing{ - Headers: c.config.headers, - ContentType: "text/plain", - Body: body, - DeliveryMode: c.config.deliveryMode, + Headers: c.config.headers, + ContentType: "text/plain", + ContentEncoding: c.config.encoding, + Body: body, + DeliveryMode: c.config.deliveryMode, }) } diff --git a/plugins/outputs/application_insights/README.md b/plugins/outputs/application_insights/README.md index 08850a3e6..c64e84488 100644 --- a/plugins/outputs/application_insights/README.md +++ b/plugins/outputs/application_insights/README.md @@ -12,7 +12,7 @@ This plugin writes telegraf metrics to [Azure Application Insights](https://azur # timeout = "5s" ## Enable additional diagnostic logging. - # enable_diagnosic_logging = false + # enable_diagnostic_logging = false ## Context Tag Sources add Application Insights context tags to a tag value. ## @@ -37,7 +37,7 @@ foo,host=a first=42,second=43 1525293034000000000 In the special case of a single field named `value`, a single telemetry record is created named using only the measurement name -**Example:** Create a telemetry record `foo`: +**Example:** Create a telemetry record `bar`: ``` bar,host=a value=42 1525293034000000000 ``` diff --git a/plugins/outputs/application_insights/application_insights.go b/plugins/outputs/application_insights/application_insights.go index 26f3f8dc0..3da420218 100644 --- a/plugins/outputs/application_insights/application_insights.go +++ b/plugins/outputs/application_insights/application_insights.go @@ -48,7 +48,7 @@ var ( # timeout = "5s" ## Enable additional diagnostic logging. - # enable_diagnosic_logging = false + # enable_diagnostic_logging = false ## Context Tag Sources add Application Insights context tags to a tag value. 
## diff --git a/plugins/outputs/application_insights/application_insights_test.go b/plugins/outputs/application_insights/application_insights_test.go index 561e6c9f9..5a017823c 100644 --- a/plugins/outputs/application_insights/application_insights_test.go +++ b/plugins/outputs/application_insights/application_insights_test.go @@ -184,7 +184,7 @@ func TestSimpleMetricCreated(t *testing.T) { {"neither value nor count", map[string]interface{}{"v1": "alpha", "v2": 45.8}, "", []string{"v2"}}, {"value is of wrong type", map[string]interface{}{"value": "alpha", "count": 15}, "", []string{"count"}}, {"count is of wrong type", map[string]interface{}{"value": 23.77, "count": 7.5}, "", []string{"count", "value"}}, - {"count is out of range", map[string]interface{}{"value": -98.45E4, "count": math.MaxUint64 - uint64(20)}, "", []string{"value", "count"}}, + {"count is out of range", map[string]interface{}{"value": -98.45e4, "count": math.MaxUint64 - uint64(20)}, "", []string{"value", "count"}}, {"several additional fields", map[string]interface{}{"alpha": 10, "bravo": "bravo", "charlie": 30, "delta": 40.7}, "", []string{"alpha", "charlie", "delta"}}, } @@ -288,7 +288,7 @@ func TestTagsAppliedToTelemetry(t *testing.T) { transmitter.AssertNumberOfCalls(t, "Track", len(tt.metricValueFields)) transmitter.AssertCalled(t, "Track", mock.AnythingOfType("*appinsights.MetricTelemetry")) - // Will verify that all original tags are present in telemetry.Properies map + // Will verify that all original tags are present in telemetry.Properties map verifyAdditionalTelemetry(assert, m, transmitter, tt.metricValueFields, metricName) } diff --git a/plugins/outputs/azure_monitor/README.md b/plugins/outputs/azure_monitor/README.md new file mode 100644 index 000000000..fbb493586 --- /dev/null +++ b/plugins/outputs/azure_monitor/README.md @@ -0,0 +1,152 @@ +# Azure Monitor + +__The Azure Monitor custom metrics service is currently in preview and not +available in a subset of Azure regions.__ + +This plugin will send custom metrics to Azure Monitor. Azure Monitor has a +metric resolution of one minute. To handle this in Telegraf, the Azure Monitor +output plugin will automatically aggregates metrics into one minute buckets, +which are then sent to Azure Monitor on every flush interval. + +The metrics from each input plugin will be written to a separate Azure Monitor +namespace, prefixed with `Telegraf/` by default. The field name for each +metric is written as the Azure Monitor metric name. All field values are +written as a summarized set that includes: min, max, sum, count. Tags are +written as a dimension on each Azure Monitor metric. + +### Configuration: + +```toml +[[outputs.azure_monitor]] + ## Timeout for HTTP writes. + # timeout = "20s" + + ## Set the namespace prefix, defaults to "Telegraf/". + # namespace_prefix = "Telegraf/" + + ## Azure Monitor doesn't have a string value type, so convert string + ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows + ## a maximum of 10 dimensions so Telegraf will only send the first 10 + ## alphanumeric dimensions. + # strings_as_dimensions = false + + ## Both region and resource_id must be set or be available via the + ## Instance Metadata service on Azure Virtual Machines. + # + ## Azure Region to publish metrics against. + ## ex: region = "southcentralus" + # region = "" + # + ## The Azure Resource ID against which metric will be logged, e.g. 
+ ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" + # resource_id = "" + + ## Optionally, if in Azure US Government, China, or other sovereign + ## cloud environment, set the appropriate REST endpoint for receiving + ## metrics. (Note: region may be unused in this context) + # endpoint_url = "https://monitoring.core.usgovcloudapi.net" +``` + +### Setup + +1. [Register the `microsoft.insights` resource provider in your Azure subscription][resource provider]. +2. If using Managed Service Identities to authenticate an Azure VM, + [enable system-assigned managed identity][enable msi]. +2. Use a region that supports Azure Monitor Custom Metrics, + For regions with Custom Metrics support, an endpoint will be available with + the format `https://.monitoring.azure.com`. The following regions + are currently known to be supported: + - East US (eastus) + - West US 2 (westus2) + - South Central US (southcentralus) + - West Central US (westcentralus) + - North Europe (northeurope) + - West Europe (westeurope) + - Southeast Asia (southeastasia) + +[resource provider]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-manager-supported-services +[enable msi]: https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/qs-configure-portal-windows-vm + +### Region and Resource ID + +The plugin will attempt to discover the region and resource ID using the Azure +VM Instance Metadata service. If Telegraf is not running on a virtual machine +or the VM Instance Metadata service is not available, the following variables +are required for the output to function. + +* region +* resource_id + +### Authentication + +This plugin uses one of several different types of authenticate methods. The +preferred authentication methods are different from the *order* in which each +authentication is checked. Here are the preferred authentication methods: + +1. Managed Service Identity (MSI) token + - This is the prefered authentication method. Telegraf will automatically + authenticate using this method when running on Azure VMs. +2. AAD Application Tokens (Service Principals) + - Primarily useful if Telegraf is writing metrics for other resources. + [More information][principal]. + - A Service Principal or User Principal needs to be assigned the `Monitoring + Metrics Publisher` role on the resource(s) metrics will be emitted + against. +3. AAD User Tokens (User Principals) + - Allows Telegraf to authenticate like a user. It is best to use this method + for development. + +[principal]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects + +The plugin will authenticate using the first available of the +following configurations: + +1. **Client Credentials**: Azure AD Application ID and Secret. + + Set the following environment variables: + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_CLIENT_SECRET`: Specifies the app secret to use. + +2. **Client Certificate**: Azure AD Application ID and X.509 Certificate. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_CERTIFICATE_PATH`: Specifies the certificate Path to use. + - `AZURE_CERTIFICATE_PASSWORD`: Specifies the certificate password to use. + +3. **Resource Owner Password**: Azure AD User and Password. 
This grant type is + *not recommended*, use device login instead if you need interactive login. + + - `AZURE_TENANT_ID`: Specifies the Tenant to which to authenticate. + - `AZURE_CLIENT_ID`: Specifies the app client ID to use. + - `AZURE_USERNAME`: Specifies the username to use. + - `AZURE_PASSWORD`: Specifies the password to use. + +4. **Azure Managed Service Identity**: Delegate credential management to the + platform. Requires that code is running in Azure, e.g. on a VM. All + configuration is handled by Azure. See [Azure Managed Service Identity][msi] + for more details. Only available when using the [Azure Resource Manager][arm]. + +[msi]: https://docs.microsoft.com/en-us/azure/active-directory/msi-overview +[arm]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview + +**Note: As shown above, the last option (#4) is the preferred way to +authenticate when running Telegraf on Azure VMs. + +### Dimensions + +Azure Monitor only accepts values with a numeric type. The plugin will drop +fields with a string type by default. The plugin can set all string type fields +as extra dimensions in the Azure Monitor custom metric by setting the +configuration option `strings_as_dimensions` to `true`. + +Keep in mind, Azure Monitor allows a maximum of 10 dimensions per metric. The +plugin will deterministically dropped any dimensions that exceed the 10 +dimension limit. + +To convert only a subset of string-typed fields as dimensions, enable +`strings_as_dimensions` and use the [`fieldpass` or `fielddrop` +processors](https://docs.influxdata.com/telegraf/v1.7/administration/configuration/#processor-configuration) +to limit the string-typed fields that are sent to the plugin. diff --git a/plugins/outputs/azure_monitor/azure_monitor.go b/plugins/outputs/azure_monitor/azure_monitor.go new file mode 100644 index 000000000..f2b1db1dd --- /dev/null +++ b/plugins/outputs/azure_monitor/azure_monitor.go @@ -0,0 +1,652 @@ +package azure_monitor + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash/fnv" + "io/ioutil" + "log" + "net/http" + "regexp" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/selfstat" +) + +// AzureMonitor allows publishing of metrics to the Azure Monitor custom metrics +// service +type AzureMonitor struct { + Timeout internal.Duration + NamespacePrefix string `toml:"namespace_prefix"` + StringsAsDimensions bool `toml:"strings_as_dimensions"` + Region string + ResourceID string `toml:"resource_id"` + EndpointUrl string `toml:"endpoint_url"` + + url string + auth autorest.Authorizer + client *http.Client + + cache map[time.Time]map[uint64]*aggregate + timeFunc func() time.Time + + MetricOutsideWindow selfstat.Stat +} + +// VirtualMachineMetadata contains information about a VM from the metadata service +type virtualMachineMetadata struct { + Compute struct { + Location string `json:"location"` + Name string `json:"name"` + ResourceGroupName string `json:"resourceGroupName"` + SubscriptionID string `json:"subscriptionId"` + VMScaleSetName string `json:"vmScaleSetName"` + } `json:"compute"` +} + +func (m *virtualMachineMetadata) ResourceID() string { + if m.Compute.VMScaleSetName != "" { + return fmt.Sprintf( + resourceIDScaleSetTemplate, + 
m.Compute.SubscriptionID, + m.Compute.ResourceGroupName, + m.Compute.VMScaleSetName, + ) + } else { + return fmt.Sprintf( + resourceIDTemplate, + m.Compute.SubscriptionID, + m.Compute.ResourceGroupName, + m.Compute.Name, + ) + } +} + +type dimension struct { + name string + value string +} + +type aggregate struct { + name string + min float64 + max float64 + sum float64 + count int64 + dimensions []dimension + updated bool +} + +const ( + defaultRequestTimeout = time.Second * 5 + defaultNamespacePrefix = "Telegraf/" + defaultAuthResource = "https://monitoring.azure.com/" + + vmInstanceMetadataURL = "http://169.254.169.254/metadata/instance?api-version=2017-12-01" + resourceIDTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s" + resourceIDScaleSetTemplate = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s" + urlTemplate = "https://%s.monitoring.azure.com%s/metrics" + urlOverrideTemplate = "%s%s/metrics" + maxRequestBodySize = 4000000 +) + +var sampleConfig = ` + ## Timeout for HTTP writes. + # timeout = "20s" + + ## Set the namespace prefix, defaults to "Telegraf/". + # namespace_prefix = "Telegraf/" + + ## Azure Monitor doesn't have a string value type, so convert string + ## fields to dimensions (a.k.a. tags) if enabled. Azure Monitor allows + ## a maximum of 10 dimensions so Telegraf will only send the first 10 + ## alphanumeric dimensions. + # strings_as_dimensions = false + + ## Both region and resource_id must be set or be available via the + ## Instance Metadata service on Azure Virtual Machines. + # + ## Azure Region to publish metrics against. + ## ex: region = "southcentralus" + # region = "" + # + ## The Azure Resource ID against which metric will be logged, e.g. + ## ex: resource_id = "/subscriptions//resourceGroups//providers/Microsoft.Compute/virtualMachines/" + # resource_id = "" + + ## Optionally, if in Azure US Government, China or other sovereign + ## cloud environment, set appropriate REST endpoint for receiving + ## metrics. 
(Note: region may be unused in this context) + # endpoint_url = "https://monitoring.core.usgovcloudapi.net" +` + +// Description provides a description of the plugin +func (a *AzureMonitor) Description() string { + return "Send aggregate metrics to Azure Monitor" +} + +// SampleConfig provides a sample configuration for the plugin +func (a *AzureMonitor) SampleConfig() string { + return sampleConfig +} + +// Connect initializes the plugin and validates connectivity +func (a *AzureMonitor) Connect() error { + a.cache = make(map[time.Time]map[uint64]*aggregate, 36) + + if a.Timeout.Duration == 0 { + a.Timeout.Duration = defaultRequestTimeout + } + + a.client = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + Timeout: a.Timeout.Duration, + } + + if a.NamespacePrefix == "" { + a.NamespacePrefix = defaultNamespacePrefix + } + + var err error + var region string + var resourceID string + var endpointUrl string + + if a.Region == "" || a.ResourceID == "" { + // Pull region and resource identifier + region, resourceID, err = vmInstanceMetadata(a.client) + if err != nil { + return err + } + } + if a.Region != "" { + region = a.Region + } + if a.ResourceID != "" { + resourceID = a.ResourceID + } + if a.EndpointUrl != "" { + endpointUrl = a.EndpointUrl + } + + if resourceID == "" { + return fmt.Errorf("no resource ID configured or available via VM instance metadata") + } else if region == "" { + return fmt.Errorf("no region configured or available via VM instance metadata") + } + + if endpointUrl == "" { + a.url = fmt.Sprintf(urlTemplate, region, resourceID) + } else { + a.url = fmt.Sprintf(urlOverrideTemplate, endpointUrl, resourceID) + } + + log.Printf("D! Writing to Azure Monitor URL: %s", a.url) + + a.auth, err = auth.NewAuthorizerFromEnvironmentWithResource(defaultAuthResource) + if err != nil { + return nil + } + + a.Reset() + + tags := map[string]string{ + "region": region, + "resource_id": resourceID, + } + a.MetricOutsideWindow = selfstat.Register("azure_monitor", "metric_outside_window", tags) + + return nil +} + +// vmMetadata retrieves metadata about the current Azure VM +func vmInstanceMetadata(c *http.Client) (string, string, error) { + req, err := http.NewRequest("GET", vmInstanceMetadataURL, nil) + if err != nil { + return "", "", fmt.Errorf("error creating request: %v", err) + } + req.Header.Set("Metadata", "true") + + resp, err := c.Do(req) + if err != nil { + return "", "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", "", err + } + if resp.StatusCode >= 300 || resp.StatusCode < 200 { + return "", "", fmt.Errorf("unable to fetch instance metadata: [%s] %d", + vmInstanceMetadataURL, resp.StatusCode) + } + + var metadata virtualMachineMetadata + if err := json.Unmarshal(body, &metadata); err != nil { + return "", "", err + } + + region := metadata.Compute.Location + resourceID := metadata.ResourceID() + + return region, resourceID, nil +} + +// Close shuts down an any active connections +func (a *AzureMonitor) Close() error { + a.client = nil + return nil +} + +type azureMonitorMetric struct { + Time time.Time `json:"time"` + Data *azureMonitorData `json:"data"` +} + +type azureMonitorData struct { + BaseData *azureMonitorBaseData `json:"baseData"` +} + +type azureMonitorBaseData struct { + Metric string `json:"metric"` + Namespace string `json:"namespace"` + DimensionNames []string `json:"dimNames"` + Series []*azureMonitorSeries `json:"series"` +} + +type azureMonitorSeries struct { 
+ DimensionValues []string `json:"dimValues"` + Min float64 `json:"min"` + Max float64 `json:"max"` + Sum float64 `json:"sum"` + Count int64 `json:"count"` +} + +// Write writes metrics to the remote endpoint +func (a *AzureMonitor) Write(metrics []telegraf.Metric) error { + azmetrics := make(map[uint64]*azureMonitorMetric, len(metrics)) + for _, m := range metrics { + id := hashIDWithTagKeysOnly(m) + if azm, ok := azmetrics[id]; !ok { + amm, err := translate(m, a.NamespacePrefix) + if err != nil { + log.Printf("E! [outputs.azure_monitor]: could not create azure metric for %q; discarding point", m.Name()) + continue + } + azmetrics[id] = amm + } else { + amm, err := translate(m, a.NamespacePrefix) + if err != nil { + log.Printf("E! [outputs.azure_monitor]: could not create azure metric for %q; discarding point", m.Name()) + continue + } + + azmetrics[id].Data.BaseData.Series = append( + azm.Data.BaseData.Series, + amm.Data.BaseData.Series..., + ) + } + } + + if len(azmetrics) == 0 { + return nil + } + + var body []byte + for _, m := range azmetrics { + // Azure Monitor accepts new batches of points in new-line delimited + // JSON, following RFC 4288 (see https://github.com/ndjson/ndjson-spec). + jsonBytes, err := json.Marshal(&m) + if err != nil { + return err + } + // Azure Monitor's maximum request body size of 4MB. Send batches that + // exceed this size via separate write requests. + if (len(body) + len(jsonBytes) + 1) > maxRequestBodySize { + err := a.send(body) + if err != nil { + return err + } + body = nil + } + body = append(body, jsonBytes...) + body = append(body, '\n') + } + + return a.send(body) +} + +func (a *AzureMonitor) send(body []byte) error { + var buf bytes.Buffer + g := gzip.NewWriter(&buf) + if _, err := g.Write(body); err != nil { + return err + } + if err := g.Close(); err != nil { + return err + } + + req, err := http.NewRequest("POST", a.url, &buf) + if err != nil { + return err + } + + req.Header.Set("Content-Encoding", "gzip") + req.Header.Set("Content-Type", "application/x-ndjson") + + // Add the authorization header. WithAuthorization will automatically + // refresh the token if needed. 
+ req, err = autorest.CreatePreparer(a.auth.WithAuthorization()).Prepare(req) + if err != nil { + return fmt.Errorf("unable to fetch authentication credentials: %v", err) + } + + resp, err := a.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = ioutil.ReadAll(resp.Body) + if err != nil || resp.StatusCode < 200 || resp.StatusCode > 299 { + return fmt.Errorf("failed to write batch: [%v] %s", resp.StatusCode, resp.Status) + } + + return nil +} + +func hashIDWithTagKeysOnly(m telegraf.Metric) uint64 { + h := fnv.New64a() + h.Write([]byte(m.Name())) + h.Write([]byte("\n")) + for _, tag := range m.TagList() { + if tag.Key == "" || tag.Value == "" { + continue + } + + h.Write([]byte(tag.Key)) + h.Write([]byte("\n")) + } + b := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(b, uint64(m.Time().UnixNano())) + h.Write(b[:n]) + h.Write([]byte("\n")) + return h.Sum64() +} + +func translate(m telegraf.Metric, prefix string) (*azureMonitorMetric, error) { + var dimensionNames []string + var dimensionValues []string + for _, tag := range m.TagList() { + // Azure custom metrics service supports up to 10 dimensions + if len(dimensionNames) >= 10 { + continue + } + + if tag.Key == "" || tag.Value == "" { + continue + } + + dimensionNames = append(dimensionNames, tag.Key) + dimensionValues = append(dimensionValues, tag.Value) + } + + min, err := getFloatField(m, "min") + if err != nil { + return nil, err + } + max, err := getFloatField(m, "max") + if err != nil { + return nil, err + } + sum, err := getFloatField(m, "sum") + if err != nil { + return nil, err + } + count, err := getIntField(m, "count") + if err != nil { + return nil, err + } + + mn, ns := "Missing", "Missing" + names := strings.SplitN(m.Name(), "-", 2) + if len(names) > 1 { + mn = names[1] + } + if len(names) > 0 { + ns = names[0] + } + ns = prefix + ns + + return &azureMonitorMetric{ + Time: m.Time(), + Data: &azureMonitorData{ + BaseData: &azureMonitorBaseData{ + Metric: mn, + Namespace: ns, + DimensionNames: dimensionNames, + Series: []*azureMonitorSeries{ + { + DimensionValues: dimensionValues, + Min: min, + Max: max, + Sum: sum, + Count: count, + }, + }, + }, + }, + }, nil +} + +func getFloatField(m telegraf.Metric, key string) (float64, error) { + fv, ok := m.GetField(key) + if !ok { + return 0, fmt.Errorf("missing field: %s", key) + } + + if value, ok := fv.(float64); ok { + return value, nil + } + return 0, fmt.Errorf("unexpected type: %s: %T", key, fv) +} + +func getIntField(m telegraf.Metric, key string) (int64, error) { + fv, ok := m.GetField(key) + if !ok { + return 0, fmt.Errorf("missing field: %s", key) + } + + if value, ok := fv.(int64); ok { + return value, nil + } + return 0, fmt.Errorf("unexpected type: %s: %T", key, fv) +} + +// Add will append a metric to the output aggregate +func (a *AzureMonitor) Add(m telegraf.Metric) { + // Azure Monitor only supports aggregates 30 minutes into the past and 4 + // minutes into the future. Future metrics are dropped when pushed. + t := m.Time() + tbucket := time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), 0, 0, t.Location()) + if tbucket.Before(a.timeFunc().Add(-time.Minute * 30)) { + a.MetricOutsideWindow.Incr(1) + return + } + + // Azure Monitor doesn't have a string value type, so convert string fields + // to dimensions (a.k.a. tags) if enabled. 
+ if a.StringsAsDimensions { + for _, f := range m.FieldList() { + if v, ok := f.Value.(string); ok { + m.AddTag(f.Key, v) + } + } + } + + for _, f := range m.FieldList() { + fv, ok := convert(f.Value) + if !ok { + continue + } + + // Azure Monitor does not support fields so the field name is appended + // to the metric name. + name := m.Name() + "-" + sanitize(f.Key) + id := hashIDWithField(m.HashID(), f.Key) + + _, ok = a.cache[tbucket] + if !ok { + // Time bucket does not exist and needs to be created. + a.cache[tbucket] = make(map[uint64]*aggregate) + } + + // Fetch existing aggregate + var agg *aggregate + agg, ok = a.cache[tbucket][id] + if !ok { + agg := &aggregate{ + name: name, + min: fv, + max: fv, + sum: fv, + count: 1, + } + for _, tag := range m.TagList() { + dim := dimension{ + name: tag.Key, + value: tag.Value, + } + agg.dimensions = append(agg.dimensions, dim) + } + agg.updated = true + a.cache[tbucket][id] = agg + continue + } + + if fv < agg.min { + agg.min = fv + } + if fv > agg.max { + agg.max = fv + } + agg.sum += fv + agg.count++ + agg.updated = true + } +} + +func convert(in interface{}) (float64, bool) { + switch v := in.(type) { + case int64: + return float64(v), true + case uint64: + return float64(v), true + case float64: + return v, true + case bool: + if v { + return 1, true + } + return 0, true + default: + return 0, false + } +} + +var invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) + +func sanitize(value string) string { + return invalidNameCharRE.ReplaceAllString(value, "_") +} + +func hashIDWithField(id uint64, fk string) uint64 { + h := fnv.New64a() + b := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(b, id) + h.Write(b[:n]) + h.Write([]byte("\n")) + h.Write([]byte(fk)) + h.Write([]byte("\n")) + return h.Sum64() +} + +// Push sends metrics to the output metric buffer +func (a *AzureMonitor) Push() []telegraf.Metric { + var metrics []telegraf.Metric + for tbucket, aggs := range a.cache { + // Do not send metrics early + if tbucket.After(a.timeFunc().Add(-time.Minute)) { + continue + } + for _, agg := range aggs { + // Only send aggregates that have had an update since the last push. + if !agg.updated { + continue + } + + tags := make(map[string]string, len(agg.dimensions)) + for _, tag := range agg.dimensions { + tags[tag.name] = tag.value + } + + m, err := metric.New(agg.name, + tags, + map[string]interface{}{ + "min": agg.min, + "max": agg.max, + "sum": agg.sum, + "count": agg.count, + }, + tbucket, + ) + + if err != nil { + log.Printf("E! [outputs.azure_monitor]: could not create metric for aggregation %q; discarding point", agg.name) + } + + metrics = append(metrics, m) + } + } + return metrics +} + +// Reset clears the cache of aggregate metrics +func (a *AzureMonitor) Reset() { + for tbucket := range a.cache { + // Remove aggregates older than 30 minutes + if tbucket.Before(a.timeFunc().Add(-time.Minute * 30)) { + delete(a.cache, tbucket) + continue + } + // Metrics updated within the latest 1m have not been pushed and should + // not be cleared. 
+ if tbucket.After(a.timeFunc().Add(-time.Minute)) { + continue + } + for id := range a.cache[tbucket] { + a.cache[tbucket][id].updated = false + } + } +} + +func init() { + outputs.Add("azure_monitor", func() telegraf.Output { + return &AzureMonitor{ + timeFunc: time.Now, + } + }) +} diff --git a/plugins/outputs/azure_monitor/azure_monitor_test.go b/plugins/outputs/azure_monitor/azure_monitor_test.go new file mode 100644 index 000000000..6fb40805e --- /dev/null +++ b/plugins/outputs/azure_monitor/azure_monitor_test.go @@ -0,0 +1,361 @@ +package azure_monitor + +import ( + "bufio" + "compress/gzip" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestAggregate(t *testing.T) { + tests := []struct { + name string + plugin *AzureMonitor + metrics []telegraf.Metric + addTime time.Time + pushTime time.Time + check func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) + }{ + { + name: "add metric outside window is dropped", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(3600, 0), + pushTime: time.Unix(3600, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + require.Equal(t, int64(1), plugin.MetricOutsideWindow.Get()) + require.Len(t, metrics, 0) + }, + }, + { + name: "metric not sent until period expires", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(0, 0), + pushTime: time.Unix(0, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + require.Len(t, metrics, 0) + }, + }, + { + name: "add strings as dimensions", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + StringsAsDimensions: true, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "localhost", + }, + map[string]interface{}{ + "value": 42, + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(0, 0), + pushTime: time.Unix(3600, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{ + "host": "localhost", + "message": "howdy", + }, + map[string]interface{}{ + "min": 42.0, + "max": 42.0, + "sum": 42.0, + "count": 1, + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, metrics) + }, + }, + { + name: "add metric to cache and push", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + cache: make(map[time.Time]map[uint64]*aggregate, 36), + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(0, 0), + pushTime: time.Unix(3600, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": 42.0, + "max": 42.0, + "sum": 42.0, + "count": 1, + }, + 
time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, metrics) + }, + }, + { + name: "added metric are aggregated", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + cache: make(map[time.Time]map[uint64]*aggregate, 36), + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 84, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 2, + }, + time.Unix(0, 0), + ), + }, + addTime: time.Unix(0, 0), + pushTime: time.Unix(3600, 0), + check: func(t *testing.T, plugin *AzureMonitor, metrics []telegraf.Metric) { + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": 2.0, + "max": 84.0, + "sum": 128.0, + "count": 3, + }, + time.Unix(0, 0), + ), + } + + testutil.RequireMetricsEqual(t, expected, metrics) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.plugin.Connect() + require.NoError(t, err) + + // Reset globals + tt.plugin.MetricOutsideWindow.Set(0) + + tt.plugin.timeFunc = func() time.Time { return tt.addTime } + for _, m := range tt.metrics { + tt.plugin.Add(m) + } + + tt.plugin.timeFunc = func() time.Time { return tt.pushTime } + metrics := tt.plugin.Push() + tt.plugin.Reset() + + tt.check(t, tt.plugin, metrics) + }) + } +} + +func TestWrite(t *testing.T) { + readBody := func(r *http.Request) ([]*azureMonitorMetric, error) { + gz, err := gzip.NewReader(r.Body) + if err != nil { + return nil, err + } + scanner := bufio.NewScanner(gz) + + azmetrics := make([]*azureMonitorMetric, 0) + for scanner.Scan() { + line := scanner.Text() + var amm azureMonitorMetric + err = json.Unmarshal([]byte(line), &amm) + if err != nil { + return nil, err + } + azmetrics = append(azmetrics, &amm) + } + + return azmetrics, nil + } + + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + url := "http://" + ts.Listener.Addr().String() + "/metrics" + + tests := []struct { + name string + plugin *AzureMonitor + metrics []telegraf.Metric + handler func(t *testing.T, w http.ResponseWriter, r *http.Request) + }{ + { + name: "if not an azure metric nothing is sent", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + t.Fatal("should not call") + }, + }, + { + name: "single azure metric", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": float64(42), + "max": float64(42), + "sum": float64(42), + "count": int64(1), + }, + time.Unix(0, 0), + ), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + azmetrics, err := readBody(r) + require.NoError(t, err) + require.Len(t, azmetrics, 1) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "multiple azure metric", + plugin: &AzureMonitor{ + Region: "test", + ResourceID: "/test", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": float64(42), + "max": 
float64(42), + "sum": float64(42), + "count": int64(1), + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu-value", + map[string]string{}, + map[string]interface{}{ + "min": float64(42), + "max": float64(42), + "sum": float64(42), + "count": int64(1), + }, + time.Unix(60, 0), + ), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + azmetrics, err := readBody(r) + require.NoError(t, err) + require.Len(t, azmetrics, 2) + w.WriteHeader(http.StatusOK) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tt.handler(t, w, r) + }) + + err := tt.plugin.Connect() + require.NoError(t, err) + + // override real authorizer and write url + tt.plugin.auth = autorest.NullAuthorizer{} + tt.plugin.url = url + + err = tt.plugin.Write(tt.metrics) + require.NoError(t, err) + }) + } +} diff --git a/plugins/outputs/cloud_pubsub/README.md b/plugins/outputs/cloud_pubsub/README.md new file mode 100644 index 000000000..3a4088b61 --- /dev/null +++ b/plugins/outputs/cloud_pubsub/README.md @@ -0,0 +1,64 @@ +# Google Cloud PubSub Output Plugin + +The GCP PubSub plugin publishes metrics to a [Google Cloud PubSub][pubsub] topic +as one of the supported [output data formats][]. + + +### Configuration + +This section contains the default TOML to configure the plugin. You can +generate it using `telegraf --usage cloud_pubsub`. + +```toml +[[outputs.cloud_pubsub]] + ## Required. Name of Google Cloud Platform (GCP) Project that owns + ## the given PubSub topic. + project = "my-project" + + ## Required. Name of PubSub topic to publish metrics to. + topic = "my-topic" + + ## Required. Data format to consume. + ## Each data format has its own unique set of configuration options. + ## Read more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. + # credentials_file = "path/to/my/creds.json" + + ## Optional. If true, will send all metrics per write in one PubSub message. + # send_batched = true + + ## The following publish_* parameters specifically configures batching + ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read + ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings + + ## Optional. Send a request to PubSub (i.e. actually publish a batch) + ## when it has this many PubSub messages. If send_batched is true, + ## this is ignored and treated as if it were 1. + # publish_count_threshold = 1000 + + ## Optional. Send a request to PubSub (i.e. actually publish a batch) + ## when it has this many PubSub messages. If send_batched is true, + ## this is ignored and treated as if it were 1 + # publish_byte_threshold = 1000000 + + ## Optional. Specifically configures requests made to the PubSub API. + # publish_num_go_routines = 2 + + ## Optional. Specifies a timeout for requests to the PubSub API. + # publish_timeout = "30s" + + ## Optional. If true, published PubSub message data will be base64-encoded. + # base64_data = false + + ## Optional. PubSub attributes to add to metrics. 
+ # [[inputs.pubsub.attributes]] + # my_attr = "tag_value" +``` + +[pubsub]: https://cloud.google.com/pubsub +[output data formats]: /docs/DATA_FORMATS_OUTPUT.md diff --git a/plugins/outputs/cloud_pubsub/pubsub.go b/plugins/outputs/cloud_pubsub/pubsub.go new file mode 100644 index 000000000..5abb04afb --- /dev/null +++ b/plugins/outputs/cloud_pubsub/pubsub.go @@ -0,0 +1,282 @@ +package cloud_pubsub + +import ( + "context" + "encoding/base64" + "fmt" + "log" + "sync" + + "cloud.google.com/go/pubsub" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" +) + +const sampleConfig = ` + ## Required. Name of Google Cloud Platform (GCP) Project that owns + ## the given PubSub topic. + project = "my-project" + + ## Required. Name of PubSub topic to publish metrics to. + topic = "my-topic" + + ## Required. Data format to consume. + ## Each data format has its own unique set of configuration options. + ## Read more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" + + ## Optional. Filepath for GCP credentials JSON file to authorize calls to + ## PubSub APIs. If not set explicitly, Telegraf will attempt to use + ## Application Default Credentials, which is preferred. + # credentials_file = "path/to/my/creds.json" + + ## Optional. If true, will send all metrics per write in one PubSub message. + # send_batched = true + + ## The following publish_* parameters specifically configures batching + ## requests made to the GCP Cloud PubSub API via the PubSub Golang library. Read + ## more here: https://godoc.org/cloud.google.com/go/pubsub#PublishSettings + + ## Optional. Send a request to PubSub (i.e. actually publish a batch) + ## when it has this many PubSub messages. If send_batched is true, + ## this is ignored and treated as if it were 1. + # publish_count_threshold = 1000 + + ## Optional. Send a request to PubSub (i.e. actually publish a batch) + ## when it has this many PubSub messages. If send_batched is true, + ## this is ignored and treated as if it were 1 + # publish_byte_threshold = 1000000 + + ## Optional. Specifically configures requests made to the PubSub API. + # publish_num_go_routines = 2 + + ## Optional. Specifies a timeout for requests to the PubSub API. + # publish_timeout = "30s" + + ## Optional. If true, published PubSub message data will be base64-encoded. + # base64_data = false + + ## Optional. PubSub attributes to add to metrics. 
+ # [[inputs.pubsub.attributes]] + # my_attr = "tag_value" +` + +type PubSub struct { + CredentialsFile string `toml:"credentials_file"` + Project string `toml:"project"` + Topic string `toml:"topic"` + Attributes map[string]string `toml:"attributes"` + + SendBatched bool `toml:"send_batched"` + PublishCountThreshold int `toml:"publish_count_threshold"` + PublishByteThreshold int `toml:"publish_byte_threshold"` + PublishNumGoroutines int `toml:"publish_num_go_routines"` + PublishTimeout internal.Duration `toml:"publish_timeout"` + Base64Data bool `toml:"base64_data"` + + t topic + c *pubsub.Client + + stubTopic func(id string) topic + + serializer serializers.Serializer + publishResults []publishResult +} + +func (ps *PubSub) Description() string { + return "Publish Telegraf metrics to a Google Cloud PubSub topic" +} + +func (ps *PubSub) SampleConfig() string { + return sampleConfig +} + +func (ps *PubSub) SetSerializer(serializer serializers.Serializer) { + ps.serializer = serializer +} + +func (ps *PubSub) Connect() error { + if ps.Topic == "" { + return fmt.Errorf(`"topic" is required`) + } + + if ps.Project == "" { + return fmt.Errorf(`"project" is required`) + } + + if ps.stubTopic == nil { + return ps.initPubSubClient() + } else { + return nil + } +} + +func (ps *PubSub) Close() error { + if ps.t != nil { + ps.t.Stop() + } + return nil +} + +func (ps *PubSub) Write(metrics []telegraf.Metric) error { + ps.refreshTopic() + + // Serialize metrics and package into appropriate PubSub messages + msgs, err := ps.toMessages(metrics) + if err != nil { + return err + } + + cctx, cancel := context.WithCancel(context.Background()) + + // Publish all messages - each call to Publish returns a future. + ps.publishResults = make([]publishResult, len(msgs)) + for i, m := range msgs { + ps.publishResults[i] = ps.t.Publish(cctx, m) + } + + // topic.Stop() forces all published messages to be sent, even + // if PubSub batch limits have not been reached. 
+ go ps.t.Stop() + + return ps.waitForResults(cctx, cancel) +} + +func (ps *PubSub) initPubSubClient() error { + var credsOpt option.ClientOption + if ps.CredentialsFile != "" { + credsOpt = option.WithCredentialsFile(ps.CredentialsFile) + } else { + creds, err := google.FindDefaultCredentials(context.Background(), pubsub.ScopeCloudPlatform) + if err != nil { + return fmt.Errorf( + "unable to find GCP Application Default Credentials: %v."+ + "Either set ADC or provide CredentialsFile config", err) + } + credsOpt = option.WithCredentials(creds) + } + client, err := pubsub.NewClient( + context.Background(), + ps.Project, + credsOpt, + option.WithScopes(pubsub.ScopeCloudPlatform), + option.WithUserAgent(internal.ProductToken()), + ) + if err != nil { + return fmt.Errorf("unable to generate PubSub client: %v", err) + } + ps.c = client + return nil +} + +func (ps *PubSub) refreshTopic() { + if ps.stubTopic != nil { + ps.t = ps.stubTopic(ps.Topic) + } else { + t := ps.c.Topic(ps.Topic) + ps.t = &topicWrapper{t} + } + ps.t.SetPublishSettings(ps.publishSettings()) +} + +func (ps *PubSub) publishSettings() pubsub.PublishSettings { + settings := pubsub.PublishSettings{} + if ps.PublishNumGoroutines > 0 { + settings.NumGoroutines = ps.PublishNumGoroutines + } + + if ps.PublishTimeout.Duration > 0 { + settings.CountThreshold = 1 + } + + if ps.SendBatched { + settings.CountThreshold = 1 + } else if ps.PublishCountThreshold > 0 { + settings.CountThreshold = ps.PublishCountThreshold + } + + if ps.PublishByteThreshold > 0 { + settings.ByteThreshold = ps.PublishByteThreshold + } + + return settings +} + +func (ps *PubSub) toMessages(metrics []telegraf.Metric) ([]*pubsub.Message, error) { + if ps.SendBatched { + b, err := ps.serializer.SerializeBatch(metrics) + if err != nil { + return nil, err + } + + if ps.Base64Data { + encoded := base64.StdEncoding.EncodeToString(b) + b = []byte(encoded) + } + + msg := &pubsub.Message{Data: b} + if ps.Attributes != nil { + msg.Attributes = ps.Attributes + } + return []*pubsub.Message{msg}, nil + } + + msgs := make([]*pubsub.Message, len(metrics)) + for i, m := range metrics { + b, err := ps.serializer.Serialize(m) + if err != nil { + log.Printf("D! 
[outputs.cloud_pubsub] Could not serialize metric: %v", err) + continue + } + + if ps.Base64Data { + encoded := base64.StdEncoding.EncodeToString(b) + b = []byte(encoded) + } + + msgs[i] = &pubsub.Message{ + Data: b, + } + if ps.Attributes != nil { + msgs[i].Attributes = ps.Attributes + } + } + + return msgs, nil +} + +func (ps *PubSub) waitForResults(ctx context.Context, cancel context.CancelFunc) error { + var pErr error + var setErr sync.Once + var wg sync.WaitGroup + + for _, pr := range ps.publishResults { + wg.Add(1) + + go func(r publishResult) { + defer wg.Done() + // Wait on each future + _, err := r.Get(ctx) + if err != nil { + setErr.Do(func() { + pErr = err + cancel() + }) + } + }(pr) + } + + wg.Wait() + return pErr +} + +func init() { + outputs.Add("cloud_pubsub", func() telegraf.Output { + return &PubSub{} + }) +} diff --git a/plugins/outputs/cloud_pubsub/pubsub_test.go b/plugins/outputs/cloud_pubsub/pubsub_test.go new file mode 100644 index 000000000..76eb518d7 --- /dev/null +++ b/plugins/outputs/cloud_pubsub/pubsub_test.go @@ -0,0 +1,204 @@ +package cloud_pubsub + +import ( + "testing" + + "cloud.google.com/go/pubsub" + "encoding/base64" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestPubSub_WriteSingle(t *testing.T) { + + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error */}, + } + + settings := pubsub.DefaultPublishSettings + settings.CountThreshold = 1 + ps, topic, metrics := getTestResources(t, settings, testMetrics) + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyRawMetricPublished(t, testM.m, topic.published) + } +} + +func TestPubSub_WriteWithAttribute(t *testing.T) { + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error*/}, + } + + settings := pubsub.DefaultPublishSettings + ps, topic, metrics := getTestResources(t, settings, testMetrics) + ps.Attributes = map[string]string{ + "foo1": "bar1", + "foo2": "bar2", + } + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + msg := verifyRawMetricPublished(t, testM.m, topic.published) + assert.Equalf(t, "bar1", msg.Attributes["foo1"], "expected attribute foo1=bar1") + assert.Equalf(t, "bar2", msg.Attributes["foo2"], "expected attribute foo2=bar2") + } +} + +func TestPubSub_WriteMultiple(t *testing.T) { + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error*/}, + {testutil.TestMetric("value_2", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + + ps, topic, metrics := getTestResources(t, settings, testMetrics) + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyRawMetricPublished(t, testM.m, topic.published) + } + assert.Equalf(t, 1, topic.bundleCount, "unexpected bundle count") +} + +func TestPubSub_WriteOverCountThreshold(t *testing.T) { + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error*/}, + {testutil.TestMetric("value_2", "test"), false}, + {testutil.TestMetric("value_3", "test"), false}, + {testutil.TestMetric("value_4", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + settings.CountThreshold = 2 + + ps, topic, metrics 
:= getTestResources(t, settings, testMetrics) + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyRawMetricPublished(t, testM.m, topic.published) + } + assert.Equalf(t, 2, topic.bundleCount, "unexpected bundle count") +} + +func TestPubSub_WriteOverByteThreshold(t *testing.T) { + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error*/}, + {testutil.TestMetric("value_2", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + settings.CountThreshold = 10 + settings.ByteThreshold = 1 + + ps, topic, metrics := getTestResources(t, settings, testMetrics) + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyRawMetricPublished(t, testM.m, topic.published) + } + assert.Equalf(t, 2, topic.bundleCount, "unexpected bundle count") +} + +func TestPubSub_WriteBase64Single(t *testing.T) { + + testMetrics := []testMetric{ + {testutil.TestMetric("value_1", "test"), false /*return error */}, + {testutil.TestMetric("value_2", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + settings.CountThreshold = 1 + ps, topic, metrics := getTestResources(t, settings, testMetrics) + ps.Base64Data = true + + err := ps.Write(metrics) + if err != nil { + t.Fatalf("got unexpected error: %v", err) + } + + for _, testM := range testMetrics { + verifyMetricPublished(t, testM.m, topic.published, true /* base64encoded */) + } +} + +func TestPubSub_Error(t *testing.T) { + testMetrics := []testMetric{ + // Force this batch to return error + {testutil.TestMetric("value_1", "test"), true}, + {testutil.TestMetric("value_2", "test"), false}, + } + + settings := pubsub.DefaultPublishSettings + ps, _, metrics := getTestResources(t, settings, testMetrics) + + err := ps.Write(metrics) + if err == nil { + t.Fatalf("expected error") + } + if err.Error() != errMockFail { + t.Fatalf("expected fake error, got %v", err) + } +} + +func verifyRawMetricPublished(t *testing.T, m telegraf.Metric, published map[string]*pubsub.Message) *pubsub.Message { + return verifyMetricPublished(t, m, published, false) +} + +func verifyMetricPublished(t *testing.T, m telegraf.Metric, published map[string]*pubsub.Message, base64Encoded bool) *pubsub.Message { + p, _ := parsers.NewInfluxParser() + + v, _ := m.GetField("value") + psMsg, ok := published[v.(string)] + if !ok { + t.Fatalf("expected metric to get published (value: %s)", v.(string)) + } + + data := psMsg.Data + if base64Encoded { + v, err := base64.StdEncoding.DecodeString(string(psMsg.Data)) + if err != nil { + t.Fatalf("Unable to decode expected base64-encoded message: %s", err) + } + data = []byte(v) + } + + parsed, err := p.Parse(data) + if err != nil { + t.Fatalf("could not parse influxdb metric from published message: %s", string(psMsg.Data)) + } + if len(parsed) > 1 { + t.Fatalf("expected only one influxdb metric per published message, got %d", len(published)) + } + + publishedV, ok := parsed[0].GetField("value") + if !ok { + t.Fatalf("expected published metric to have a value") + } + assert.Equal(t, v, publishedV, "incorrect published value") + + return psMsg +} diff --git a/plugins/outputs/cloud_pubsub/topic_gcp.go b/plugins/outputs/cloud_pubsub/topic_gcp.go new file mode 100644 index 000000000..a85c6f39e --- /dev/null +++ b/plugins/outputs/cloud_pubsub/topic_gcp.go @@ -0,0 +1,46 @@ +package cloud_pubsub + +import ( + "cloud.google.com/go/pubsub" + 
"context" +) + +type ( + topicFactory func(string) (topic, error) + + topic interface { + ID() string + Stop() + Publish(ctx context.Context, msg *pubsub.Message) publishResult + PublishSettings() pubsub.PublishSettings + SetPublishSettings(settings pubsub.PublishSettings) + } + + publishResult interface { + Get(ctx context.Context) (string, error) + } + + topicWrapper struct { + topic *pubsub.Topic + } +) + +func (tw *topicWrapper) ID() string { + return tw.topic.ID() +} + +func (tw *topicWrapper) Stop() { + tw.topic.Stop() +} + +func (tw *topicWrapper) Publish(ctx context.Context, msg *pubsub.Message) publishResult { + return tw.topic.Publish(ctx, msg) +} + +func (tw *topicWrapper) PublishSettings() pubsub.PublishSettings { + return tw.topic.PublishSettings +} + +func (tw *topicWrapper) SetPublishSettings(settings pubsub.PublishSettings) { + tw.topic.PublishSettings = settings +} diff --git a/plugins/outputs/cloud_pubsub/topic_stubbed.go b/plugins/outputs/cloud_pubsub/topic_stubbed.go new file mode 100644 index 000000000..d78d4fbd4 --- /dev/null +++ b/plugins/outputs/cloud_pubsub/topic_stubbed.go @@ -0,0 +1,212 @@ +package cloud_pubsub + +import ( + "context" + "errors" + "fmt" + "runtime" + "sync" + "testing" + "time" + + "cloud.google.com/go/pubsub" + "encoding/base64" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/serializers" + "google.golang.org/api/support/bundler" +) + +const ( + errMockFail = "this is an error" +) + +type ( + testMetric struct { + m telegraf.Metric + returnErr bool + } + + bundledMsg struct { + *pubsub.Message + *stubResult + } + + stubResult struct { + metricIds []string + + sendError bool + err chan error + done chan struct{} + } + + stubTopic struct { + Settings pubsub.PublishSettings + ReturnErr map[string]bool + parsers.Parser + *testing.T + + stopped bool + pLock sync.Mutex + + published map[string]*pubsub.Message + + bundler *bundler.Bundler + bLock sync.Mutex + bundleCount int + } +) + +func getTestResources(tT *testing.T, settings pubsub.PublishSettings, testM []testMetric) (*PubSub, *stubTopic, []telegraf.Metric) { + s, _ := serializers.NewInfluxSerializer() + + metrics := make([]telegraf.Metric, len(testM)) + t := &stubTopic{ + T: tT, + ReturnErr: make(map[string]bool), + published: make(map[string]*pubsub.Message), + } + + for i, tm := range testM { + metrics[i] = tm.m + if tm.returnErr { + v, _ := tm.m.GetField("value") + t.ReturnErr[v.(string)] = true + } + } + + ps := &PubSub{ + Project: "test-project", + Topic: "test-topic", + stubTopic: func(string) topic { return t }, + PublishCountThreshold: settings.CountThreshold, + PublishByteThreshold: settings.ByteThreshold, + PublishNumGoroutines: settings.NumGoroutines, + PublishTimeout: internal.Duration{Duration: settings.Timeout}, + } + ps.SetSerializer(s) + + return ps, t, metrics +} + +func (t *stubTopic) ID() string { + return "test-topic" +} + +func (t *stubTopic) Stop() { + t.pLock.Lock() + defer t.pLock.Unlock() + + t.stopped = true + t.bundler.Flush() +} + +func (t *stubTopic) Publish(ctx context.Context, msg *pubsub.Message) publishResult { + t.pLock.Lock() + defer t.pLock.Unlock() + + if t.stopped || ctx.Err() != nil { + t.Fatalf("publish called after stop") + } + + ids := t.parseIDs(msg) + r := &stubResult{ + metricIds: ids, + err: make(chan error, 1), + done: make(chan struct{}, 1), + } + + for _, id := range ids { + _, ok := t.ReturnErr[id] + r.sendError = 
r.sendError || ok + } + + bundled := &bundledMsg{msg, r} + err := t.bundler.Add(bundled, len(msg.Data)) + if err != nil { + t.Fatalf("unexpected error while adding to bundle: %v", err) + } + return r +} + +func (t *stubTopic) PublishSettings() pubsub.PublishSettings { + return t.Settings +} + +func (t *stubTopic) SetPublishSettings(settings pubsub.PublishSettings) { + t.Settings = settings + t.initBundler() +} + +func (t *stubTopic) initBundler() *stubTopic { + t.bundler = bundler.NewBundler(&bundledMsg{}, t.sendBundle()) + t.bundler.DelayThreshold = 10 * time.Second + t.bundler.BundleCountThreshold = t.Settings.CountThreshold + if t.bundler.BundleCountThreshold > pubsub.MaxPublishRequestCount { + t.bundler.BundleCountThreshold = pubsub.MaxPublishRequestCount + } + t.bundler.BundleByteThreshold = t.Settings.ByteThreshold + t.bundler.BundleByteLimit = pubsub.MaxPublishRequestBytes + t.bundler.HandlerLimit = 25 * runtime.GOMAXPROCS(0) + + return t +} + +func (t *stubTopic) sendBundle() func(items interface{}) { + return func(items interface{}) { + t.bLock.Lock() + defer t.bLock.Unlock() + + bundled := items.([]*bundledMsg) + + for _, msg := range bundled { + r := msg.stubResult + for _, id := range r.metricIds { + t.published[id] = msg.Message + } + + if r.sendError { + r.err <- errors.New(errMockFail) + } else { + r.done <- struct{}{} + } + } + + t.bundleCount++ + } +} + +func (t *stubTopic) parseIDs(msg *pubsub.Message) []string { + p, _ := parsers.NewInfluxParser() + metrics, err := p.Parse(msg.Data) + if err != nil { + // Just attempt to base64-decode first before returning error. + d, err := base64.StdEncoding.DecodeString(string(msg.Data)) + if err != nil { + t.Errorf("unable to base64-decode potential test message: %v", err) + } + metrics, err = p.Parse(d) + if err != nil { + t.Fatalf("unexpected parsing error: %v", err) + } + } + + ids := make([]string, len(metrics)) + for i, met := range metrics { + id, _ := met.GetField("value") + ids[i] = id.(string) + } + return ids +} + +func (r *stubResult) Get(ctx context.Context) (string, error) { + select { + case <-ctx.Done(): + return "", ctx.Err() + case err := <-r.err: + return "", err + case <-r.done: + return fmt.Sprintf("id-%s", r.metricIds[0]), nil + } +} diff --git a/plugins/outputs/cloudwatch/README.md b/plugins/outputs/cloudwatch/README.md index c44ac4ead..418fe86ff 100644 --- a/plugins/outputs/cloudwatch/README.md +++ b/plugins/outputs/cloudwatch/README.md @@ -36,3 +36,16 @@ Examples include but are not limited to: ### namespace The namespace used for AWS CloudWatch metrics. + +### write_statistics + +If you have a large amount of metrics, you should consider to send statistic +values instead of raw metrics which could not only improve performance but +also save AWS API cost. If enable this flag, this plugin would parse the required +[CloudWatch statistic fields](https://docs.aws.amazon.com/sdk-for-go/api/service/cloudwatch/#StatisticSet) +(count, min, max, and sum) and send them to CloudWatch. You could use `basicstats` +aggregator to calculate those fields. If not all statistic fields are available, +all fields would still be sent as raw metrics. 
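
To make the `basicstats` pairing mentioned above concrete, here is a minimal configuration sketch that feeds aggregated fields into this output with `write_statistics` enabled. The `region` and `namespace` values are placeholders, and the exact `stats` list accepted by your `basicstats` version is an assumption; check that aggregator's README before relying on it.

```toml
# Compute the statistic fields this output can fold into a StatisticSet.
[[aggregators.basicstats]]
  period = "60s"
  drop_original = true
  ## Assumed option: restrict the emitted stats to the four CloudWatch needs.
  stats = ["count", "min", "max", "sum"]

[[outputs.cloudwatch]]
  region = "us-east-1"              # placeholder
  namespace = "InfluxData/Telegraf" # placeholder
  ## Fold *_count/*_min/*_max/*_sum fields into a single StatisticSet datum.
  write_statistics = true
```

With this in place, a field such as `usage_idle` on the `cpu` measurement arrives as `usage_idle_count`, `usage_idle_min`, `usage_idle_max`, and `usage_idle_sum`, and the plugin sends one `cpu_usage_idle` datum carrying statistic values instead of four separate raw datums.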
+ +### high_resolution_metrics +Enable high resolution metrics (1 second precision) instead of standard ones (60 seconds precision) \ No newline at end of file diff --git a/plugins/outputs/cloudwatch/cloudwatch.go b/plugins/outputs/cloudwatch/cloudwatch.go index 39b13cf29..5e59ba2aa 100644 --- a/plugins/outputs/cloudwatch/cloudwatch.go +++ b/plugins/outputs/cloudwatch/cloudwatch.go @@ -9,24 +9,151 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" - "github.com/aws/aws-sdk-go/service/sts" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/internal/config/aws" + internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/plugins/outputs" ) type CloudWatch struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` - Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace - svc *cloudwatch.CloudWatch + Namespace string `toml:"namespace"` // CloudWatch Metrics Namespace + HighResolutionMetrics bool `toml:"high_resolution_metrics"` + svc *cloudwatch.CloudWatch + + WriteStatistics bool `toml:"write_statistics"` +} + +type statisticType int + +const ( + statisticTypeNone statisticType = iota + statisticTypeMax + statisticTypeMin + statisticTypeSum + statisticTypeCount +) + +type cloudwatchField interface { + addValue(sType statisticType, value float64) + buildDatum() []*cloudwatch.MetricDatum +} + +type statisticField struct { + metricName string + fieldName string + tags map[string]string + values map[statisticType]float64 + timestamp time.Time + storageResolution int64 +} + +func (f *statisticField) addValue(sType statisticType, value float64) { + if sType != statisticTypeNone { + f.values[sType] = value + } +} + +func (f *statisticField) buildDatum() []*cloudwatch.MetricDatum { + + var datums []*cloudwatch.MetricDatum + + if f.hasAllFields() { + // If we have all required fields, we build datum with StatisticValues + min, _ := f.values[statisticTypeMin] + max, _ := f.values[statisticTypeMax] + sum, _ := f.values[statisticTypeSum] + count, _ := f.values[statisticTypeCount] + + datum := &cloudwatch.MetricDatum{ + MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + StatisticValues: &cloudwatch.StatisticSet{ + Minimum: aws.Float64(min), + Maximum: aws.Float64(max), + Sum: aws.Float64(sum), + SampleCount: aws.Float64(count), + }, + StorageResolution: aws.Int64(f.storageResolution), + } + + datums = append(datums, datum) + + } else { + // If we don't have all required fields, we build each field as independent datum + for sType, value := range f.values { + datum := &cloudwatch.MetricDatum{ + Value: aws.Float64(value), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + } + + switch sType { + case statisticTypeMin: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "min"}, "_")) + case statisticTypeMax: + datum.MetricName = 
aws.String(strings.Join([]string{f.metricName, f.fieldName, "max"}, "_")) + case statisticTypeSum: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "sum"}, "_")) + case statisticTypeCount: + datum.MetricName = aws.String(strings.Join([]string{f.metricName, f.fieldName, "count"}, "_")) + default: + // should not be here + continue + } + + datums = append(datums, datum) + } + } + + return datums +} + +func (f *statisticField) hasAllFields() bool { + + _, hasMin := f.values[statisticTypeMin] + _, hasMax := f.values[statisticTypeMax] + _, hasSum := f.values[statisticTypeSum] + _, hasCount := f.values[statisticTypeCount] + + return hasMin && hasMax && hasSum && hasCount +} + +type valueField struct { + metricName string + fieldName string + tags map[string]string + value float64 + timestamp time.Time + storageResolution int64 +} + +func (f *valueField) addValue(sType statisticType, value float64) { + if sType == statisticTypeNone { + f.value = value + } +} + +func (f *valueField) buildDatum() []*cloudwatch.MetricDatum { + + return []*cloudwatch.MetricDatum{ + { + MetricName: aws.String(strings.Join([]string{f.metricName, f.fieldName}, "_")), + Value: aws.Float64(f.value), + Dimensions: BuildDimensions(f.tags), + Timestamp: aws.Time(f.timestamp), + StorageResolution: aws.Int64(f.storageResolution), + }, + } } var sampleConfig = ` @@ -48,8 +175,25 @@ var sampleConfig = ` #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + ## Namespace for the CloudWatch MetricDatums namespace = "InfluxData/Telegraf" + + ## If you have a large amount of metrics, you should consider to send statistic + ## values instead of raw metrics which could not only improve performance but + ## also save AWS API cost. If enable this flag, this plugin would parse the required + ## CloudWatch statistic fields (count, min, max, and sum) and send them to CloudWatch. + ## You could use basicstats aggregator to calculate those fields. If not all statistic + ## fields are available, all fields would still be sent as raw metrics. + # write_statistics = false + + ## Enable high resolution metrics of 1 second (if not enabled, standard resolution are of 60 seconds precision) + # high_resolution_metrics = false ` func (c *CloudWatch) SampleConfig() string { @@ -62,29 +206,17 @@ func (c *CloudWatch) Description() string { func (c *CloudWatch) Connect() error { credentialConfig := &internalaws.CredentialConfig{ - Region: c.Region, - AccessKey: c.AccessKey, - SecretKey: c.SecretKey, - RoleARN: c.RoleARN, - Profile: c.Profile, - Filename: c.Filename, - Token: c.Token, + Region: c.Region, + AccessKey: c.AccessKey, + SecretKey: c.SecretKey, + RoleARN: c.RoleARN, + Profile: c.Profile, + Filename: c.Filename, + Token: c.Token, + EndpointURL: c.EndpointURL, } configProvider := credentialConfig.Credentials() - - stsService := sts.New(configProvider) - - params := &sts.GetCallerIdentityInput{} - - _, err := stsService.GetCallerIdentity(params) - - if err != nil { - log.Printf("E! 
cloudwatch: Cannot use credentials to connect to AWS : %+v \n", err.Error()) - return err - } - c.svc = cloudwatch.New(configProvider) - return nil } @@ -93,27 +225,17 @@ func (c *CloudWatch) Close() error { } func (c *CloudWatch) Write(metrics []telegraf.Metric) error { + + var datums []*cloudwatch.MetricDatum for _, m := range metrics { - err := c.WriteSinglePoint(m) - if err != nil { - return err - } + d := BuildMetricDatum(c.WriteStatistics, c.HighResolutionMetrics, m) + datums = append(datums, d...) } - return nil -} - -// Write data for a single point. A point can have many fields and one field -// is equal to one MetricDatum. There is a limit on how many MetricDatums a -// request can have so we process one Point at a time. -func (c *CloudWatch) WriteSinglePoint(point telegraf.Metric) error { - datums := BuildMetricDatum(point) - const maxDatumsPerCall = 20 // PutMetricData only supports up to 20 data metrics per call for _, partition := range PartitionDatums(maxDatumsPerCall, datums) { err := c.WriteToCloudWatch(partition) - if err != nil { return err } @@ -161,67 +283,65 @@ func PartitionDatums(size int, datums []*cloudwatch.MetricDatum) [][]*cloudwatch return partitions } -// Make a MetricDatum for each field in a Point. Only fields with values that can be -// converted to float64 are supported. Non-supported fields are skipped. -func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum { - datums := make([]*cloudwatch.MetricDatum, len(point.Fields())) - i := 0 +// Make a MetricDatum from telegraf.Metric. It would check if all required fields of +// cloudwatch.StatisticSet are available. If so, it would build MetricDatum from statistic values. +// Otherwise, fields would still been built independently. +func BuildMetricDatum(buildStatistic bool, highResolutionMetrics bool, point telegraf.Metric) []*cloudwatch.MetricDatum { - var value float64 + fields := make(map[string]cloudwatchField) + tags := point.Tags() + storageResolution := int64(60) + if highResolutionMetrics { + storageResolution = 1 + } for k, v := range point.Fields() { - switch t := v.(type) { - case int: - value = float64(t) - case int32: - value = float64(t) - case int64: - value = float64(t) - case uint64: - value = float64(t) - case float64: - value = t - case bool: - if t { - value = 1 - } else { - value = 0 + + val, ok := convert(v) + if !ok { + // Only fields with values that can be converted to float64 (and within CloudWatch boundary) are supported. + // Non-supported fields are skipped. + continue + } + + sType, fieldName := getStatisticType(k) + + // If statistic metric is not enabled or non-statistic type, just take current field as a value field. + if !buildStatistic || sType == statisticTypeNone { + fields[k] = &valueField{ + metricName: point.Name(), + fieldName: k, + tags: tags, + timestamp: point.Time(), + value: val, + storageResolution: storageResolution, } - case time.Time: - value = float64(t.Unix()) - default: - // Skip unsupported type. 
- datums = datums[:len(datums)-1] continue } - // Do CloudWatch boundary checking - // Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html - if math.IsNaN(value) { - datums = datums[:len(datums)-1] - continue - } - if math.IsInf(value, 0) { - datums = datums[:len(datums)-1] - continue - } - if value > 0 && value < float64(8.515920e-109) { - datums = datums[:len(datums)-1] - continue - } - if value > float64(1.174271e+108) { - datums = datums[:len(datums)-1] - continue + // Otherwise, it shall be a statistic field. + if _, ok := fields[fieldName]; !ok { + // Hit an uncached field, create statisticField for first time + fields[fieldName] = &statisticField{ + metricName: point.Name(), + fieldName: fieldName, + tags: tags, + timestamp: point.Time(), + values: map[statisticType]float64{ + sType: val, + }, + storageResolution: storageResolution, + } + } else { + // Add new statistic value to this field + fields[fieldName].addValue(sType, val) } + } - datums[i] = &cloudwatch.MetricDatum{ - MetricName: aws.String(strings.Join([]string{point.Name(), k}, "_")), - Value: aws.Float64(value), - Dimensions: BuildDimensions(point.Tags()), - Timestamp: aws.Time(point.Time()), - } - - i += 1 + var datums []*cloudwatch.MetricDatum + for _, f := range fields { + d := f.buildDatum() + datums = append(datums, d...) } return datums @@ -231,19 +351,15 @@ func BuildMetricDatum(point telegraf.Metric) []*cloudwatch.MetricDatum { // 10 dimensions per metric so we only keep up to the first 10 alphabetically. // This always includes the "host" tag if it exists. func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { - const MaxDimensions = 10 - dimensions := make([]*cloudwatch.Dimension, int(math.Min(float64(len(mTags)), MaxDimensions))) - - i := 0 + dimensions := make([]*cloudwatch.Dimension, 0, MaxDimensions) // This is pretty ugly but we always want to include the "host" tag if it exists. 
if host, ok := mTags["host"]; ok { - dimensions[i] = &cloudwatch.Dimension{ + dimensions = append(dimensions, &cloudwatch.Dimension{ Name: aws.String("host"), Value: aws.String(host), - } - i += 1 + }) } var keys []string @@ -255,21 +371,90 @@ func BuildDimensions(mTags map[string]string) []*cloudwatch.Dimension { sort.Strings(keys) for _, k := range keys { - if i >= MaxDimensions { + if len(dimensions) >= MaxDimensions { break } - dimensions[i] = &cloudwatch.Dimension{ - Name: aws.String(k), - Value: aws.String(mTags[k]), + value := mTags[k] + if value == "" { + continue } - i += 1 + dimensions = append(dimensions, &cloudwatch.Dimension{ + Name: aws.String(k), + Value: aws.String(mTags[k]), + }) } return dimensions } +func getStatisticType(name string) (sType statisticType, fieldName string) { + switch { + case strings.HasSuffix(name, "_max"): + sType = statisticTypeMax + fieldName = strings.TrimSuffix(name, "_max") + case strings.HasSuffix(name, "_min"): + sType = statisticTypeMin + fieldName = strings.TrimSuffix(name, "_min") + case strings.HasSuffix(name, "_sum"): + sType = statisticTypeSum + fieldName = strings.TrimSuffix(name, "_sum") + case strings.HasSuffix(name, "_count"): + sType = statisticTypeCount + fieldName = strings.TrimSuffix(name, "_count") + default: + sType = statisticTypeNone + fieldName = name + } + return +} + +func convert(v interface{}) (value float64, ok bool) { + + ok = true + + switch t := v.(type) { + case int: + value = float64(t) + case int32: + value = float64(t) + case int64: + value = float64(t) + case uint64: + value = float64(t) + case float64: + value = t + case bool: + if t { + value = 1 + } else { + value = 0 + } + case time.Time: + value = float64(t.Unix()) + default: + // Skip unsupported type. + ok = false + return + } + + // Do CloudWatch boundary checking + // Constraints at: http://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_MetricDatum.html + switch { + case math.IsNaN(value): + return 0, false + case math.IsInf(value, 0): + return 0, false + case value > 0 && value < float64(8.515920e-109): + return 0, false + case value > float64(1.174271e+108): + return 0, false + } + + return +} + func init() { outputs.Add("cloudwatch", func() telegraf.Output { return &CloudWatch{} diff --git a/plugins/outputs/cloudwatch/cloudwatch_test.go b/plugins/outputs/cloudwatch/cloudwatch_test.go index 8ab60de2f..b2466e4d0 100644 --- a/plugins/outputs/cloudwatch/cloudwatch_test.go +++ b/plugins/outputs/cloudwatch/cloudwatch_test.go @@ -5,14 +5,17 @@ import ( "math" "sort" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudwatch" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Test that each tag becomes one dimension @@ -26,7 +29,7 @@ func TestBuildDimensions(t *testing.T) { tagKeys := make([]string, len(testPoint.Tags())) i := 0 - for k, _ := range testPoint.Tags() { + for k := range testPoint.Tags() { tagKeys[i] = k i += 1 } @@ -72,13 +75,79 @@ func TestBuildMetricDatums(t *testing.T) { testutil.TestMetric(float64(1.174272e+108)), // largest should be 1.174271e+108 } for _, point := range validMetrics { - datums := BuildMetricDatum(point) + datums := BuildMetricDatum(false, false, point) assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", point)) } for _, point := range invalidMetrics { - datums := BuildMetricDatum(point) + 
datums := BuildMetricDatum(false, false, point) assert.Equal(0, len(datums), fmt.Sprintf("Valid point should not create a Datum {value: %v}", point)) } + + statisticMetric, _ := metric.New( + "test1", + map[string]string{"tag1": "value1"}, + map[string]interface{}{"value_max": float64(10), "value_min": float64(0), "value_sum": float64(100), "value_count": float64(20)}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + datums := BuildMetricDatum(true, false, statisticMetric) + assert.Equal(1, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", statisticMetric)) + + multiFieldsMetric, _ := metric.New( + "test1", + map[string]string{"tag1": "value1"}, + map[string]interface{}{"valueA": float64(10), "valueB": float64(0), "valueC": float64(100), "valueD": float64(20)}, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + datums = BuildMetricDatum(true, false, multiFieldsMetric) + assert.Equal(4, len(datums), fmt.Sprintf("Each field should create a Datum {value: %v}", multiFieldsMetric)) + + multiStatisticMetric, _ := metric.New( + "test1", + map[string]string{"tag1": "value1"}, + map[string]interface{}{ + "valueA_max": float64(10), "valueA_min": float64(0), "valueA_sum": float64(100), "valueA_count": float64(20), + "valueB_max": float64(10), "valueB_min": float64(0), "valueB_sum": float64(100), "valueB_count": float64(20), + "valueC_max": float64(10), "valueC_min": float64(0), "valueC_sum": float64(100), + "valueD": float64(10), "valueE": float64(0), + }, + time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + datums = BuildMetricDatum(true, false, multiStatisticMetric) + assert.Equal(7, len(datums), fmt.Sprintf("Valid point should create a Datum {value: %v}", multiStatisticMetric)) +} + +func TestMetricDatumResolution(t *testing.T) { + const expectedStandardResolutionValue = int64(60) + const expectedHighResolutionValue = int64(1) + + assert := assert.New(t) + + metric := testutil.TestMetric(1) + + standardResolutionDatum := BuildMetricDatum(false, false, metric) + actualStandardResolutionValue := *standardResolutionDatum[0].StorageResolution + assert.Equal(expectedStandardResolutionValue, actualStandardResolutionValue) + + highResolutionDatum := BuildMetricDatum(false, true, metric) + actualHighResolutionValue := *highResolutionDatum[0].StorageResolution + assert.Equal(expectedHighResolutionValue, actualHighResolutionValue) +} + +func TestBuildMetricDatums_SkipEmptyTags(t *testing.T) { + input := testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + "foo": "", + }, + map[string]interface{}{ + "value": int64(42), + }, + time.Unix(0, 0), + ) + + datums := BuildMetricDatum(true, false, input) + require.Len(t, datums[0].Dimensions, 1) } func TestPartitionDatums(t *testing.T) { diff --git a/plugins/outputs/cratedb/cratedb.go b/plugins/outputs/cratedb/cratedb.go index 01213011f..f6840cc38 100644 --- a/plugins/outputs/cratedb/cratedb.go +++ b/plugins/outputs/cratedb/cratedb.go @@ -165,7 +165,7 @@ func escapeObject(m map[string]interface{}) (string, error) { // We find all keys and sort them first because iterating a map in go is // randomized and we need consistent output for our unit tests. 
keys := make([]string, 0, len(m)) - for k, _ := range m { + for k := range m { keys = append(keys, k) } sort.Strings(keys) @@ -183,7 +183,7 @@ func escapeObject(m map[string]interface{}) (string, error) { return `{` + strings.Join(pairs, ", ") + `}`, nil } -// escapeString wraps s in the given quote string and replaces all occurences +// escapeString wraps s in the given quote string and replaces all occurrences // of it inside of s with a double quote. func escapeString(s string, quote string) string { return quote + strings.Replace(s, quote, quote+quote, -1) + quote @@ -191,7 +191,7 @@ func escapeString(s string, quote string) string { // hashID returns a cryptographic hash int64 hash that includes the metric name // and tags. It's used instead of m.HashID() because it's not considered stable -// and because a cryptogtaphic hash makes more sense for the use case of +// and because a cryptographic hash makes more sense for the use case of // deduplication. // [1] https://github.com/influxdata/telegraf/pull/3210#discussion_r148411201 func hashID(m telegraf.Metric) int64 { diff --git a/plugins/outputs/datadog/README.md b/plugins/outputs/datadog/README.md index 0563d6444..ad1c7a025 100644 --- a/plugins/outputs/datadog/README.md +++ b/plugins/outputs/datadog/README.md @@ -1,9 +1,30 @@ # Datadog Output Plugin -This plugin writes to the [Datadog Metrics API](http://docs.datadoghq.com/api/#metrics) -and requires an `apikey` which can be obtained [here](https://app.datadoghq.com/account/settings#api) -for the account. +This plugin writes to the [Datadog Metrics API][metrics] and requires an +`apikey` which can be obtained [here][apikey] for the account. -If the point value being sent cannot be converted to a float64, the metric is skipped. -Metrics are grouped by converting any `_` characters to `.` in the Point Name. \ No newline at end of file +### Configuration + +```toml +[[outputs.datadog]] + ## Datadog API key + apikey = "my-secret-key" + + ## Connection timeout. + # timeout = "5s" + + ## Write URL override; useful for debugging. + # url = "https://app.datadoghq.com/api/v1/series" +``` + +### Metrics + +Datadog metric names are formed by joining the Telegraf metric name and the field +key with a `.` character. + +Field values are converted to floating point numbers. Strings and floats that +cannot be sent over JSON, namely NaN and Inf, are ignored. + +[metrics]: https://docs.datadoghq.com/api/v1/metrics/#submit-metrics +[apikey]: https://app.datadoghq.com/account/settings#api diff --git a/plugins/outputs/datadog/datadog.go b/plugins/outputs/datadog/datadog.go index 2ab3dcd58..2d1a93788 100644 --- a/plugins/outputs/datadog/datadog.go +++ b/plugins/outputs/datadog/datadog.go @@ -5,9 +5,9 @@ import ( "encoding/json" "fmt" "log" + "math" "net/http" "net/url" - "sort" "strings" "github.com/influxdata/telegraf" @@ -19,16 +19,19 @@ type Datadog struct { Apikey string Timeout internal.Duration - apiUrl string + URL string `toml:"url"` client *http.Client } var sampleConfig = ` ## Datadog API key - apikey = "my-secret-key" # required. + apikey = "my-secret-key" ## Connection timeout. # timeout = "5s" + + ## Write URL override; useful for debugging. 
+ # url = "https://app.datadoghq.com/api/v1/series" ` type TimeSeries struct { @@ -46,12 +49,6 @@ type Point [2]float64 const datadog_api = "https://app.datadoghq.com/api/v1/series" -func NewDatadog(apiUrl string) *Datadog { - return &Datadog{ - apiUrl: apiUrl, - } -} - func (d *Datadog) Connect() error { if d.Apikey == "" { return fmt.Errorf("apikey is a required field for datadog output") @@ -67,15 +64,19 @@ func (d *Datadog) Connect() error { } func (d *Datadog) Write(metrics []telegraf.Metric) error { - if len(metrics) == 0 { - return nil - } ts := TimeSeries{} tempSeries := []*Metric{} metricCounter := 0 for _, m := range metrics { if dogMs, err := buildMetrics(m); err == nil { + metricTags := buildTags(m.TagList()) + host, _ := m.GetTag("host") + + if len(dogMs) == 0 { + continue + } + for fieldName, dogM := range dogMs { // name of the datadog measurement var dname string @@ -85,11 +86,9 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { } else { dname = m.Name() + "." + fieldName } - var host string - host, _ = m.Tags()["host"] metric := &Metric{ Metric: dname, - Tags: buildTags(m.Tags()), + Tags: metricTags, Host: host, } metric.Points[0] = dogM @@ -101,6 +100,10 @@ func (d *Datadog) Write(metrics []telegraf.Metric) error { } } + if len(tempSeries) == 0 { + return nil + } + redactedApiKey := "****************" ts.Series = make([]*Metric, metricCounter) copy(ts.Series, tempSeries[0:]) @@ -139,40 +142,42 @@ func (d *Datadog) authenticatedUrl() string { q := url.Values{ "api_key": []string{d.Apikey}, } - return fmt.Sprintf("%s?%s", d.apiUrl, q.Encode()) + return fmt.Sprintf("%s?%s", d.URL, q.Encode()) } func buildMetrics(m telegraf.Metric) (map[string]Point, error) { ms := make(map[string]Point) - for k, v := range m.Fields() { - if !verifyValue(v) { + for _, field := range m.FieldList() { + if !verifyValue(field.Value) { continue } var p Point - if err := p.setValue(v); err != nil { - return ms, fmt.Errorf("unable to extract value from Fields %v error %v", k, err.Error()) + if err := p.setValue(field.Value); err != nil { + return ms, fmt.Errorf("unable to extract value from Fields %v error %v", field.Key, err.Error()) } p[0] = float64(m.Time().Unix()) - ms[k] = p + ms[field.Key] = p } return ms, nil } -func buildTags(mTags map[string]string) []string { - tags := make([]string, len(mTags)) +func buildTags(tagList []*telegraf.Tag) []string { + tags := make([]string, len(tagList)) index := 0 - for k, v := range mTags { - tags[index] = fmt.Sprintf("%s:%s", k, v) + for _, tag := range tagList { + tags[index] = fmt.Sprintf("%s:%s", tag.Key, tag.Value) index += 1 } - sort.Strings(tags) return tags } func verifyValue(v interface{}) bool { - switch v.(type) { + switch v := v.(type) { case string: return false + case float64: + // The payload will be encoded as JSON, which does not allow NaN or Inf. 
+ return !math.IsNaN(v) && !math.IsInf(v, 0) } return true } @@ -202,6 +207,8 @@ func (d *Datadog) Close() error { func init() { outputs.Add("datadog", func() telegraf.Output { - return NewDatadog(datadog_api) + return &Datadog{ + URL: datadog_api, + } }) } diff --git a/plugins/outputs/datadog/datadog_test.go b/plugins/outputs/datadog/datadog_test.go index 045bf4b43..be8541ee8 100644 --- a/plugins/outputs/datadog/datadog_test.go +++ b/plugins/outputs/datadog/datadog_test.go @@ -3,15 +3,15 @@ package datadog import ( "encoding/json" "fmt" + "math" "net/http" "net/http/httptest" "reflect" "testing" "time" - "github.com/influxdata/telegraf/testutil" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -21,6 +21,12 @@ var ( fakeApiKey = "123456" ) +func NewDatadog(url string) *Datadog { + return &Datadog{ + URL: url, + } +} + func fakeDatadog() *Datadog { d := NewDatadog(fakeUrl) d.Apikey = fakeApiKey @@ -74,19 +80,33 @@ func TestAuthenticatedUrl(t *testing.T) { func TestBuildTags(t *testing.T) { var tagtests = []struct { - ptIn map[string]string + ptIn []*telegraf.Tag outTags []string }{ { - map[string]string{"one": "two", "three": "four"}, + []*telegraf.Tag{ + { + Key: "one", + Value: "two", + }, + { + Key: "three", + Value: "four", + }, + }, []string{"one:two", "three:four"}, }, { - map[string]string{"aaa": "bbb"}, + []*telegraf.Tag{ + { + Key: "aaa", + Value: "bbb", + }, + }, []string{"aaa:bbb"}, }, { - map[string]string{}, + []*telegraf.Tag{}, []string{}, }, } @@ -229,3 +249,45 @@ func TestVerifyValue(t *testing.T) { } } } + +func TestNaNIsSkipped(t *testing.T) { + plugin := &Datadog{ + Apikey: "testing", + URL: "", // No request will be sent because all fields are skipped + } + + err := plugin.Connect() + require.NoError(t, err) + + err = plugin.Write([]telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": math.NaN(), + }, + time.Now()), + }) + require.NoError(t, err) +} + +func TestInfIsSkipped(t *testing.T) { + plugin := &Datadog{ + Apikey: "testing", + URL: "", // No request will be sent because all fields are skipped + } + + err := plugin.Connect() + require.NoError(t, err) + + err = plugin.Write([]telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": math.Inf(0), + }, + time.Now()), + }) + require.NoError(t, err) +} diff --git a/plugins/outputs/discard/discard.go b/plugins/outputs/discard/discard.go index 4a6d634b7..919f74b47 100644 --- a/plugins/outputs/discard/discard.go +++ b/plugins/outputs/discard/discard.go @@ -7,11 +7,13 @@ import ( type Discard struct{} -func (d *Discard) Connect() error { return nil } -func (d *Discard) Close() error { return nil } -func (d *Discard) SampleConfig() string { return "" } -func (d *Discard) Description() string { return "Send metrics to nowhere at all" } -func (d *Discard) Write(metrics []telegraf.Metric) error { return nil } +func (d *Discard) Connect() error { return nil } +func (d *Discard) Close() error { return nil } +func (d *Discard) SampleConfig() string { return "" } +func (d *Discard) Description() string { return "Send metrics to nowhere at all" } +func (d *Discard) Write(metrics []telegraf.Metric) error { + return nil +} func init() { outputs.Add("discard", func() telegraf.Output { return &Discard{} }) diff --git a/plugins/outputs/elasticsearch/README.md b/plugins/outputs/elasticsearch/README.md index 
11f3c1385..cf8c4d9ca 100644 --- a/plugins/outputs/elasticsearch/README.md +++ b/plugins/outputs/elasticsearch/README.md @@ -1,10 +1,10 @@ -## Elasticsearch Output Plugin for Telegraf +# Elasticsearch Output Plugin -This plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using Elastic (http://olivere.github.io/elastic/). +This plugin writes to [Elasticsearch](https://www.elastic.co) via HTTP using Elastic ( -Currently it only supports Elasticsearch 5.x series. +It supports Elasticsearch releases from 5.x up to 7.x. -## Elasticsearch indexes and templates +### Elasticsearch indexes and templates ### Indexes per time-frame @@ -22,7 +22,7 @@ For more information on how this works, see https://www.elastic.co/guide/en/elas This plugin can create a working template for use with telegraf metrics. It uses Elasticsearch dynamic templates feature to set proper types for the tags and metrics fields. If the template specified already exists, it will not overwrite unless you configure this plugin to do so. Thus you can customize this template after its creation if necessary. -Example of an index template created by telegraf: +Example of an index template created by telegraf on Elasticsearch 5.x: ```json { @@ -35,6 +35,8 @@ Example of an index template created by telegraf: "limit": "5000" } }, + "auto_expand_replicas" : "0-1", + "codec" : "best_compression", "refresh_interval": "10s" } }, @@ -142,10 +144,9 @@ This plugin will format the events in the following way: } ``` -### Configuration: +### Configuration ```toml -# Configuration for Elasticsearch to send metrics to. [[outputs.elasticsearch]] ## The full HTTP endpoint URL for your Elasticsearch instance ## Multiple urls can be specified as part of the same cluster, @@ -159,7 +160,7 @@ This plugin will format the events in the following way: ## Set the interval to check if the Elasticsearch nodes are available ## Setting to "0s" will disable the health check (not recommended in production) health_check_interval = "10s" - ## HTTP basic authentication details (eg. when using Shield) + ## HTTP basic authentication details. # username = "telegraf" # password = "mypassword" @@ -197,7 +198,16 @@ This plugin will format the events in the following way: overwrite_template = false ``` -### Required parameters: +#### Permissions + +If you are using authentication within your Elasticsearch cluster, you need +to create a account and create a role with at least the manage role in the +Cluster Privileges category. Overwise, your account will not be able to +connect to your Elasticsearch cluster and send logs to your cluster. After +that, you need to add "create_indice" and "write" permission to your specific +index pattern. + +#### Required parameters: * `urls`: A list containing the full HTTP URL of one or more nodes from your Elasticsearch instance. * `index_name`: The target index for metrics. You can use the date specifiers below to create indexes per time frame. @@ -209,9 +219,10 @@ This plugin will format the events in the following way: %H - hour (00..23) %V - week of the year (ISO week) (01..53) ``` + Additionally, you can specify dynamic index names by using tags with the notation ```{{tag_name}}```. This will store the metrics with different tag values in different indices. If the tag does not exist in a particular metric, the `default_tag_value` will be used instead. -### Optional parameters: +#### Optional parameters: * `timeout`: Elasticsearch client timeout, defaults to "5s" if not set. 
* `enable_sniffer`: Set to true to ask Elasticsearch a list of all cluster nodes, thus it is not necessary to list all nodes in the urls config option. @@ -222,7 +233,7 @@ Additionally, you can specify dynamic index names by using tags with the notatio * `template_name`: The template name used for telegraf indexes. * `overwrite_template`: Set to true if you want telegraf to overwrite an existing template. -## Known issues +### Known issues Integer values collected that are bigger than 2^63 and smaller than 1e21 (or in this exact same window of their negative counterparts) are encoded by golang JSON encoder in decimal format and that is not fully supported by Elasticsearch dynamic field mapping. This causes the metrics with such values to be dropped in case a field mapping has not been created yet on the telegraf index. If that's the case you will see an exception on Elasticsearch side like this: diff --git a/plugins/outputs/elasticsearch/elasticsearch.go b/plugins/outputs/elasticsearch/elasticsearch.go index 56169135a..7c4d4755a 100644 --- a/plugins/outputs/elasticsearch/elasticsearch.go +++ b/plugins/outputs/elasticsearch/elasticsearch.go @@ -1,12 +1,14 @@ package elasticsearch import ( + "bytes" "context" "fmt" "log" "net/http" "strconv" "strings" + "text/template" "time" "github.com/influxdata/telegraf" @@ -29,6 +31,7 @@ type Elasticsearch struct { ManageTemplate bool TemplateName string OverwriteTemplate bool + MajorReleaseNumber int tls.ClientConfig Client *elastic.Client @@ -47,7 +50,7 @@ var sampleConfig = ` ## Set the interval to check if the Elasticsearch nodes are available ## Setting to "0s" will disable the health check (not recommended in production) health_check_interval = "10s" - ## HTTP basic authentication details (eg. when using Shield) + ## HTTP basic authentication details # username = "telegraf" # password = "mypassword" @@ -85,6 +88,81 @@ var sampleConfig = ` overwrite_template = false ` +const telegrafTemplate = ` +{ + {{ if (lt .Version 6) }} + "template": "{{.TemplatePattern}}", + {{ else }} + "index_patterns" : [ "{{.TemplatePattern}}" ], + {{ end }} + "settings": { + "index": { + "refresh_interval": "10s", + "mapping.total_fields.limit": 5000, + "auto_expand_replicas" : "0-1", + "codec" : "best_compression" + } + }, + "mappings" : { + {{ if (lt .Version 7) }} + "metrics" : { + {{ if (lt .Version 6) }} + "_all": { "enabled": false }, + {{ end }} + {{ end }} + "properties" : { + "@timestamp" : { "type" : "date" }, + "measurement_name" : { "type" : "keyword" } + }, + "dynamic_templates": [ + { + "tags": { + "match_mapping_type": "string", + "path_match": "tag.*", + "mapping": { + "ignore_above": 512, + "type": "keyword" + } + } + }, + { + "metrics_long": { + "match_mapping_type": "long", + "mapping": { + "type": "float", + "index": false + } + } + }, + { + "metrics_double": { + "match_mapping_type": "double", + "mapping": { + "type": "float", + "index": false + } + } + }, + { + "text_fields": { + "match": "*", + "mapping": { + "norms": false + } + } + } + ] + {{ if (lt .Version 7) }} + } + {{ end }} + } +}` + +type templatePart struct { + TemplatePattern string + Version int +} + func (a *Elasticsearch) Connect() error { if a.URLs == nil || a.IndexName == "" { return fmt.Errorf("Elasticsearch urls or index_name is not defined") @@ -142,14 +220,15 @@ func (a *Elasticsearch) Connect() error { } // quit if ES version is not supported - i, err := strconv.Atoi(strings.Split(esVersion, ".")[0]) - if err != nil || i < 5 { + majorReleaseNumber, err := 
strconv.Atoi(strings.Split(esVersion, ".")[0]) + if err != nil || majorReleaseNumber < 5 { return fmt.Errorf("Elasticsearch version not supported: %s", esVersion) } log.Println("I! Elasticsearch version: " + esVersion) a.Client = client + a.MajorReleaseNumber = majorReleaseNumber if a.ManageTemplate { err := a.manageTemplate(ctx) @@ -184,10 +263,13 @@ func (a *Elasticsearch) Write(metrics []telegraf.Metric) error { m["tag"] = metric.Tags() m[name] = metric.Fields() - bulkRequest.Add(elastic.NewBulkIndexRequest(). - Index(indexName). - Type("metrics"). - Doc(m)) + br := elastic.NewBulkIndexRequest().Index(indexName).Doc(m) + + if a.MajorReleaseNumber <= 6 { + br.Type("metrics") + } + + bulkRequest.Add(br) } @@ -237,65 +319,16 @@ func (a *Elasticsearch) manageTemplate(ctx context.Context) error { } if (a.OverwriteTemplate) || (!templateExists) || (templatePattern != "") { - // Create or update the template - tmpl := fmt.Sprintf(` - { - "template":"%s", - "settings": { - "index": { - "refresh_interval": "10s", - "mapping.total_fields.limit": 5000 - } - }, - "mappings" : { - "_default_" : { - "_all": { "enabled": false }, - "properties" : { - "@timestamp" : { "type" : "date" }, - "measurement_name" : { "type" : "keyword" } - }, - "dynamic_templates": [ - { - "tags": { - "match_mapping_type": "string", - "path_match": "tag.*", - "mapping": { - "ignore_above": 512, - "type": "keyword" - } - } - }, - { - "metrics_long": { - "match_mapping_type": "long", - "mapping": { - "type": "float", - "index": false - } - } - }, - { - "metrics_double": { - "match_mapping_type": "double", - "mapping": { - "type": "float", - "index": false - } - } - }, - { - "text_fields": { - "match": "*", - "mapping": { - "norms": false - } - } - } - ] - } - } - }`, templatePattern+"*") - _, errCreateTemplate := a.Client.IndexPutTemplate(a.TemplateName).BodyString(tmpl).Do(ctx) + tp := templatePart{ + TemplatePattern: templatePattern + "*", + Version: a.MajorReleaseNumber, + } + + t := template.Must(template.New("template").Parse(telegrafTemplate)) + var tmpl bytes.Buffer + + t.Execute(&tmpl, tp) + _, errCreateTemplate := a.Client.IndexPutTemplate(a.TemplateName).BodyString(tmpl.String()).Do(ctx) if errCreateTemplate != nil { return fmt.Errorf("Elasticsearch failed to create index template %s : %s", a.TemplateName, errCreateTemplate) diff --git a/plugins/outputs/exec/README.md b/plugins/outputs/exec/README.md new file mode 100644 index 000000000..d82676a25 --- /dev/null +++ b/plugins/outputs/exec/README.md @@ -0,0 +1,26 @@ +# Exec Output Plugin + +This plugin sends telegraf metrics to an external application over stdin. + +The command should be defined similar to docker's `exec` form: + + ["executable", "param1", "param2"] + +On non-zero exit stderr will be logged at error level. + +### Configuration + +```toml +[[outputs.exec]] + ## Command to ingest metrics via stdin. + command = ["tee", "-a", "/dev/null"] + + ## Timeout for command to complete. + # timeout = "5s" + + ## Data format to output. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + # data_format = "influx" +``` diff --git a/plugins/outputs/exec/exec.go b/plugins/outputs/exec/exec.go new file mode 100644 index 000000000..d3697627e --- /dev/null +++ b/plugins/outputs/exec/exec.go @@ -0,0 +1,151 @@ +package exec + +import ( + "bytes" + "fmt" + "io" + "log" + "os/exec" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" +) + +const maxStderrBytes = 512 + +// Exec defines the exec output plugin. +type Exec struct { + Command []string `toml:"command"` + Timeout internal.Duration `toml:"timeout"` + + runner Runner + serializer serializers.Serializer +} + +var sampleConfig = ` + ## Command to ingest metrics via stdin. + command = ["tee", "-a", "/dev/null"] + + ## Timeout for command to complete. + # timeout = "5s" + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + # data_format = "influx" +` + +// SetSerializer sets the serializer for the output. +func (e *Exec) SetSerializer(serializer serializers.Serializer) { + e.serializer = serializer +} + +// Connect satisfies the Output interface. +func (e *Exec) Connect() error { + return nil +} + +// Close satisfies the Output interface. +func (e *Exec) Close() error { + return nil +} + +// Description describes the plugin. +func (e *Exec) Description() string { + return "Send metrics to command as input over stdin" +} + +// SampleConfig returns a sample configuration. +func (e *Exec) SampleConfig() string { + return sampleConfig +} + +// Write writes the metrics to the configured command. +func (e *Exec) Write(metrics []telegraf.Metric) error { + var buffer bytes.Buffer + serializedMetrics, err := e.serializer.SerializeBatch(metrics) + if err != nil { + return err + } + buffer.Write(serializedMetrics) + + if buffer.Len() <= 0 { + return nil + } + + return e.runner.Run(e.Timeout.Duration, e.Command, &buffer) +} + +// Runner provides an interface for running exec.Cmd. +type Runner interface { + Run(time.Duration, []string, io.Reader) error +} + +// CommandRunner runs a command with the ability to kill the process before the timeout. +type CommandRunner struct { + cmd *exec.Cmd +} + +// Run runs the command. +func (c *CommandRunner) Run(timeout time.Duration, command []string, buffer io.Reader) error { + cmd := exec.Command(command[0], command[1:]...) + cmd.Stdin = buffer + var stderr bytes.Buffer + cmd.Stderr = &stderr + + err := internal.RunTimeout(cmd, timeout) + s := stderr + + if err != nil { + if err == internal.TimeoutErr { + return fmt.Errorf("%q timed out and was killed", command) + } + + if s.Len() > 0 { + log.Printf("E! [outputs.exec] Command error: %q", truncate(s)) + } + + if status, ok := internal.ExitStatus(err); ok { + return fmt.Errorf("%q exited %d with %s", command, status, err.Error()) + } + + return fmt.Errorf("%q failed with %s", command, err.Error()) + } + + c.cmd = cmd + + return nil +} + +func truncate(buf bytes.Buffer) string { + // Limit the number of bytes. 
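+	// Cap the buffer at maxStderrBytes, then keep only the first line of stderr;
+	// "..." is appended below whenever anything beyond that was dropped, so the
+	// log makes it clear the command's error output was truncated.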
+ didTruncate := false + if buf.Len() > maxStderrBytes { + buf.Truncate(maxStderrBytes) + didTruncate = true + } + if i := bytes.IndexByte(buf.Bytes(), '\n'); i > 0 { + // Only show truncation if the newline wasn't the last character. + if i < buf.Len()-1 { + didTruncate = true + } + buf.Truncate(i) + } + if didTruncate { + buf.WriteString("...") + } + return buf.String() +} + +func init() { + outputs.Add("exec", func() telegraf.Output { + return &Exec{ + runner: &CommandRunner{}, + Timeout: internal.Duration{Duration: time.Second * 5}, + } + }) +} diff --git a/plugins/outputs/exec/exec_test.go b/plugins/outputs/exec/exec_test.go new file mode 100644 index 000000000..850ba7328 --- /dev/null +++ b/plugins/outputs/exec/exec_test.go @@ -0,0 +1,105 @@ +package exec + +import ( + "bytes" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/serializers" + "github.com/influxdata/telegraf/testutil" +) + +func TestExec(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test due to OS/executable dependencies") + } + + tests := []struct { + name string + command []string + err bool + metrics []telegraf.Metric + }{ + { + name: "test success", + command: []string{"tee"}, + err: false, + metrics: testutil.MockMetrics(), + }, + { + name: "test doesn't accept stdin", + command: []string{"sleep", "5s"}, + err: true, + metrics: testutil.MockMetrics(), + }, + { + name: "test command not found", + command: []string{"/no/exist", "-h"}, + err: true, + metrics: testutil.MockMetrics(), + }, + { + name: "test no metrics output", + command: []string{"tee"}, + err: false, + metrics: []telegraf.Metric{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &Exec{ + Command: tt.command, + Timeout: internal.Duration{Duration: time.Second}, + runner: &CommandRunner{}, + } + + s, _ := serializers.NewInfluxSerializer() + e.SetSerializer(s) + + e.Connect() + + require.Equal(t, tt.err, e.Write(tt.metrics) != nil) + }) + } +} + +func TestTruncate(t *testing.T) { + tests := []struct { + name string + buf *bytes.Buffer + len int + }{ + { + name: "long out", + buf: bytes.NewBufferString(strings.Repeat("a", maxStderrBytes+100)), + len: maxStderrBytes + len("..."), + }, + { + name: "multiline out", + buf: bytes.NewBufferString("hola\ngato\n"), + len: len("hola") + len("..."), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := truncate(*tt.buf) + require.Equal(t, tt.len, len(s)) + }) + } +} + +func TestExecDocs(t *testing.T) { + e := &Exec{} + e.Description() + e.SampleConfig() + require.NoError(t, e.Close()) + + e = &Exec{runner: &CommandRunner{}} + require.NoError(t, e.Close()) +} diff --git a/plugins/outputs/file/README.md b/plugins/outputs/file/README.md index de577eacf..45d0ac155 100644 --- a/plugins/outputs/file/README.md +++ b/plugins/outputs/file/README.md @@ -1,13 +1,31 @@ -# file Output Plugin +# File Output Plugin This plugin writes telegraf metrics to files ### Configuration -``` + +```toml [[outputs.file]] ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] + ## Use batch serialization format instead of line based delimiting. The + ## batch format allows for the production of non line based output formats and + ## may more efficiently encode and write metrics. + # use_batch_format = false + + ## The file will be rotated after the time interval specified. 
When set + ## to 0 no time based rotation is performed. + # rotation_interval = "0h" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # rotation_max_archives = 5 + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/file/file.go b/plugins/outputs/file/file.go index 0bbff2f64..3798f107a 100644 --- a/plugins/outputs/file/file.go +++ b/plugins/outputs/file/file.go @@ -6,16 +6,22 @@ import ( "os" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/rotate" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) type File struct { - Files []string - - writers []io.Writer - closers []io.Closer + Files []string `toml:"files"` + RotationInterval internal.Duration `toml:"rotation_interval"` + RotationMaxSize internal.Size `toml:"rotation_max_size"` + RotationMaxArchives int `toml:"rotation_max_archives"` + UseBatchFormat bool `toml:"use_batch_format"` + Log telegraf.Logger `toml:"-"` + writer io.Writer + closers []io.Closer serializer serializers.Serializer } @@ -23,6 +29,23 @@ var sampleConfig = ` ## Files to write to, "stdout" is a specially handled file. files = ["stdout", "/tmp/metrics.out"] + ## Use batch serialization format instead of line based delimiting. The + ## batch format allows for the production of non line based output formats and + ## may more efficiently encode metric groups. + # use_batch_format = false + + ## The file will be rotated after the time interval specified. When set + ## to 0 no time based rotation is performed. + # rotation_interval = "0d" + + ## The logfile will be rotated when it becomes larger than the specified + ## size. When set to 0 no size based rotation is performed. + # rotation_max_size = "0MB" + + ## Maximum number of rotated archives to keep, any older logs are deleted. + ## If set to -1, no archives are removed. + # rotation_max_archives = 5 + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -35,43 +58,39 @@ func (f *File) SetSerializer(serializer serializers.Serializer) { } func (f *File) Connect() error { + writers := []io.Writer{} + if len(f.Files) == 0 { f.Files = []string{"stdout"} } for _, file := range f.Files { if file == "stdout" { - f.writers = append(f.writers, os.Stdout) + writers = append(writers, os.Stdout) } else { - var of *os.File - var err error - if _, err := os.Stat(file); os.IsNotExist(err) { - of, err = os.Create(file) - } else { - of, err = os.OpenFile(file, os.O_APPEND|os.O_WRONLY, os.ModeAppend) - } - + of, err := rotate.NewFileWriter( + file, f.RotationInterval.Duration, f.RotationMaxSize.Size, f.RotationMaxArchives) if err != nil { return err } - f.writers = append(f.writers, of) + + writers = append(writers, of) f.closers = append(f.closers, of) } } + f.writer = io.MultiWriter(writers...) 
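+	// A single MultiWriter fans each write out to stdout and/or the rotating
+	// file writers, so metrics are serialized once per batch regardless of how
+	// many files are configured.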
return nil } func (f *File) Close() error { - var errS string + var err error for _, c := range f.closers { - if err := c.Close(); err != nil { - errS += err.Error() + "\n" + errClose := c.Close() + if errClose != nil { + err = errClose } } - if errS != "" { - return fmt.Errorf(errS) - } - return nil + return err } func (f *File) SampleConfig() string { @@ -84,19 +103,31 @@ func (f *File) Description() string { func (f *File) Write(metrics []telegraf.Metric) error { var writeErr error = nil - for _, metric := range metrics { - b, err := f.serializer.Serialize(metric) + + if f.UseBatchFormat { + octets, err := f.serializer.SerializeBatch(metrics) if err != nil { - return fmt.Errorf("failed to serialize message: %s", err) + f.Log.Errorf("Could not serialize metric: %v", err) } - for _, writer := range f.writers { - _, err = writer.Write(b) - if err != nil && writer != os.Stdout { - writeErr = fmt.Errorf("E! failed to write message: %s, %s", b, err) + _, err = f.writer.Write(octets) + if err != nil { + f.Log.Errorf("Error writing to file: %v", err) + } + } else { + for _, metric := range metrics { + b, err := f.serializer.Serialize(metric) + if err != nil { + f.Log.Debugf("Could not serialize metric: %v", err) + } + + _, err = f.writer.Write(b) + if err != nil { + writeErr = fmt.Errorf("E! [outputs.file] failed to write message: %v", err) } } } + return writeErr } diff --git a/plugins/outputs/graphite/README.md b/plugins/outputs/graphite/README.md index 878eb8048..b6b36cfca 100644 --- a/plugins/outputs/graphite/README.md +++ b/plugins/outputs/graphite/README.md @@ -21,9 +21,22 @@ see the [Graphite Data Format](../../../docs/DATA_FORMATS_OUTPUT.md) ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md template = "host.tags.measurement.field" + ## Graphite templates patterns + ## 1. Template for cpu + ## 2. Template for disk* + ## 3. Default template + # templates = [ + # "cpu tags.measurement.host.field", + # "disk* measurement.field", + # "host.measurement.tags.field" + #] + ## Enable Graphite tags support # graphite_tag_support = false + ## Character for separating metric name and field for Graphite tags + # graphite_separator = "." + ## timeout in seconds for the write connection to graphite timeout = 2 diff --git a/plugins/outputs/graphite/graphite.go b/plugins/outputs/graphite/graphite.go index c26c1587f..4e284609d 100644 --- a/plugins/outputs/graphite/graphite.go +++ b/plugins/outputs/graphite/graphite.go @@ -17,12 +17,14 @@ import ( type Graphite struct { GraphiteTagSupport bool + GraphiteSeparator string // URL is only for backwards compatibility - Servers []string - Prefix string - Template string - Timeout int - conns []net.Conn + Servers []string + Prefix string + Template string + Templates []string + Timeout int + conns []net.Conn tlsint.ClientConfig } @@ -40,6 +42,19 @@ var sampleConfig = ` ## Enable Graphite tags support # graphite_tag_support = false + ## Character for separating metric name and field for Graphite tags + # graphite_separator = "." + + ## Graphite templates patterns + ## 1. Template for cpu + ## 2. Template for disk* + ## 3. 
Default template + # templates = [ + # "cpu tags.measurement.host.field", + # "disk* measurement.field", + # "host.measurement.tags.field" + #] + ## timeout in seconds for the write connection to graphite timeout = 2 @@ -134,7 +149,7 @@ func checkEOF(conn net.Conn) { func (g *Graphite) Write(metrics []telegraf.Metric) error { // Prepare data var batch []byte - s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport) + s, err := serializers.NewGraphiteSerializer(g.Prefix, g.Template, g.GraphiteTagSupport, g.GraphiteSeparator, g.Templates) if err != nil { return err } @@ -173,7 +188,7 @@ func (g *Graphite) send(batch []byte) error { if _, e := g.conns[n].Write(batch); e != nil { // Error log.Println("E! Graphite Error: " + e.Error()) - // Close explicitely + // Close explicitly g.conns[n].Close() // Let's try the next one } else { diff --git a/plugins/outputs/graphite/graphite_test.go b/plugins/outputs/graphite/graphite_test.go index 3857236e5..82aad0d7d 100644 --- a/plugins/outputs/graphite/graphite_test.go +++ b/plugins/outputs/graphite/graphite_test.go @@ -98,6 +98,190 @@ func TestGraphiteOK(t *testing.T) { g.Close() } +func TestGraphiteOkWithSeparatorDot(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + GraphiteSeparator: ".", + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2(t, &wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + +func TestGraphiteOkWithSeparatorUnderscore(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + GraphiteSeparator: "_", + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + 
time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2(t, &wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + +func TestGraphiteOKWithMultipleTemplates(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1WithMultipleTemplates(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + Template: "measurement.host.tags.field", + Templates: []string{ + "my_* host.measurement.tags.field", + "measurement.tags.host.field", + }, + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1", "mytag": "valuetag"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2WithMultipleTemplates(t, &wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + func TestGraphiteOkWithTags(t *testing.T) { var wg sync.WaitGroup // Start TCP server @@ -158,6 +342,128 @@ func TestGraphiteOkWithTags(t *testing.T) { g.Close() } +func TestGraphiteOkWithTagsAndSeparatorDot(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1WithTags(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my.prefix", + GraphiteTagSupport: true, + GraphiteSeparator: ".", + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": 
float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2WithTags(t, &wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + +func TestGraphiteOkWithTagsAndSeparatorUnderscore(t *testing.T) { + var wg sync.WaitGroup + // Start TCP server + wg.Add(1) + t.Log("Starting server") + TCPServer1WithTagsSeparatorUnderscore(t, &wg) + + // Init plugin + g := Graphite{ + Prefix: "my_prefix", + GraphiteTagSupport: true, + GraphiteSeparator: "_", + } + + // Init metrics + m1, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"myfield": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m2, _ := metric.New( + "mymeasurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + m3, _ := metric.New( + "my_measurement", + map[string]string{"host": "192.168.0.1"}, + map[string]interface{}{"value": float64(3.14)}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + // Prepare point list + metrics := []telegraf.Metric{m1} + metrics2 := []telegraf.Metric{m2, m3} + err1 := g.Connect() + require.NoError(t, err1) + // Send Data + t.Log("Send first data") + err2 := g.Write(metrics) + require.NoError(t, err2) + + // Waiting TCPserver, should reconnect and resend + wg.Wait() + t.Log("Finished Waiting for first data") + var wg2 sync.WaitGroup + // Start TCP server + wg2.Add(1) + TCPServer2WithTagsSeparatorUnderscore(t, &wg2) + //Write but expect an error, but reconnect + err3 := g.Write(metrics2) + t.Log("Finished writing second data, it should have reconnected automatically") + + require.NoError(t, err3) + t.Log("Finished writing third data") + wg2.Wait() + g.Close() +} + func TCPServer1(t *testing.T, wg *sync.WaitGroup) { tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") go func() { @@ -188,6 +494,36 @@ func TCPServer2(t *testing.T, wg *sync.WaitGroup) { }() } +func TCPServer1WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + go func() { + defer wg.Done() + conn, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + data1, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1.myfield 3.14 1289430000", data1) + conn.Close() + tcpServer.Close() + }() +} + +func TCPServer2WithMultipleTemplates(t *testing.T, wg *sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + go func() { + defer wg.Done() + conn2, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn2) + tp := textproto.NewReader(reader) + data2, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.mymeasurement.valuetag.192_168_0_1 3.14 1289430000", data2) + data3, _ := tp.ReadLine() + assert.Equal(t, "my.prefix.192_168_0_1.my_measurement.valuetag 3.14 
1289430000", data3) + conn2.Close() + tcpServer.Close() + }() +} + func TCPServer1WithTags(t *testing.T, wg *sync.WaitGroup) { tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") go func() { @@ -217,3 +553,33 @@ func TCPServer2WithTags(t *testing.T, wg *sync.WaitGroup) { tcpServer.Close() }() } + +func TCPServer1WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + go func() { + defer wg.Done() + conn, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn) + tp := textproto.NewReader(reader) + data1, _ := tp.ReadLine() + assert.Equal(t, "my_prefix_mymeasurement_myfield;host=192.168.0.1 3.14 1289430000", data1) + conn.Close() + tcpServer.Close() + }() +} + +func TCPServer2WithTagsSeparatorUnderscore(t *testing.T, wg *sync.WaitGroup) { + tcpServer, _ := net.Listen("tcp", "127.0.0.1:2003") + go func() { + defer wg.Done() + conn2, _ := (tcpServer).Accept() + reader := bufio.NewReader(conn2) + tp := textproto.NewReader(reader) + data2, _ := tp.ReadLine() + assert.Equal(t, "my_prefix_mymeasurement;host=192.168.0.1 3.14 1289430000", data2) + data3, _ := tp.ReadLine() + assert.Equal(t, "my_prefix_my_measurement;host=192.168.0.1 3.14 1289430000", data3) + conn2.Close() + tcpServer.Close() + }() +} diff --git a/plugins/outputs/graylog/README.md b/plugins/outputs/graylog/README.md index 39863b541..4945ce46f 100644 --- a/plugins/outputs/graylog/README.md +++ b/plugins/outputs/graylog/README.md @@ -1,14 +1,18 @@ # Graylog Output Plugin -This plugin writes to a Graylog instance using the "gelf" format. +This plugin writes to a Graylog instance using the "[GELF][]" format. -It requires a `servers` name. +[GELF]: https://docs.graylog.org/en/3.1/pages/gelf.html#gelf-payload-specification ### Configuration: ```toml -# Send telegraf metrics to graylog(s) [[outputs.graylog]] - ## UDP endpoint for your graylog instance(s). - servers = ["127.0.0.1:12201", "192.168.1.1:12201"] + ## UDP endpoint for your graylog instances. + servers = ["127.0.0.1:12201"] + + ## The field to use as the GELF short_message, if unset the static string + ## "telegraf" will be used. + ## example: short_message_field = "message" + # short_message_field = "" ``` diff --git a/plugins/outputs/graylog/graylog.go b/plugins/outputs/graylog/graylog.go index 4b2c1693a..34f2ec6d9 100644 --- a/plugins/outputs/graylog/graylog.go +++ b/plugins/outputs/graylog/graylog.go @@ -150,13 +150,19 @@ func (g *Gelf) send(b []byte) (n int, err error) { } type Graylog struct { - Servers []string - writer io.Writer + Servers []string `toml:"servers"` + ShortMessageField string `toml:"short_message_field"` + writer io.Writer } var sampleConfig = ` ## UDP endpoint for your graylog instance. - servers = ["127.0.0.1:12201", "192.168.1.1:12201"] + servers = ["127.0.0.1:12201"] + + ## The field to use as the GELF short_message, if unset the static string + ## "telegraf" will be used. 
+ ## example: short_message_field = "message" + # short_message_field = "" ` func (g *Graylog) Connect() error { @@ -184,16 +190,12 @@ func (g *Graylog) SampleConfig() string { } func (g *Graylog) Description() string { - return "Send telegraf metrics to graylog(s)" + return "Send telegraf metrics to graylog" } func (g *Graylog) Write(metrics []telegraf.Metric) error { - if len(metrics) == 0 { - return nil - } - for _, metric := range metrics { - values, err := serialize(metric) + values, err := g.serialize(metric) if err != nil { return err } @@ -201,14 +203,14 @@ func (g *Graylog) Write(metrics []telegraf.Metric) error { for _, value := range values { _, err := g.writer.Write([]byte(value)) if err != nil { - return fmt.Errorf("FAILED to write message: %s, %s", value, err) + return fmt.Errorf("error writing message: %q, %v", value, err) } } } return nil } -func serialize(metric telegraf.Metric) ([]string, error) { +func (g *Graylog) serialize(metric telegraf.Metric) ([]string, error) { out := []string{} m := make(map[string]interface{}) @@ -217,7 +219,7 @@ func serialize(metric telegraf.Metric) ([]string, error) { m["short_message"] = "telegraf" m["name"] = metric.Name() - if host, ok := metric.Tags()["host"]; ok { + if host, ok := metric.GetTag("host"); ok { m["host"] = host } else { host, err := os.Hostname() @@ -227,14 +229,18 @@ func serialize(metric telegraf.Metric) ([]string, error) { m["host"] = host } - for key, value := range metric.Tags() { - if key != "host" { - m["_"+key] = value + for _, tag := range metric.TagList() { + if tag.Key != "host" { + m["_"+tag.Key] = tag.Value } } - for key, value := range metric.Fields() { - m["_"+key] = value + for _, field := range metric.FieldList() { + if field.Key == g.ShortMessageField { + m["short_message"] = field.Value + } else { + m["_"+field.Key] = field.Value + } } serialized, err := ejson.Marshal(m) diff --git a/plugins/outputs/health/README.md b/plugins/outputs/health/README.md new file mode 100644 index 000000000..0a56d5192 --- /dev/null +++ b/plugins/outputs/health/README.md @@ -0,0 +1,64 @@ +# Health Output Plugin + +The health plugin provides a HTTP health check resource that can be configured +to return a failure status code based on the value of a metric. + +When the plugin is healthy it will return a 200 response; when unhealthy it +will return a 503 response. The default state is healthy, one or more checks +must fail in order for the resource to enter the failed state. + +### Configuration +```toml +[[outputs.health]] + ## Address and port to listen on. + ## ex: service_address = "http://localhost:8080" + ## service_address = "unix:///var/run/telegraf-health.sock" + # service_address = "http://:8080" + + ## The maximum duration for reading the entire request. + # read_timeout = "5s" + ## The maximum duration for writing the entire response. + # write_timeout = "5s" + + ## Username and password to accept for HTTP basic authentication. + # basic_username = "user1" + # basic_password = "secret" + + ## Allowed CA certificates for client certificates. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## TLS server certificate and private key. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## One or more check sub-tables should be defined, it is also recommended to + ## use metric filtering to limit the metrics that flow into this output. + ## + ## When using the default buffer sizes, this example will fail when the + ## metric buffer is half full. 
+ ## + ## namepass = ["internal_write"] + ## tagpass = { output = ["influxdb"] } + ## + ## [[outputs.health.compares]] + ## field = "buffer_size" + ## lt = 5000.0 + ## + ## [[outputs.health.contains]] + ## field = "buffer_size" +``` + +#### compares + +The `compares` check is used to assert basic mathematical relationships. Use +it by choosing a field key and one or more comparisons that must hold true. If +the field is not found on a metric no comparison will be made. + +Comparisons must be hold true on all metrics for the check to pass. + +#### contains + +The `contains` check can be used to require a field key to exist on at least +one metric. + +If the field is found on any metric the check passes. diff --git a/plugins/outputs/health/compares.go b/plugins/outputs/health/compares.go new file mode 100644 index 000000000..9228bd2df --- /dev/null +++ b/plugins/outputs/health/compares.go @@ -0,0 +1,77 @@ +package health + +import ( + "github.com/influxdata/telegraf" +) + +type Compares struct { + Field string `toml:"field"` + GT *float64 `toml:"gt"` + GE *float64 `toml:"ge"` + LT *float64 `toml:"lt"` + LE *float64 `toml:"le"` + EQ *float64 `toml:"eq"` + NE *float64 `toml:"ne"` +} + +func (c *Compares) runChecks(fv float64) bool { + if c.GT != nil && !(fv > *c.GT) { + return false + } + if c.GE != nil && !(fv >= *c.GE) { + return false + } + if c.LT != nil && !(fv < *c.LT) { + return false + } + if c.LE != nil && !(fv <= *c.LE) { + return false + } + if c.EQ != nil && !(fv == *c.EQ) { + return false + } + if c.NE != nil && !(fv != *c.NE) { + return false + } + return true +} + +func (c *Compares) Check(metrics []telegraf.Metric) bool { + success := true + for _, m := range metrics { + fv, ok := m.GetField(c.Field) + if !ok { + continue + } + + f, ok := asFloat(fv) + if !ok { + return false + } + + result := c.runChecks(f) + if !result { + success = false + } + } + return success +} + +func asFloat(fv interface{}) (float64, bool) { + switch v := fv.(type) { + case int64: + return float64(v), true + case float64: + return v, true + case uint64: + return float64(v), true + case bool: + if v { + return 1.0, true + } else { + return 0.0, true + } + default: + return 0.0, false + } +} diff --git a/plugins/outputs/health/compares_test.go b/plugins/outputs/health/compares_test.go new file mode 100644 index 000000000..26f0dc1e1 --- /dev/null +++ b/plugins/outputs/health/compares_test.go @@ -0,0 +1,268 @@ +package health_test + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/health" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func addr(v float64) *float64 { + return &v +} + +func TestFieldNotFoundIsSuccess(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Now()), + } + + compares := &health.Compares{ + Field: "time_idle", + GT: addr(42.0), + } + result := compares.Check(metrics) + require.True(t, result) +} + +func TestStringFieldIsFailure(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": "foo", + }, + time.Now()), + } + + compares := &health.Compares{ + Field: "time_idle", + GT: addr(42.0), + } + result := compares.Check(metrics) + require.False(t, result) +} + +func TestFloatConvert(t *testing.T) { + tests := []struct { + name string + metrics []telegraf.Metric + expected bool + }{ + { + name: "int64 
field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": int64(42.0), + }, + time.Now()), + }, + expected: true, + }, + { + name: "uint64 field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": uint64(42.0), + }, + time.Now()), + }, + expected: true, + }, + { + name: "float64 field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": float64(42.0), + }, + time.Now()), + }, + expected: true, + }, + { + name: "bool field true", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": true, + }, + time.Now()), + }, + expected: true, + }, + { + name: "bool field false", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": false, + }, + time.Now()), + }, + expected: false, + }, + { + name: "string field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": "42.0", + }, + time.Now()), + }, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + compares := &health.Compares{ + Field: "time_idle", + GT: addr(0.0), + } + actual := compares.Check(tt.metrics) + require.Equal(t, tt.expected, actual) + }) + } +} + +func TestOperators(t *testing.T) { + tests := []struct { + name string + compares *health.Compares + expected bool + }{ + { + name: "gt", + compares: &health.Compares{ + Field: "time_idle", + GT: addr(41.0), + }, + expected: true, + }, + { + name: "not gt", + compares: &health.Compares{ + Field: "time_idle", + GT: addr(42.0), + }, + expected: false, + }, + { + name: "ge", + compares: &health.Compares{ + Field: "time_idle", + GE: addr(42.0), + }, + expected: true, + }, + { + name: "not ge", + compares: &health.Compares{ + Field: "time_idle", + GE: addr(43.0), + }, + expected: false, + }, + { + name: "lt", + compares: &health.Compares{ + Field: "time_idle", + LT: addr(43.0), + }, + expected: true, + }, + { + name: "not lt", + compares: &health.Compares{ + Field: "time_idle", + LT: addr(42.0), + }, + expected: false, + }, + { + name: "le", + compares: &health.Compares{ + Field: "time_idle", + LE: addr(42.0), + }, + expected: true, + }, + { + name: "not le", + compares: &health.Compares{ + Field: "time_idle", + LE: addr(41.0), + }, + expected: false, + }, + { + name: "eq", + compares: &health.Compares{ + Field: "time_idle", + EQ: addr(42.0), + }, + expected: true, + }, + { + name: "not eq", + compares: &health.Compares{ + Field: "time_idle", + EQ: addr(41.0), + }, + expected: false, + }, + { + name: "ne", + compares: &health.Compares{ + Field: "time_idle", + NE: addr(41.0), + }, + expected: true, + }, + { + name: "not ne", + compares: &health.Compares{ + Field: "time_idle", + NE: addr(42.0), + }, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Now()), + } + actual := tt.compares.Check(metrics) + require.Equal(t, tt.expected, actual) + }) + } +} diff --git a/plugins/outputs/health/contains.go b/plugins/outputs/health/contains.go new file mode 100644 index 000000000..ff03667e0 --- /dev/null +++ b/plugins/outputs/health/contains.go 
@@ -0,0 +1,19 @@ +package health + +import "github.com/influxdata/telegraf" + +type Contains struct { + Field string `toml:"field"` +} + +func (c *Contains) Check(metrics []telegraf.Metric) bool { + success := false + for _, m := range metrics { + ok := m.HasField(c.Field) + if ok { + success = true + } + } + + return success +} diff --git a/plugins/outputs/health/contains_test.go b/plugins/outputs/health/contains_test.go new file mode 100644 index 000000000..2337dd867 --- /dev/null +++ b/plugins/outputs/health/contains_test.go @@ -0,0 +1,68 @@ +package health_test + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/health" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestFieldFound(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Now()), + } + + contains := &health.Contains{ + Field: "time_idle", + } + result := contains.Check(metrics) + require.True(t, result) +} + +func TestFieldNotFound(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Now()), + } + + contains := &health.Contains{ + Field: "time_idle", + } + result := contains.Check(metrics) + require.False(t, result) +} + +func TestOneMetricWithFieldIsSuccess(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Now()), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Now()), + } + + contains := &health.Contains{ + Field: "time_idle", + } + result := contains.Check(metrics) + require.True(t, result) +} diff --git a/plugins/outputs/health/health.go b/plugins/outputs/health/health.go new file mode 100644 index 000000000..b85329342 --- /dev/null +++ b/plugins/outputs/health/health.go @@ -0,0 +1,272 @@ +package health + +import ( + "context" + "crypto/tls" + "errors" + "log" + "net" + "net/http" + "net/url" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const ( + defaultServiceAddress = "tcp://:8080" + defaultReadTimeout = 5 * time.Second + defaultWriteTimeout = 5 * time.Second +) + +var sampleConfig = ` + ## Address and port to listen on. + ## ex: service_address = "http://localhost:8080" + ## service_address = "unix:///var/run/telegraf-health.sock" + # service_address = "http://:8080" + + ## The maximum duration for reading the entire request. + # read_timeout = "5s" + ## The maximum duration for writing the entire response. + # write_timeout = "5s" + + ## Username and password to accept for HTTP basic authentication. + # basic_username = "user1" + # basic_password = "secret" + + ## Allowed CA certificates for client certificates. + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## TLS server certificate and private key. + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + + ## One or more check sub-tables should be defined, it is also recommended to + ## use metric filtering to limit the metrics that flow into this output. + ## + ## When using the default buffer sizes, this example will fail when the + ## metric buffer is half full. 
+ ## + ## namepass = ["internal_write"] + ## tagpass = { output = ["influxdb"] } + ## + ## [[outputs.health.compares]] + ## field = "buffer_size" + ## lt = 5000.0 + ## + ## [[outputs.health.contains]] + ## field = "buffer_size" +` + +type Checker interface { + // Check returns true if the metrics meet its criteria. + Check(metrics []telegraf.Metric) bool +} + +type Health struct { + ServiceAddress string `toml:"service_address"` + ReadTimeout internal.Duration `toml:"read_timeout"` + WriteTimeout internal.Duration `toml:"write_timeout"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + tlsint.ServerConfig + + Compares []*Compares `toml:"compares"` + Contains []*Contains `toml:"contains"` + checkers []Checker + + wg sync.WaitGroup + server *http.Server + origin string + network string + address string + tlsConf *tls.Config + + mu sync.Mutex + healthy bool +} + +func (h *Health) SampleConfig() string { + return sampleConfig +} + +func (h *Health) Description() string { + return "Configurable HTTP health check resource based on metrics" +} + +func (h *Health) Init() error { + u, err := url.Parse(h.ServiceAddress) + if err != nil { + return err + } + + switch u.Scheme { + case "http", "https": + h.network = "tcp" + h.address = u.Host + case "unix": + h.network = u.Scheme + h.address = u.Path + case "tcp4", "tcp6", "tcp": + h.network = u.Scheme + h.address = u.Host + default: + return errors.New("service_address contains invalid scheme") + } + + h.tlsConf, err = h.ServerConfig.TLSConfig() + if err != nil { + return err + } + + h.checkers = make([]Checker, 0) + for i := range h.Compares { + h.checkers = append(h.checkers, h.Compares[i]) + } + for i := range h.Contains { + h.checkers = append(h.checkers, h.Contains[i]) + } + + return nil +} + +// Connect starts the HTTP server. +func (h *Health) Connect() error { + authHandler := internal.AuthHandler(h.BasicUsername, h.BasicPassword, "health", onAuthError) + + h.server = &http.Server{ + Addr: h.ServiceAddress, + Handler: authHandler(h), + ReadTimeout: h.ReadTimeout.Duration, + WriteTimeout: h.WriteTimeout.Duration, + TLSConfig: h.tlsConf, + } + + listener, err := h.listen() + if err != nil { + return err + } + + h.origin = h.getOrigin(listener) + + log.Printf("I! [outputs.health] Listening on %s", h.origin) + + h.wg.Add(1) + go func() { + defer h.wg.Done() + err := h.server.Serve(listener) + if err != http.ErrServerClosed { + log.Printf("E! [outputs.health] Serve error on %s: %v", h.origin, err) + } + h.origin = "" + }() + + return nil +} + +func onAuthError(_ http.ResponseWriter) { +} + +func (h *Health) listen() (net.Listener, error) { + if h.tlsConf != nil { + return tls.Listen(h.network, h.address, h.tlsConf) + } else { + return net.Listen(h.network, h.address) + } +} + +func (h *Health) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + var code = http.StatusOK + if !h.isHealthy() { + code = http.StatusServiceUnavailable + } + + rw.Header().Set("Server", internal.ProductToken()) + http.Error(rw, http.StatusText(code), code) +} + +// Write runs all checks over the metric batch and adjust health state. +func (h *Health) Write(metrics []telegraf.Metric) error { + healthy := true + for _, checker := range h.checkers { + success := checker.Check(metrics) + if !success { + healthy = false + } + } + + h.setHealthy(healthy) + return nil +} + +// Close shuts down the HTTP server. 
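+// It gives the server up to five seconds (server.Shutdown with a timeout
+// context) to drain in-flight requests before returning.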
+func (h *Health) Close() error { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + h.server.Shutdown(ctx) + h.wg.Wait() + return nil +} + +// Origin returns the URL of the HTTP server. +func (h *Health) Origin() string { + return h.origin +} + +func (h *Health) getOrigin(listener net.Listener) string { + scheme := "http" + if h.tlsConf != nil { + scheme = "https" + } + if h.network == "unix" { + scheme = "unix" + } + + switch h.network { + case "unix": + origin := &url.URL{ + Scheme: scheme, + Path: listener.Addr().String(), + } + return origin.String() + default: + origin := &url.URL{ + Scheme: scheme, + Host: listener.Addr().String(), + } + return origin.String() + } + +} + +func (h *Health) setHealthy(healthy bool) { + h.mu.Lock() + defer h.mu.Unlock() + h.healthy = healthy +} + +func (h *Health) isHealthy() bool { + h.mu.Lock() + defer h.mu.Unlock() + return h.healthy +} + +func NewHealth() *Health { + return &Health{ + ServiceAddress: defaultServiceAddress, + ReadTimeout: internal.Duration{Duration: defaultReadTimeout}, + WriteTimeout: internal.Duration{Duration: defaultWriteTimeout}, + healthy: true, + } +} + +func init() { + outputs.Add("health", func() telegraf.Output { + return NewHealth() + }) +} diff --git a/plugins/outputs/health/health_test.go b/plugins/outputs/health/health_test.go new file mode 100644 index 000000000..5bf35ad83 --- /dev/null +++ b/plugins/outputs/health/health_test.go @@ -0,0 +1,204 @@ +package health_test + +import ( + "io/ioutil" + "net/http" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/health" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var pki = testutil.NewPKI("../../../testutil/pki") + +func TestHealth(t *testing.T) { + type Options struct { + Compares []*health.Compares `toml:"compares"` + Contains []*health.Contains `toml:"contains"` + } + + now := time.Now() + tests := []struct { + name string + options Options + metrics []telegraf.Metric + expectedCode int + }{ + { + name: "healthy on startup", + expectedCode: 200, + }, + { + name: "check passes", + options: Options{ + Compares: []*health.Compares{ + { + Field: "time_idle", + GT: func() *float64 { v := 0.0; return &v }(), + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + now), + }, + expectedCode: 200, + }, + { + name: "check fails", + options: Options{ + Compares: []*health.Compares{ + { + Field: "time_idle", + LT: func() *float64 { v := 0.0; return &v }(), + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + now), + }, + expectedCode: 503, + }, + { + name: "mixed check fails", + options: Options{ + Compares: []*health.Compares{ + { + Field: "time_idle", + LT: func() *float64 { v := 0.0; return &v }(), + }, + }, + Contains: []*health.Contains{ + { + Field: "foo", + }, + }, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + now), + }, + expectedCode: 503, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output := health.NewHealth() + output.ServiceAddress = "tcp://127.0.0.1:0" + output.Compares = tt.options.Compares + output.Contains = tt.options.Contains + + err := output.Init() + require.NoError(t, err) + + err = output.Connect() 
+ require.NoError(t, err) + + err = output.Write(tt.metrics) + require.NoError(t, err) + + resp, err := http.Get(output.Origin()) + require.NoError(t, err) + require.Equal(t, tt.expectedCode, resp.StatusCode) + + _, err = ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + err = output.Close() + require.NoError(t, err) + }) + } +} + +func TestInitServiceAddress(t *testing.T) { + tests := []struct { + name string + plugin *health.Health + err bool + origin string + }{ + { + name: "port without scheme is not allowed", + plugin: &health.Health{ + ServiceAddress: ":8080", + }, + err: true, + }, + { + name: "path without scheme is not allowed", + plugin: &health.Health{ + ServiceAddress: "/tmp/telegraf", + }, + err: true, + }, + { + name: "tcp with port maps to http", + plugin: &health.Health{ + ServiceAddress: "tcp://:8080", + }, + }, + { + name: "tcp with tlsconf maps to https", + plugin: &health.Health{ + ServiceAddress: "tcp://:8080", + ServerConfig: *pki.TLSServerConfig(), + }, + }, + { + name: "tcp4 is allowed", + plugin: &health.Health{ + ServiceAddress: "tcp4://:8080", + }, + }, + { + name: "tcp6 is allowed", + plugin: &health.Health{ + ServiceAddress: "tcp6://:8080", + }, + }, + { + name: "http scheme", + plugin: &health.Health{ + ServiceAddress: "http://:8080", + }, + }, + { + name: "https scheme", + plugin: &health.Health{ + ServiceAddress: "https://:8080", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output := health.NewHealth() + output.ServiceAddress = tt.plugin.ServiceAddress + + err := output.Init() + if tt.err { + require.Error(t, err) + return + } + require.NoError(t, err) + }) + } +} diff --git a/plugins/outputs/http/README.md b/plugins/outputs/http/README.md index 5005e9f02..0229c0e6a 100644 --- a/plugins/outputs/http/README.md +++ b/plugins/outputs/http/README.md @@ -9,7 +9,7 @@ data formats. For data_formats that support batching, metrics are sent in batch # A plugin that can transmit metrics over HTTP [[outputs.http]] ## URL is the address to send metrics to - url = "http://127.0.0.1:8080/metric" + url = "http://127.0.0.1:8080/telegraf" ## Timeout for HTTP message # timeout = "5s" @@ -21,6 +21,12 @@ data formats. For data_formats that support batching, metrics are sent in batch # username = "username" # password = "pa$$word" + ## OAuth2 Client Credentials Grant + # client_id = "clientid" + # client_secret = "secret" + # token_url = "https://indentityprovider/oauth2/v1/token" + # scopes = ["urn:opc:idm:__myscopes__"] + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -33,7 +39,11 @@ data formats. For data_formats that support batching, metrics are sent in batch ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" - + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. 
+ # content_encoding = "identity" + ## Additional HTTP headers # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format diff --git a/plugins/outputs/http/http.go b/plugins/outputs/http/http.go index 198aefe07..66fc0d5f0 100644 --- a/plugins/outputs/http/http.go +++ b/plugins/outputs/http/http.go @@ -2,7 +2,9 @@ package http import ( "bytes" + "context" "fmt" + "io" "io/ioutil" "net/http" "strings" @@ -13,11 +15,17 @@ import ( "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +const ( + defaultURL = "http://127.0.0.1:8080/telegraf" ) var sampleConfig = ` ## URL is the address to send metrics to - url = "http://127.0.0.1:8080/metric" + url = "http://127.0.0.1:8080/telegraf" ## Timeout for HTTP message # timeout = "5s" @@ -29,6 +37,12 @@ var sampleConfig = ` # username = "username" # password = "pa$$word" + ## OAuth2 Client Credentials Grant + # client_id = "clientid" + # client_secret = "secret" + # token_url = "https://indentityprovider/oauth2/v1/token" + # scopes = ["urn:opc:idm:__myscopes__"] + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -41,7 +55,11 @@ var sampleConfig = ` ## more about them here: ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md # data_format = "influx" - + + ## HTTP Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "identity" + ## Additional HTTP headers # [outputs.http.headers] # # Should be set manually to "application/json" for json data_format @@ -55,12 +73,17 @@ const ( ) type HTTP struct { - URL string `toml:"url"` - Timeout internal.Duration `toml:"timeout"` - Method string `toml:"method"` - Username string `toml:"username"` - Password string `toml:"password"` - Headers map[string]string `toml:"headers"` + URL string `toml:"url"` + Timeout internal.Duration `toml:"timeout"` + Method string `toml:"method"` + Username string `toml:"username"` + Password string `toml:"password"` + Headers map[string]string `toml:"headers"` + ClientID string `toml:"client_id"` + ClientSecret string `toml:"client_secret"` + TokenURL string `toml:"token_url"` + Scopes []string `toml:"scopes"` + ContentEncoding string `toml:"content_encoding"` tls.ClientConfig client *http.Client @@ -71,6 +94,34 @@ func (h *HTTP) SetSerializer(serializer serializers.Serializer) { h.serializer = serializer } +func (h *HTTP) createClient(ctx context.Context) (*http.Client, error) { + tlsCfg, err := h.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: h.Timeout.Duration, + } + + if h.ClientID != "" && h.ClientSecret != "" && h.TokenURL != "" { + oauthConfig := clientcredentials.Config{ + ClientID: h.ClientID, + ClientSecret: h.ClientSecret, + TokenURL: h.TokenURL, + Scopes: h.Scopes, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, client) + client = oauthConfig.Client(ctx) + } + + return client, nil +} + func (h *HTTP) Connect() error { if h.Method == "" { h.Method = http.MethodPost @@ -84,18 +135,13 @@ func (h *HTTP) Connect() error { h.Timeout.Duration = defaultClientTimeout } - tlsCfg, err := h.ClientConfig.TLSConfig() + ctx := context.Background() + client, err := 
h.createClient(ctx) if err != nil { return err } - h.client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsCfg, - Proxy: http.ProxyFromEnvironment, - }, - Timeout: h.Timeout.Duration, - } + h.client = client return nil } @@ -126,10 +172,36 @@ func (h *HTTP) Write(metrics []telegraf.Metric) error { } func (h *HTTP) write(reqBody []byte) error { - req, err := http.NewRequest(h.Method, h.URL, bytes.NewBuffer(reqBody)) + var reqBodyBuffer io.Reader = bytes.NewBuffer(reqBody) + var err error + if h.ContentEncoding == "gzip" { + rc, err := internal.CompressWithGzip(reqBodyBuffer) + if err != nil { + return err + } + defer rc.Close() + reqBodyBuffer = rc + } + + req, err := http.NewRequest(h.Method, h.URL, reqBodyBuffer) + if err != nil { + return err + } + + if h.Username != "" || h.Password != "" { + req.SetBasicAuth(h.Username, h.Password) + } + + req.Header.Set("User-Agent", internal.ProductToken()) req.Header.Set("Content-Type", defaultContentType) + if h.ContentEncoding == "gzip" { + req.Header.Set("Content-Encoding", "gzip") + } for k, v := range h.Headers { + if strings.ToLower(k) == "host" { + req.Host = v + } req.Header.Set(k, v) } @@ -152,6 +224,7 @@ func init() { return &HTTP{ Timeout: internal.Duration{Duration: defaultClientTimeout}, Method: defaultMethod, + URL: defaultURL, } }) } diff --git a/plugins/outputs/http/http_test.go b/plugins/outputs/http/http_test.go index 1d511d85b..abcf2db33 100644 --- a/plugins/outputs/http/http_test.go +++ b/plugins/outputs/http/http_test.go @@ -1,7 +1,9 @@ package http import ( + "compress/gzip" "fmt" + "io/ioutil" "net/http" "net/http/httptest" "net/url" @@ -9,6 +11,7 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/stretchr/testify/require" @@ -227,7 +230,7 @@ func TestContentType(t *testing.T) { } } -func TestBasicAuth(t *testing.T) { +func TestContentEncodingGzip(t *testing.T) { ts := httptest.NewServer(http.NotFoundHandler()) defer ts.Close() @@ -237,8 +240,66 @@ func TestBasicAuth(t *testing.T) { tests := []struct { name string plugin *HTTP - username string - password string + payload string + expected string + }{ + { + name: "default is no content encoding", + plugin: &HTTP{ + URL: u.String(), + }, + expected: "", + }, + { + name: "overwrite content_encoding", + plugin: &HTTP{ + URL: u.String(), + ContentEncoding: "gzip", + }, + expected: "gzip", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, tt.expected, r.Header.Get("Content-Encoding")) + + body := r.Body + var err error + if r.Header.Get("Content-Encoding") == "gzip" { + body, err = gzip.NewReader(r.Body) + require.NoError(t, err) + } + + payload, err := ioutil.ReadAll(body) + require.NoError(t, err) + require.Contains(t, string(payload), "cpu value=42") + + w.WriteHeader(http.StatusNoContent) + }) + + serializer := influx.NewSerializer() + tt.plugin.SetSerializer(serializer) + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +func TestBasicAuth(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin 
*HTTP }{ { name: "default", @@ -274,8 +335,8 @@ func TestBasicAuth(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { username, password, _ := r.BasicAuth() - require.Equal(t, tt.username, username) - require.Equal(t, tt.password, password) + require.Equal(t, tt.plugin.Username, username) + require.Equal(t, tt.plugin.Password, password) w.WriteHeader(http.StatusOK) }) @@ -289,3 +350,104 @@ func TestBasicAuth(t *testing.T) { }) } } + +type TestHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + +func TestOAuthClientCredentialsGrant(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + var token = "2YotnFZFEjr1zCsicMWpAA" + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + plugin *HTTP + tokenHandler TestHandlerFunc + handler TestHandlerFunc + }{ + { + name: "no credentials", + plugin: &HTTP{ + URL: u.String(), + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Len(t, r.Header["Authorization"], 0) + w.WriteHeader(http.StatusOK) + }, + }, + { + name: "success", + plugin: &HTTP{ + URL: u.String() + "/write", + ClientID: "howdy", + ClientSecret: "secret", + TokenURL: u.String() + "/token", + Scopes: []string{"urn:opc:idm:__myscopes__"}, + }, + tokenHandler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + values := url.Values{} + values.Add("access_token", token) + values.Add("token_type", "bearer") + values.Add("expires_in", "3600") + w.Write([]byte(values.Encode())) + }, + handler: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, []string{"Bearer " + token}, r.Header["Authorization"]) + w.WriteHeader(http.StatusOK) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + tt.handler(t, w, r) + case "/token": + tt.tokenHandler(t, w, r) + } + }) + + serializer := influx.NewSerializer() + tt.plugin.SetSerializer(serializer) + err = tt.plugin.Connect() + require.NoError(t, err) + + err = tt.plugin.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) + } +} + +func TestDefaultUserAgent(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + t.Run("default-user-agent", func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent")) + w.WriteHeader(http.StatusOK) + }) + + client := &HTTP{ + URL: u.String(), + Method: defaultMethod, + } + + serializer := influx.NewSerializer() + client.SetSerializer(serializer) + err = client.Connect() + require.NoError(t, err) + + err = client.Write([]telegraf.Metric{getMetric()}) + require.NoError(t, err) + }) +} diff --git a/plugins/outputs/influxdb/README.md b/plugins/outputs/influxdb/README.md index aed96e463..a53b7a0f2 100644 --- a/plugins/outputs/influxdb/README.md +++ b/plugins/outputs/influxdb/README.md @@ -1,6 +1,6 @@ -# InfluxDB Output Plugin +# InfluxDB v1.x Output Plugin -This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/influxdata/influxdb) HTTP or UDP service. 
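For the `content_encoding = "gzip"` option exercised by `TestContentEncodingGzip` above, the essential behavior is that the serialized body is gzip-compressed and the request carries a `Content-Encoding: gzip` header. A minimal round-trip sketch using only the standard library; the plugin itself relies on an internal compression helper, so this is an illustration of the idea rather than the actual implementation:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	// Line protocol payload as the serializer would produce it.
	payload := []byte("cpu value=42 123456789\n")

	// With content_encoding = "gzip" the body is compressed before sending
	// and the request gets a "Content-Encoding: gzip" header.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(payload); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// The receiving end (as in the test handler above) unwraps the body with
	// gzip.NewReader before parsing the metrics.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	decoded, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped %q\n", decoded)
}
```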
+The InfluxDB output plugin writes metrics to the [InfluxDB v1.x] HTTP or UDP service. ### Configuration: @@ -16,8 +16,16 @@ This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/ # urls = ["http://127.0.0.1:8086"] ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. # database = "telegraf" + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the 'database_tag' will not be included in the written metric. + # exclude_database_tag = false + ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the ## database already exists. @@ -27,6 +35,13 @@ This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/ ## the default retention policy. Only takes effect when using HTTP. # retention_policy = "" + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be included in the written metric. + # exclude_retention_policy_tag = false + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". ## Only takes effect when using HTTP. # write_consistency = "any" @@ -42,7 +57,7 @@ This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/ # user_agent = "telegraf" ## UDP payload size is the maximum packet size to send. - # udp_payload = 512 + # udp_payload = "512B" ## Optional TLS Config for use on HTTP connections. # tls_ca = "/etc/telegraf/ca.pem" @@ -68,3 +83,5 @@ This InfluxDB output plugin writes metrics to the [InfluxDB](https://github.com/ ## existing data has been written. # influx_uint_support = false ``` + +[InfluxDB v1.x]: https://github.com/influxdata/influxdb diff --git a/plugins/outputs/influxdb/http.go b/plugins/outputs/influxdb/http.go index 164261feb..19ae6f31f 100644 --- a/plugins/outputs/influxdb/http.go +++ b/plugins/outputs/influxdb/http.go @@ -1,13 +1,12 @@ package influxdb import ( - "compress/gzip" "context" "crypto/tls" "encoding/json" "fmt" "io" - "log" + "io/ioutil" "net" "net/http" "net/url" @@ -16,21 +15,13 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/plugins/serializers/influx" ) -type APIErrorType int - const ( - _ APIErrorType = iota - DatabaseNotFound -) - -const ( - defaultRequestTimeout = time.Second * 5 - defaultDatabase = "telegraf" - defaultUserAgent = "telegraf" - + defaultRequestTimeout = time.Second * 5 + defaultDatabase = "telegraf" errStringDatabaseNotFound = "database not found" errStringHintedHandoffNotEmpty = "hinted handoff queue not empty" errStringPartialWrite = "partial write" @@ -39,7 +30,6 @@ const ( ) var ( - // Escape an identifier in InfluxQL. 
escapeIdentifier = strings.NewReplacer( "\n", `\n`, @@ -48,12 +38,11 @@ var ( ) ) -// APIError is an error reported by the InfluxDB server +// APIError is a general error reported by the InfluxDB server type APIError struct { StatusCode int Title string Description string - Type APIErrorType } func (e APIError) Error() string { @@ -63,6 +52,11 @@ func (e APIError) Error() string { return e.Title } +type DatabaseNotFoundError struct { + APIError + Database string +} + // QueryResponse is the response body from the /query endpoint type QueryResponse struct { Results []QueryResult `json:"results"` @@ -89,62 +83,65 @@ func (r WriteResponse) Error() string { } type HTTPConfig struct { - URL *url.URL - UserAgent string - Timeout time.Duration - Username string - Password string - TLSConfig *tls.Config - Proxy *url.URL - Headers map[string]string - ContentEncoding string - Database string - RetentionPolicy string - Consistency string + URL *url.URL + UserAgent string + Timeout time.Duration + Username string + Password string + TLSConfig *tls.Config + Proxy *url.URL + Headers map[string]string + ContentEncoding string + Database string + DatabaseTag string + ExcludeDatabaseTag bool + RetentionPolicy string + RetentionPolicyTag string + ExcludeRetentionPolicyTag bool + Consistency string + SkipDatabaseCreation bool InfluxUintSupport bool `toml:"influx_uint_support"` Serializer *influx.Serializer + Log telegraf.Logger } type httpClient struct { - WriteURL string - QueryURL string - ContentEncoding string - Timeout time.Duration - Username string - Password string - Headers map[string]string + client *http.Client + config HTTPConfig + // Tracks that the 'create database` statement was executed for the + // database. An attempt to create the database is made each time a new + // database is encountered in the database_tag and after a "database not + // found" error occurs. 
+ createDatabaseExecuted map[string]bool - client *http.Client - serializer *influx.Serializer - url *url.URL - database string + log telegraf.Logger } -func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { +func NewHTTPClient(config HTTPConfig) (*httpClient, error) { if config.URL == nil { return nil, ErrMissingURL } - database := config.Database - if database == "" { - database = defaultDatabase + if config.Database == "" { + config.Database = defaultDatabase } - timeout := config.Timeout - if timeout == 0 { - timeout = defaultRequestTimeout + if config.Timeout == 0 { + config.Timeout = defaultRequestTimeout } userAgent := config.UserAgent if userAgent == "" { - userAgent = defaultUserAgent + userAgent = internal.ProductToken() } - var headers = make(map[string]string, len(config.Headers)+1) - headers["User-Agent"] = userAgent + if config.Headers == nil { + config.Headers = make(map[string]string) + } + config.Headers["User-Agent"] = userAgent for k, v := range config.Headers { - headers[k] = v + config.Headers[k] = v } var proxy func(*http.Request) (*url.URL, error) @@ -154,22 +151,8 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { proxy = http.ProxyFromEnvironment } - serializer := config.Serializer - if serializer == nil { - serializer = influx.NewSerializer() - } - - writeURL, err := makeWriteURL( - config.URL, - database, - config.RetentionPolicy, - config.Consistency) - if err != nil { - return nil, err - } - queryURL, err := makeQueryURL(config.URL) - if err != nil { - return nil, err + if config.Serializer == nil { + config.Serializer = influx.NewSerializer() } var transport *http.Transport @@ -194,45 +177,39 @@ func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { } client := &httpClient{ - serializer: serializer, client: &http.Client{ - Timeout: timeout, + Timeout: config.Timeout, Transport: transport, }, - database: database, - url: config.URL, - WriteURL: writeURL, - QueryURL: queryURL, - ContentEncoding: config.ContentEncoding, - Timeout: timeout, - Username: config.Username, - Password: config.Password, - Headers: headers, + createDatabaseExecuted: make(map[string]bool), + config: config, + log: config.Log, } return client, nil } // URL returns the origin URL that this client connects too. func (c *httpClient) URL() string { - return c.url.String() + return c.config.URL.String() } -// URL returns the database that this client connects too. +// Database returns the default database that this client connects too. func (c *httpClient) Database() string { - return c.database + return c.config.Database } -// CreateDatabase attemps to create a new database in the InfluxDB server. +// CreateDatabase attempts to create a new database in the InfluxDB server. // Note that some names are not allowed by the server, notably those with // non-printable characters or slashes. -func (c *httpClient) CreateDatabase(ctx context.Context) error { +func (c *httpClient) CreateDatabase(ctx context.Context, database string) error { query := fmt.Sprintf(`CREATE DATABASE "%s"`, - escapeIdentifier.Replace(c.database)) + escapeIdentifier.Replace(database)) req, err := c.makeQueryRequest(query) resp, err := c.client.Do(req.WithContext(ctx)) if err != nil { + internal.OnClientError(c.client, err) return err } defer resp.Body.Close() @@ -252,11 +229,19 @@ func (c *httpClient) CreateDatabase(ctx context.Context) error { } } - // Even with a 200 response there can be an error + // Even with a 200 status code there can be an error in the response body. 
+ // If there is also no error string then the operation was successful. if resp.StatusCode == http.StatusOK && queryResp.Error() == "" { + c.createDatabaseExecuted[database] = true return nil } + // Don't attempt to recreate the database after a 403 Forbidden error. + // This behavior exists only to maintain backwards compatibility. + if resp.StatusCode == http.StatusForbidden { + c.createDatabaseExecuted[database] = true + } + return &APIError{ StatusCode: resp.StatusCode, Title: resp.Status, @@ -264,18 +249,88 @@ func (c *httpClient) CreateDatabase(ctx context.Context) error { } } +type dbrp struct { + Database string + RetentionPolicy string +} + // Write sends the metrics to InfluxDB func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error { - var err error + // If these options are not used, we can skip in plugin batching and send + // the full batch in a single request. + if c.config.DatabaseTag == "" && c.config.RetentionPolicyTag == "" { + return c.writeBatch(ctx, c.config.Database, c.config.RetentionPolicy, metrics) + } - reader := influx.NewReader(metrics, c.serializer) - req, err := c.makeWriteRequest(reader) + batches := make(map[dbrp][]telegraf.Metric) + for _, metric := range metrics { + db, ok := metric.GetTag(c.config.DatabaseTag) + if !ok { + db = c.config.Database + } + + rp, ok := metric.GetTag(c.config.RetentionPolicyTag) + if !ok { + rp = c.config.RetentionPolicy + } + + dbrp := dbrp{ + Database: db, + RetentionPolicy: rp, + } + + if c.config.ExcludeDatabaseTag || c.config.ExcludeRetentionPolicyTag { + // Avoid modifying the metric in case we need to retry the request. + metric = metric.Copy() + metric.Accept() + if c.config.ExcludeDatabaseTag { + metric.RemoveTag(c.config.DatabaseTag) + } + if c.config.ExcludeRetentionPolicyTag { + metric.RemoveTag(c.config.RetentionPolicyTag) + } + } + + batches[dbrp] = append(batches[dbrp], metric) + } + + for dbrp, batch := range batches { + if !c.config.SkipDatabaseCreation && !c.createDatabaseExecuted[dbrp.Database] { + err := c.CreateDatabase(ctx, dbrp.Database) + if err != nil { + c.log.Warnf("When writing to [%s]: database %q creation failed: %v", + c.config.URL, dbrp.Database, err) + } + } + + err := c.writeBatch(ctx, dbrp.Database, dbrp.RetentionPolicy, batch) + if err != nil { + return err + } + } + return nil +} + +func (c *httpClient) writeBatch(ctx context.Context, db, rp string, metrics []telegraf.Metric) error { + loc, err := makeWriteURL(c.config.URL, db, rp, c.config.Consistency) + if err != nil { + return err + } + + reader, err := c.requestBodyReader(metrics) + if err != nil { + return err + } + defer reader.Close() + + req, err := c.makeWriteRequest(loc, reader) if err != nil { return err } resp, err := c.client.Do(req.WithContext(ctx)) if err != nil { + internal.OnClientError(c.client, err) return err } defer resp.Body.Close() @@ -294,11 +349,13 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } if strings.Contains(desc, errStringDatabaseNotFound) { - return &APIError{ - StatusCode: resp.StatusCode, - Title: resp.Status, - Description: desc, - Type: DatabaseNotFound, + return &DatabaseNotFoundError{ + APIError: APIError{ + StatusCode: resp.StatusCode, + Title: resp.Status, + Description: desc, + }, + Database: db, } } @@ -312,7 +369,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error // discarded for being older than the retention policy. Usually this not // a cause for concern and we don't want to retry. 
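The `Write` logic above groups metrics by the values of `database_tag` and `retention_policy_tag` before issuing one request per (database, retention policy) pair. A simplified, self-contained sketch of that grouping step; `splitBatches` and the `metric` struct are stand-ins for illustration (the real code works on `telegraf.Metric` and also strips the tags when the exclude options are set):

```go
package main

import "fmt"

// dbrp mirrors the (database, retention policy) key used to split a batch.
type dbrp struct {
	Database        string
	RetentionPolicy string
}

// metric is a stand-in for telegraf.Metric carrying only tags.
type metric struct {
	Name string
	Tags map[string]string
}

// splitBatches groups metrics by database/retention-policy tag, falling back
// to the configured defaults when a tag is not present.
func splitBatches(metrics []metric, dbTag, rpTag, defaultDB, defaultRP string) map[dbrp][]metric {
	batches := make(map[dbrp][]metric)
	for _, m := range metrics {
		db, ok := m.Tags[dbTag]
		if !ok {
			db = defaultDB
		}
		rp, ok := m.Tags[rpTag]
		if !ok {
			rp = defaultRP
		}
		key := dbrp{Database: db, RetentionPolicy: rp}
		batches[key] = append(batches[key], m)
	}
	return batches
}

func main() {
	metrics := []metric{
		{Name: "cpu", Tags: map[string]string{"database": "foo"}},
		{Name: "mem", Tags: map[string]string{}},
	}
	for key, batch := range splitBatches(metrics, "database", "rp", "telegraf", "") {
		fmt.Printf("%+v -> %d metric(s)\n", key, len(batch))
	}
}
```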
if strings.Contains(desc, errStringPointsBeyondRP) { - log.Printf("W! [outputs.influxdb]: when writing to [%s]: received error %v", + c.log.Warnf("When writing to [%s]: received error %v", c.URL(), desc) return nil } @@ -321,7 +378,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error // correctable at this point and so the point is dropped instead of // retrying. if strings.Contains(desc, errStringPartialWrite) { - log.Printf("E! [outputs.influxdb]: when writing to [%s]: received error %v; discarding points", + c.log.Errorf("When writing to [%s]: received error %v; discarding points", c.URL(), desc) return nil } @@ -329,7 +386,7 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error // This error indicates a bug in either Telegraf line protocol // serialization, retries would not be successful. if strings.Contains(desc, errStringUnableToParse) { - log.Printf("E! [outputs.influxdb]: when writing to [%s]: received error %v; discarding points", + c.log.Errorf("When writing to [%s]: received error %v; discarding points", c.URL(), desc) return nil } @@ -342,11 +399,16 @@ func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error } func (c *httpClient) makeQueryRequest(query string) (*http.Request, error) { + queryURL, err := makeQueryURL(c.config.URL) + if err != nil { + return nil, err + } + params := url.Values{} params.Set("q", query) form := strings.NewReader(params.Encode()) - req, err := http.NewRequest("POST", c.QueryURL, form) + req, err := http.NewRequest("POST", queryURL, form) if err != nil { return nil, err } @@ -357,16 +419,10 @@ func (c *httpClient) makeQueryRequest(query string) (*http.Request, error) { return req, nil } -func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { +func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { var err error - if c.ContentEncoding == "gzip" { - body, err = compressWithGzip(body) - if err != nil { - return nil, err - } - } - req, err := http.NewRequest("POST", c.WriteURL, body) + req, err := http.NewRequest("POST", url, body) if err != nil { return nil, err } @@ -374,33 +430,36 @@ func (c *httpClient) makeWriteRequest(body io.Reader) (*http.Request, error) { req.Header.Set("Content-Type", "text/plain; charset=utf-8") c.addHeaders(req) - if c.ContentEncoding == "gzip" { + if c.config.ContentEncoding == "gzip" { req.Header.Set("Content-Encoding", "gzip") } return req, nil } -func compressWithGzip(data io.Reader) (io.Reader, error) { - pr, pw := io.Pipe() - gw := gzip.NewWriter(pw) - var err error +// requestBodyReader warp io.Reader from influx.NewReader to io.ReadCloser, which is usefully to fast close the write +// side of the connection in case of error +func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser, error) { + reader := influx.NewReader(metrics, c.config.Serializer) - go func() { - _, err = io.Copy(gw, data) - gw.Close() - pw.Close() - }() + if c.config.ContentEncoding == "gzip" { + rc, err := internal.CompressWithGzip(reader) + if err != nil { + return nil, err + } - return pr, err + return rc, nil + } + + return ioutil.NopCloser(reader), nil } func (c *httpClient) addHeaders(req *http.Request) { - if c.Username != "" || c.Password != "" { - req.SetBasicAuth(c.Username, c.Password) + if c.config.Username != "" || c.config.Password != "" { + req.SetBasicAuth(c.config.Username, c.config.Password) } - for header, value := range c.Headers { + for header, value := range 
c.config.Headers { req.Header.Set(header, value) } } @@ -446,3 +505,7 @@ func makeQueryURL(loc *url.URL) (string, error) { } return u.String(), nil } + +func (c *httpClient) Close() { + c.client.CloseIdleConnections() +} diff --git a/plugins/outputs/influxdb/http_test.go b/plugins/outputs/influxdb/http_test.go index 30cc1f8b6..1d030d36c 100644 --- a/plugins/outputs/influxdb/http_test.go +++ b/plugins/outputs/influxdb/http_test.go @@ -18,8 +18,10 @@ import ( "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -32,14 +34,14 @@ func getHTTPURL() *url.URL { } func TestHTTP_EmptyConfig(t *testing.T) { - config := &influxdb.HTTPConfig{} + config := influxdb.HTTPConfig{} _, err := influxdb.NewHTTPClient(config) require.Error(t, err) require.Contains(t, err.Error(), influxdb.ErrMissingURL.Error()) } func TestHTTP_MinimalConfig(t *testing.T) { - config := &influxdb.HTTPConfig{ + config := influxdb.HTTPConfig{ URL: getHTTPURL(), } _, err := influxdb.NewHTTPClient(config) @@ -47,7 +49,7 @@ func TestHTTP_MinimalConfig(t *testing.T) { } func TestHTTP_UnsupportedScheme(t *testing.T) { - config := &influxdb.HTTPConfig{ + config := influxdb.HTTPConfig{ URL: &url.URL{ Scheme: "foo", Host: "localhost", @@ -68,14 +70,14 @@ func TestHTTP_CreateDatabase(t *testing.T) { tests := []struct { name string - config *influxdb.HTTPConfig + config influxdb.HTTPConfig database string queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) errFunc func(t *testing.T, err error) }{ { name: "success", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "xyzzy", }, @@ -87,7 +89,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "send basic auth", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Username: "guy", Password: "smiley", @@ -105,7 +107,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "send user agent", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Headers: map[string]string{ "A": "B", @@ -123,7 +125,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "send headers", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Headers: map[string]string{ "A": "B", @@ -140,7 +142,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "database default", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { @@ -151,7 +153,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "database name is escaped", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: `a " b`, }, @@ -163,7 +165,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "invalid database name creates api error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: `a \\ b`, }, @@ -184,7 +186,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "error with no response body", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -202,7 +204,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { }, { name: "ok with no response body", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", }, @@ -229,7 +231,7 @@ func TestHTTP_CreateDatabase(t *testing.T) { 
client, err := influxdb.NewHTTPClient(tt.config) require.NoError(t, err) - err = client.CreateDatabase(ctx) + err = client.CreateDatabase(ctx, client.Database()) if tt.errFunc != nil { tt.errFunc(t, err) } else { @@ -248,16 +250,17 @@ func TestHTTP_Write(t *testing.T) { tests := []struct { name string - config *influxdb.HTTPConfig + config influxdb.HTTPConfig queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) errFunc func(t *testing.T, err error) logFunc func(t *testing.T, str string) }{ { name: "success", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.FormValue("db"), "telegraf") @@ -269,11 +272,12 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send basic auth", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", Username: "guy", Password: "smiley", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { username, password, ok := r.BasicAuth() @@ -285,20 +289,34 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send user agent", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", UserAgent: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.Header.Get("User-Agent"), "telegraf") w.WriteHeader(http.StatusNoContent) }, }, + { + name: "default user agent", + config: influxdb.HTTPConfig{ + URL: u, + Database: "telegraf", + Log: testutil.Logger{}, + }, + queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, internal.ProductToken(), r.Header.Get("User-Agent")) + w.WriteHeader(http.StatusNoContent) + }, + }, { name: "default database", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, "telegraf", r.FormValue("db")) @@ -307,12 +325,13 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send headers", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Headers: map[string]string{ "A": "B", "C": "D", }, + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, r.Header.Get("A"), "B") @@ -322,10 +341,11 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send retention policy", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", RetentionPolicy: "foo", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, "foo", r.FormValue("rp")) @@ -334,10 +354,11 @@ func TestHTTP_Write(t *testing.T) { }, { name: "send consistency", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", Consistency: "all", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, "all", r.FormValue("consistency")) @@ -346,9 +367,10 @@ func TestHTTP_Write(t *testing.T) { }, { name: "hinted handoff not empty no log no error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { 
w.WriteHeader(http.StatusBadRequest) @@ -360,9 +382,10 @@ func TestHTTP_Write(t *testing.T) { }, { name: "partial write errors are logged no error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) @@ -374,9 +397,10 @@ func TestHTTP_Write(t *testing.T) { }, { name: "parse errors are logged no error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadRequest) @@ -388,9 +412,10 @@ func TestHTTP_Write(t *testing.T) { }, { name: "http error", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusBadGateway) @@ -405,9 +430,10 @@ func TestHTTP_Write(t *testing.T) { }, { name: "http error with desc", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusServiceUnavailable) @@ -506,14 +532,15 @@ func TestHTTP_WritePathPrefix(t *testing.T) { require.NoError(t, err) metrics := []telegraf.Metric{m} - config := &influxdb.HTTPConfig{ + config := influxdb.HTTPConfig{ URL: u, Database: "telegraf", + Log: testutil.Logger{}, } client, err := influxdb.NewHTTPClient(config) require.NoError(t, err) - err = client.CreateDatabase(ctx) + err = client.CreateDatabase(ctx, config.Database) require.NoError(t, err) err = client.Write(ctx, metrics) require.NoError(t, err) @@ -559,10 +586,11 @@ func TestHTTP_WriteContentEncodingGzip(t *testing.T) { require.NoError(t, err) metrics := []telegraf.Metric{m} - config := &influxdb.HTTPConfig{ + config := influxdb.HTTPConfig{ URL: u, Database: "telegraf", ContentEncoding: "gzip", + Log: testutil.Logger{}, } client, err := influxdb.NewHTTPClient(config) @@ -591,7 +619,7 @@ func TestHTTP_UnixSocket(t *testing.T) { tests := []struct { name string - config *influxdb.HTTPConfig + config influxdb.HTTPConfig database string queryHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) writeHandlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) @@ -599,9 +627,10 @@ func TestHTTP_UnixSocket(t *testing.T) { }{ { name: "success", - config: &influxdb.HTTPConfig{ + config: influxdb.HTTPConfig{ URL: &url.URL{Scheme: "unix", Path: sock}, Database: "xyzzy", + Log: testutil.Logger{}, }, queryHandlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { require.Equal(t, `CREATE DATABASE "xyzzy"`, r.FormValue("q")) @@ -635,7 +664,7 @@ func TestHTTP_UnixSocket(t *testing.T) { client, err := influxdb.NewHTTPClient(tt.config) require.NoError(t, err) - err = client.CreateDatabase(ctx) + err = client.CreateDatabase(ctx, tt.config.Database) if tt.errFunc != nil { tt.errFunc(t, err) } else { @@ -644,3 +673,465 @@ func TestHTTP_UnixSocket(t *testing.T) { }) } } + +func TestHTTP_WriteDatabaseTagWorksOnRetry(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + r.ParseForm() + require.Equal(t, r.Form["db"], []string{"foo"}) + + body, err := ioutil.ReadAll(r.Body) + 
require.NoError(t, err) + require.Contains(t, string(body), "cpu value=42") + + w.WriteHeader(http.StatusNoContent) + return + default: + w.WriteHeader(http.StatusNotFound) + return + } + }), + ) + defer ts.Close() + + addr := &url.URL{ + Scheme: "http", + Host: ts.Listener.Addr().String(), + } + + config := influxdb.HTTPConfig{ + URL: addr, + Database: "telegraf", + DatabaseTag: "database", + ExcludeDatabaseTag: true, + Log: testutil.Logger{}, + } + + client, err := influxdb.NewHTTPClient(config) + require.NoError(t, err) + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "database": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + } + + ctx := context.Background() + err = client.Write(ctx, metrics) + require.NoError(t, err) + err = client.Write(ctx, metrics) + require.NoError(t, err) +} + +func TestDBRPTags(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + tests := []struct { + name string + config influxdb.HTTPConfig + metrics []telegraf.Metric + handlerFunc func(t *testing.T, w http.ResponseWriter, r *http.Request) + url string + }{ + { + name: "defaults", + config: influxdb.HTTPConfig{ + URL: u, + Database: "telegraf", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "database": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "static retention policy", + config: influxdb.HTTPConfig{ + URL: u, + Database: "telegraf", + RetentionPolicy: "foo", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "foo") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "retention policy tag", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicyTag: "rp", + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "rp": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "foo") + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Contains(t, string(body), "cpu,rp=foo value=42") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "retention policy tag fallback to static rp", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicy: "foo", + RetentionPolicyTag: "rp", + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, 
r.FormValue("rp"), "foo") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "retention policy tag fallback to unset rp", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicyTag: "rp", + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "exclude retention policy tag", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicyTag: "rp", + ExcludeRetentionPolicyTag: true, + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "rp": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "foo") + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Contains(t, string(body), "cpu value=42") + w.WriteHeader(http.StatusNoContent) + }, + }, + { + name: "exclude database tag keeps retention policy tag", + config: influxdb.HTTPConfig{ + URL: u, + SkipDatabaseCreation: true, + Database: "telegraf", + RetentionPolicyTag: "rp", + ExcludeDatabaseTag: true, + Log: testutil.Logger{}, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "rp": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + handlerFunc: func(t *testing.T, w http.ResponseWriter, r *http.Request) { + require.Equal(t, r.FormValue("db"), "telegraf") + require.Equal(t, r.FormValue("rp"), "foo") + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Contains(t, string(body), "cpu,rp=foo value=42") + w.WriteHeader(http.StatusNoContent) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + tt.handlerFunc(t, w, r) + return + default: + w.WriteHeader(http.StatusNotFound) + return + } + }) + + client, err := influxdb.NewHTTPClient(tt.config) + require.NoError(t, err) + + ctx := context.Background() + err = client.Write(ctx, tt.metrics) + require.NoError(t, err) + }) + } +} + +type MockHandlerChain struct { + handlers []http.HandlerFunc +} + +func (h *MockHandlerChain) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if len(h.handlers) == 0 { + w.WriteHeader(http.StatusInternalServerError) + return + } + next, rest := h.handlers[0], h.handlers[1:] + h.handlers = rest + next(w, r) +} + +func (h *MockHandlerChain) Done() bool { + return len(h.handlers) == 0 +} + +func TestDBRPTagsCreateDatabaseNotCalledOnRetryAfterForbidden(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + handlers := &MockHandlerChain{ + handlers: []http.HandlerFunc{ + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/query": + if r.FormValue("q") != `CREATE DATABASE "telegraf"` { + 
w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNoContent) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNoContent) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + }, + } + ts.Config.Handler = handlers + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + } + + output := influxdb.InfluxDB{ + URL: u.String(), + Database: "telegraf", + DatabaseTag: "database", + Log: testutil.Logger{}, + CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { + return influxdb.NewHTTPClient(*config) + }, + } + err = output.Connect() + require.NoError(t, err) + err = output.Write(metrics) + require.NoError(t, err) + err = output.Write(metrics) + require.NoError(t, err) + + require.True(t, handlers.Done(), "all handlers not called") +} + +func TestDBRPTagsCreateDatabaseCalledOnDatabaseNotFound(t *testing.T) { + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + u, err := url.Parse(fmt.Sprintf("http://%s", ts.Listener.Addr().String())) + require.NoError(t, err) + + handlers := &MockHandlerChain{ + handlers: []http.HandlerFunc{ + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/query": + if r.FormValue("q") != `CREATE DATABASE "telegraf"` { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusForbidden) + w.Write([]byte(`{"results": [{"error": "error authorizing query"}]}`)) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNotFound) + w.Write([]byte(`{"error": "database not found: \"telegraf\""}`)) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/query": + if r.FormValue("q") != `CREATE DATABASE "telegraf"` { + w.WriteHeader(http.StatusInternalServerError) + return + } + w.WriteHeader(http.StatusForbidden) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/write": + w.WriteHeader(http.StatusNoContent) + default: + w.WriteHeader(http.StatusInternalServerError) + } + }, + }, + } + ts.Config.Handler = handlers + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + } + + output := influxdb.InfluxDB{ + URL: u.String(), + Database: "telegraf", + DatabaseTag: "database", + Log: testutil.Logger{}, + CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { + return influxdb.NewHTTPClient(*config) + }, + } + + err = output.Connect() + require.NoError(t, err) + err = output.Write(metrics) + require.Error(t, err) + err = output.Write(metrics) + require.NoError(t, err) + + require.True(t, handlers.Done(), "all handlers not called") +} diff --git a/plugins/outputs/influxdb/influxdb.go 
b/plugins/outputs/influxdb/influxdb.go index f80722bc3..1c4af5bca 100644 --- a/plugins/outputs/influxdb/influxdb.go +++ b/plugins/outputs/influxdb/influxdb.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log" "math/rand" "net/url" "time" @@ -24,29 +23,33 @@ var ( type Client interface { Write(context.Context, []telegraf.Metric) error - CreateDatabase(ctx context.Context) error - - URL() string + CreateDatabase(ctx context.Context, database string) error Database() string + URL() string + Close() } // InfluxDB struct is the primary data structure for the plugin type InfluxDB struct { - URL string // url deprecated in 0.1.9; use urls - URLs []string `toml:"urls"` - Username string - Password string - Database string - UserAgent string - RetentionPolicy string - WriteConsistency string - Timeout internal.Duration - UDPPayload int `toml:"udp_payload"` - HTTPProxy string `toml:"http_proxy"` - HTTPHeaders map[string]string `toml:"http_headers"` - ContentEncoding string `toml:"content_encoding"` - SkipDatabaseCreation bool `toml:"skip_database_creation"` - InfluxUintSupport bool `toml:"influx_uint_support"` + URL string // url deprecated in 0.1.9; use urls + URLs []string `toml:"urls"` + Username string `toml:"username"` + Password string `toml:"password"` + Database string `toml:"database"` + DatabaseTag string `toml:"database_tag"` + ExcludeDatabaseTag bool `toml:"exclude_database_tag"` + RetentionPolicy string `toml:"retention_policy"` + RetentionPolicyTag string `toml:"retention_policy_tag"` + ExcludeRetentionPolicyTag bool `toml:"exclude_retention_policy_tag"` + UserAgent string `toml:"user_agent"` + WriteConsistency string `toml:"write_consistency"` + Timeout internal.Duration `toml:"timeout"` + UDPPayload internal.Size `toml:"udp_payload"` + HTTPProxy string `toml:"http_proxy"` + HTTPHeaders map[string]string `toml:"http_headers"` + ContentEncoding string `toml:"content_encoding"` + SkipDatabaseCreation bool `toml:"skip_database_creation"` + InfluxUintSupport bool `toml:"influx_uint_support"` tls.ClientConfig Precision string // precision deprecated in 1.0; value is ignored @@ -56,7 +59,7 @@ type InfluxDB struct { CreateHTTPClientF func(config *HTTPConfig) (Client, error) CreateUDPClientF func(config *UDPConfig) (Client, error) - serializer *influx.Serializer + Log telegraf.Logger } var sampleConfig = ` @@ -69,8 +72,16 @@ var sampleConfig = ` # urls = ["http://127.0.0.1:8086"] ## The target database for metrics; will be created as needed. + ## For UDP url endpoint database needs to be configured on server side. # database = "telegraf" + ## The value of this tag will be used to determine the database. If this + ## tag is not set the 'database' option is used as the default. + # database_tag = "" + + ## If true, the 'database_tag' will not be included in the written metric. + # exclude_database_tag = false + ## If true, no CREATE DATABASE queries will be sent. Set to true when using ## Telegraf with a user without permissions to create databases or when the ## database already exists. @@ -80,6 +91,13 @@ var sampleConfig = ` ## the default retention policy. Only takes effect when using HTTP. # retention_policy = "" + ## The value of this tag will be used to determine the retention policy. If this + ## tag is not set the 'retention_policy' option is used as the default. + # retention_policy_tag = "" + + ## If true, the 'retention_policy_tag' will not be included in the written metric. 
+ # exclude_retention_policy_tag = false + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". ## Only takes effect when using HTTP. # write_consistency = "any" @@ -95,7 +113,7 @@ var sampleConfig = ` # user_agent = "telegraf" ## UDP payload size is the maximum packet size to send. - # udp_payload = 512 + # udp_payload = "512B" ## Optional TLS Config for use on HTTP connections. # tls_ca = "/etc/telegraf/ca.pem" @@ -135,42 +153,37 @@ func (i *InfluxDB) Connect() error { urls = append(urls, defaultURL) } - i.serializer = influx.NewSerializer() - if i.InfluxUintSupport { - i.serializer.SetFieldTypeSupport(influx.UintSupport) - } - for _, u := range urls { - u, err := url.Parse(u) + parts, err := url.Parse(u) if err != nil { - return fmt.Errorf("error parsing url [%s]: %v", u, err) + return fmt.Errorf("error parsing url [%q]: %v", u, err) } var proxy *url.URL if len(i.HTTPProxy) > 0 { proxy, err = url.Parse(i.HTTPProxy) if err != nil { - return fmt.Errorf("error parsing proxy_url [%s]: %v", proxy, err) + return fmt.Errorf("error parsing proxy_url [%s]: %v", i.HTTPProxy, err) } } - switch u.Scheme { + switch parts.Scheme { case "udp", "udp4", "udp6": - c, err := i.udpClient(u) + c, err := i.udpClient(parts) if err != nil { return err } i.clients = append(i.clients, c) case "http", "https", "unix": - c, err := i.httpClient(ctx, u, proxy) + c, err := i.httpClient(ctx, parts, proxy) if err != nil { return err } i.clients = append(i.clients, c) default: - return fmt.Errorf("unsupported scheme [%s]: %q", u, u.Scheme) + return fmt.Errorf("unsupported scheme [%q]: %q", u, parts.Scheme) } } @@ -178,6 +191,9 @@ func (i *InfluxDB) Connect() error { } func (i *InfluxDB) Close() error { + for _, client := range i.clients { + client.Close() + } return nil } @@ -204,19 +220,17 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { } switch apiError := err.(type) { - case *APIError: + case *DatabaseNotFoundError: if !i.SkipDatabaseCreation { - if apiError.Type == DatabaseNotFound { - err := client.CreateDatabase(ctx) - if err != nil { - log.Printf("E! [outputs.influxdb] when writing to [%s]: database %q not found and failed to recreate", - client.URL(), client.Database()) - } + err := client.CreateDatabase(ctx, apiError.Database) + if err != nil { + i.Log.Errorf("When writing to [%s]: database %q not found and failed to recreate", + client.URL(), apiError.Database) } } } - log.Printf("E! 
[outputs.influxdb]: when writing to [%s]: %v", client.URL(), err) + i.Log.Errorf("When writing to [%s]: %v", client.URL(), err) } return errors.New("could not write any address") @@ -225,8 +239,9 @@ func (i *InfluxDB) Write(metrics []telegraf.Metric) error { func (i *InfluxDB) udpClient(url *url.URL) (Client, error) { config := &UDPConfig{ URL: url, - MaxPayloadSize: i.UDPPayload, - Serializer: i.serializer, + MaxPayloadSize: int(i.UDPPayload.Size), + Serializer: i.newSerializer(), + Log: i.Log, } c, err := i.CreateUDPClientF(config) @@ -244,19 +259,25 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) } config := &HTTPConfig{ - URL: url, - Timeout: i.Timeout.Duration, - TLSConfig: tlsConfig, - UserAgent: i.UserAgent, - Username: i.Username, - Password: i.Password, - Proxy: proxy, - ContentEncoding: i.ContentEncoding, - Headers: i.HTTPHeaders, - Database: i.Database, - RetentionPolicy: i.RetentionPolicy, - Consistency: i.WriteConsistency, - Serializer: i.serializer, + URL: url, + Timeout: i.Timeout.Duration, + TLSConfig: tlsConfig, + UserAgent: i.UserAgent, + Username: i.Username, + Password: i.Password, + Proxy: proxy, + ContentEncoding: i.ContentEncoding, + Headers: i.HTTPHeaders, + Database: i.Database, + DatabaseTag: i.DatabaseTag, + ExcludeDatabaseTag: i.ExcludeDatabaseTag, + SkipDatabaseCreation: i.SkipDatabaseCreation, + RetentionPolicy: i.RetentionPolicy, + RetentionPolicyTag: i.RetentionPolicyTag, + ExcludeRetentionPolicyTag: i.ExcludeRetentionPolicyTag, + Consistency: i.WriteConsistency, + Serializer: i.newSerializer(), + Log: i.Log, } c, err := i.CreateHTTPClientF(config) @@ -265,9 +286,9 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) } if !i.SkipDatabaseCreation { - err = c.CreateDatabase(ctx) + err = c.CreateDatabase(ctx, c.Database()) if err != nil { - log.Printf("W! 
[outputs.influxdb] when writing to [%s]: database %q creation failed: %v", + i.Log.Warnf("When writing to [%s]: database %q creation failed: %v", c.URL(), c.Database(), err) } } @@ -275,15 +296,24 @@ func (i *InfluxDB) httpClient(ctx context.Context, url *url.URL, proxy *url.URL) return c, nil } +func (i *InfluxDB) newSerializer() *influx.Serializer { + serializer := influx.NewSerializer() + if i.InfluxUintSupport { + serializer.SetFieldTypeSupport(influx.UintSupport) + } + + return serializer +} + func init() { outputs.Add("influxdb", func() telegraf.Output { return &InfluxDB{ Timeout: internal.Duration{Duration: time.Second * 5}, CreateHTTPClientF: func(config *HTTPConfig) (Client, error) { - return NewHTTPClient(config) + return NewHTTPClient(*config) }, CreateUDPClientF: func(config *UDPConfig) (Client, error) { - return NewUDPClient(config) + return NewUDPClient(*config) }, } }) diff --git a/plugins/outputs/influxdb/influxdb_test.go b/plugins/outputs/influxdb/influxdb_test.go index 3ec10989e..4b86de4de 100644 --- a/plugins/outputs/influxdb/influxdb_test.go +++ b/plugins/outputs/influxdb/influxdb_test.go @@ -11,30 +11,42 @@ import ( "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) type MockClient struct { URLF func() string - DatabaseF func() string WriteF func(context.Context, []telegraf.Metric) error - CreateDatabaseF func(ctx context.Context) error + CreateDatabaseF func(ctx context.Context, database string) error + DatabaseF func() string + CloseF func() + + log telegraf.Logger } func (c *MockClient) URL() string { return c.URLF() } -func (c *MockClient) Database() string { - return c.DatabaseF() -} - func (c *MockClient) Write(ctx context.Context, metrics []telegraf.Metric) error { return c.WriteF(ctx, metrics) } -func (c *MockClient) CreateDatabase(ctx context.Context) error { - return c.CreateDatabaseF(ctx) +func (c *MockClient) CreateDatabase(ctx context.Context, database string) error { + return c.CreateDatabaseF(ctx, database) +} + +func (c *MockClient) Database() string { + return c.DatabaseF() +} + +func (c *MockClient) Close() { + c.CloseF() +} + +func (c *MockClient) SetLogger(log telegraf.Logger) { + c.log = log } func TestDeprecatedURLSupport(t *testing.T) { @@ -47,6 +59,9 @@ func TestDeprecatedURLSupport(t *testing.T) { return &MockClient{}, nil }, } + + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, err) require.Equal(t, "udp://localhost:8089", actual.URL.String()) @@ -58,12 +73,18 @@ func TestDefaultURL(t *testing.T) { CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { actual = config return &MockClient{ - CreateDatabaseF: func(ctx context.Context) error { + DatabaseF: func() string { + return "telegraf" + }, + CreateDatabaseF: func(ctx context.Context, database string) error { return nil }, }, nil }, } + + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, err) require.Equal(t, "http://localhost:8086", actual.URL.String()) @@ -74,13 +95,15 @@ func TestConnectUDPConfig(t *testing.T) { output := influxdb.InfluxDB{ URLs: []string{"udp://localhost:8089"}, - UDPPayload: 42, + UDPPayload: internal.Size{Size: 42}, CreateUDPClientF: func(config *influxdb.UDPConfig) (influxdb.Client, error) { actual = config return &MockClient{}, nil }, } + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, 
err) @@ -113,12 +136,18 @@ func TestConnectHTTPConfig(t *testing.T) { CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { actual = config return &MockClient{ - CreateDatabaseF: func(ctx context.Context) error { + DatabaseF: func() string { + return "telegraf" + }, + CreateDatabaseF: func(ctx context.Context, database string) error { return nil }, }, nil }, } + + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, err) @@ -142,28 +171,32 @@ func TestConnectHTTPConfig(t *testing.T) { func TestWriteRecreateDatabaseIfDatabaseNotFound(t *testing.T) { output := influxdb.InfluxDB{ URLs: []string{"http://localhost:8086"}, - CreateHTTPClientF: func(config *influxdb.HTTPConfig) (influxdb.Client, error) { return &MockClient{ - CreateDatabaseF: func(ctx context.Context) error { + DatabaseF: func() string { + return "telegraf" + }, + CreateDatabaseF: func(ctx context.Context, database string) error { return nil }, WriteF: func(ctx context.Context, metrics []telegraf.Metric) error { - return &influxdb.APIError{ - StatusCode: http.StatusNotFound, - Title: "404 Not Found", - Description: `database not found "telegraf"`, - Type: influxdb.DatabaseNotFound, + return &influxdb.DatabaseNotFoundError{ + APIError: influxdb.APIError{ + StatusCode: http.StatusNotFound, + Title: "404 Not Found", + Description: `database not found "telegraf"`, + }, } }, URLF: func() string { return "http://localhost:8086" - }, }, nil }, } + output.Log = testutil.Logger{} + err := output.Connect() require.NoError(t, err) diff --git a/plugins/outputs/influxdb/udp.go b/plugins/outputs/influxdb/udp.go index 5b3f5ce51..0add3c6c3 100644 --- a/plugins/outputs/influxdb/udp.go +++ b/plugins/outputs/influxdb/udp.go @@ -1,13 +1,14 @@ package influxdb import ( + "bufio" + "bytes" "context" "fmt" "net" "net/url" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/plugins/serializers/influx" ) @@ -28,11 +29,12 @@ type Conn interface { type UDPConfig struct { MaxPayloadSize int URL *url.URL - Serializer serializers.Serializer + Serializer *influx.Serializer Dialer Dialer + Log telegraf.Logger } -func NewUDPClient(config *UDPConfig) (*udpClient, error) { +func NewUDPClient(config UDPConfig) (*udpClient, error) { if config.URL == nil { return nil, ErrMissingURL } @@ -45,9 +47,9 @@ func NewUDPClient(config *UDPConfig) (*udpClient, error) { serializer := config.Serializer if serializer == nil { s := influx.NewSerializer() - s.SetMaxLineBytes(config.MaxPayloadSize) serializer = s } + serializer.SetMaxLineBytes(size) dialer := config.Dialer if dialer == nil { @@ -58,6 +60,7 @@ func NewUDPClient(config *UDPConfig) (*udpClient, error) { url: config.URL, serializer: serializer, dialer: dialer, + log: config.Log, } return client, nil } @@ -65,8 +68,9 @@ func NewUDPClient(config *UDPConfig) (*udpClient, error) { type udpClient struct { conn Conn dialer Dialer - serializer serializers.Serializer + serializer *influx.Serializer url *url.URL + log telegraf.Logger } func (c *udpClient) URL() string { @@ -89,10 +93,18 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error for _, metric := range metrics { octets, err := c.serializer.Serialize(metric) if err != nil { - return fmt.Errorf("could not serialize metric: %v", err) + // Since we are serializing multiple metrics, don't fail the + // entire batch just because of one unserializable metric. 
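The UDP client change above writes the serialized output line by line instead of as one blob, using a `bufio.Scanner` with a custom split function so that each datagram carries complete line protocol records. A standalone sketch of that splitter; `scanLinesKeepNewline` is a hypothetical name written in the spirit of the `scanLines` helper that appears a little further down in this diff:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
)

// scanLinesKeepNewline is a bufio.SplitFunc that emits complete
// newline-terminated lines, keeping the trailing '\n', so each token can be
// sent as one UDP packet containing whole line protocol records.
func scanLinesKeepNewline(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, '\n'); i >= 0 {
		return i + 1, data[:i+1], nil
	}
	// No complete line yet; ask the scanner for more data.
	return 0, nil, nil
}

func main() {
	serialized := []byte("cpu value=42 1\nmem used=10 1\n")

	scanner := bufio.NewScanner(bytes.NewReader(serialized))
	scanner.Split(scanLinesKeepNewline)
	for scanner.Scan() {
		// Each token would be handed to conn.Write as one datagram payload.
		fmt.Printf("packet: %q\n", scanner.Text())
	}
}
```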
+ c.log.Errorf("When writing to [%s] could not serialize metric: %v", + c.URL(), err) + continue } - _, err = c.conn.Write(octets) + scanner := bufio.NewScanner(bytes.NewReader(octets)) + scanner.Split(scanLines) + for scanner.Scan() { + _, err = c.conn.Write(scanner.Bytes()) + } if err != nil { c.conn.Close() c.conn = nil @@ -103,7 +115,7 @@ func (c *udpClient) Write(ctx context.Context, metrics []telegraf.Metric) error return nil } -func (c *udpClient) CreateDatabase(ctx context.Context) error { +func (c *udpClient) CreateDatabase(ctx context.Context, database string) error { return nil } @@ -114,3 +126,18 @@ type netDialer struct { func (d *netDialer) DialContext(ctx context.Context, network, address string) (Conn, error) { return d.Dialer.DialContext(ctx, network, address) } + +func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + if i := bytes.IndexByte(data, '\n'); i >= 0 { + // We have a full newline-terminated line. + return i + 1, data[0 : i+1], nil + + } + return 0, nil, nil +} + +func (c *udpClient) Close() { +} diff --git a/plugins/outputs/influxdb/udp_test.go b/plugins/outputs/influxdb/udp_test.go index 017ee0be9..2e60c586c 100644 --- a/plugins/outputs/influxdb/udp_test.go +++ b/plugins/outputs/influxdb/udp_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "log" "net" "net/url" "sync" @@ -13,7 +14,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/outputs/influxdb" - "github.com/influxdata/telegraf/plugins/serializers/influx" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -65,28 +66,15 @@ func (d *MockDialer) DialContext(ctx context.Context, network string, address st return d.DialContextF(network, address) } -type MockSerializer struct { - SerializeF func(metric telegraf.Metric) ([]byte, error) - SerializeBatchF func(metrics []telegraf.Metric) ([]byte, error) -} - -func (s *MockSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { - return s.SerializeF(metric) -} - -func (s *MockSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { - return s.SerializeBatchF(metrics) -} - func TestUDP_NewUDPClientNoURL(t *testing.T) { - config := &influxdb.UDPConfig{} + config := influxdb.UDPConfig{} _, err := influxdb.NewUDPClient(config) require.Equal(t, err, influxdb.ErrMissingURL) } func TestUDP_URL(t *testing.T) { u := getURL() - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: u, } @@ -99,7 +87,7 @@ func TestUDP_URL(t *testing.T) { func TestUDP_Simple(t *testing.T) { var buffer bytes.Buffer - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: getURL(), Dialer: &MockDialer{ DialContextF: func(network, address string) (influxdb.Conn, error) { @@ -130,7 +118,7 @@ func TestUDP_DialError(t *testing.T) { u, err := url.Parse("invalid://127.0.0.1:9999") require.NoError(t, err) - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: u, Dialer: &MockDialer{ DialContextF: func(network, address string) (influxdb.Conn, error) { @@ -150,7 +138,7 @@ func TestUDP_DialError(t *testing.T) { func TestUDP_WriteError(t *testing.T) { closed := false - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: getURL(), Dialer: &MockDialer{ DialContextF: func(network, address string) (influxdb.Conn, error) { @@ -177,32 +165,75 @@ func TestUDP_WriteError(t *testing.T) { require.True(t, closed) } -func 
TestUDP_SerializeError(t *testing.T) { - config := &influxdb.UDPConfig{ - URL: getURL(), - Dialer: &MockDialer{ - DialContextF: func(network, address string) (influxdb.Conn, error) { - conn := &MockConn{} - return conn, nil +func TestUDP_ErrorLogging(t *testing.T) { + tests := []struct { + name string + config influxdb.UDPConfig + metrics []telegraf.Metric + logContains string + }{ + { + name: "logs need more space", + config: influxdb.UDPConfig{ + MaxPayloadSize: 1, + URL: getURL(), + Dialer: &MockDialer{ + DialContextF: func(network, address string) (influxdb.Conn, error) { + conn := &MockConn{} + return conn, nil + }, + }, + Log: testutil.Logger{}, }, + metrics: []telegraf.Metric{getMetric()}, + logContains: `could not serialize metric: "cpu": need more space`, }, - Serializer: &MockSerializer{ - SerializeF: func(metric telegraf.Metric) ([]byte, error) { - return nil, influx.ErrNeedMoreSpace + { + name: "logs series name", + config: influxdb.UDPConfig{ + URL: getURL(), + Dialer: &MockDialer{ + DialContextF: func(network, address string) (influxdb.Conn, error) { + conn := &MockConn{} + return conn, nil + }, + }, + Log: testutil.Logger{}, }, + metrics: []telegraf.Metric{ + func() telegraf.Metric { + metric, _ := metric.New( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ) + return metric + }(), + }, + logContains: `could not serialize metric: "cpu,host=example.org": no serializable fields`, }, } - client, err := influxdb.NewUDPClient(config) - require.NoError(t, err) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var b bytes.Buffer + log.SetOutput(&b) - ctx := context.Background() - err = client.Write(ctx, []telegraf.Metric{getMetric()}) - require.Error(t, err) - require.Contains(t, err.Error(), influx.ErrNeedMoreSpace.Error()) + client, err := influxdb.NewUDPClient(tt.config) + require.NoError(t, err) + + ctx := context.Background() + err = client.Write(ctx, tt.metrics) + require.NoError(t, err) + require.Contains(t, b.String(), tt.logContains) + }) + } } func TestUDP_WriteWithRealConn(t *testing.T) { - conn, err := net.ListenPacket("udp", "127.0.0.0:0") + conn, err := net.ListenPacket("udp", "127.0.0.1:0") require.NoError(t, err) metrics := []telegraf.Metric{ @@ -216,7 +247,7 @@ func TestUDP_WriteWithRealConn(t *testing.T) { go func() { defer wg.Done() var total int - for _, _ = range metrics { + for range metrics { n, _, err := conn.ReadFrom(buf[total:]) if err != nil { break @@ -230,7 +261,7 @@ func TestUDP_WriteWithRealConn(t *testing.T) { u, err := url.Parse(fmt.Sprintf("%s://%s", addr.Network(), addr)) require.NoError(t, err) - config := &influxdb.UDPConfig{ + config := influxdb.UDPConfig{ URL: u, } client, err := influxdb.NewUDPClient(config) diff --git a/plugins/outputs/influxdb_v2/README.md b/plugins/outputs/influxdb_v2/README.md new file mode 100644 index 000000000..49c080f33 --- /dev/null +++ b/plugins/outputs/influxdb_v2/README.md @@ -0,0 +1,61 @@ +# InfluxDB v2.x Output Plugin + +The InfluxDB output plugin writes metrics to the [InfluxDB v2.x] HTTP service. + +### Configuration: + +```toml +# Configuration for sending metrics to InfluxDB 2.0 +[[outputs.influxdb_v2]] + ## The URLs of the InfluxDB cluster nodes. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] + urls = ["http://127.0.0.1:9999"] + + ## Token for authentication. 
+ token = "" + + ## Organization is the name of the organization you wish to write to. + organization = "" + + ## Destination bucket to write into. + bucket = "" + + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## Enable or disable uint support for writing uints influxdb 2.0. + # influx_uint_support = false + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +[InfluxDB v2.x]: https://github.com/influxdata/influxdb diff --git a/plugins/outputs/influxdb_v2/http.go b/plugins/outputs/influxdb_v2/http.go new file mode 100644 index 000000000..2a32c5f4c --- /dev/null +++ b/plugins/outputs/influxdb_v2/http.go @@ -0,0 +1,352 @@ +package influxdb_v2 + +import ( + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "path" + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/serializers/influx" +) + +type APIError struct { + StatusCode int + Title string + Description string +} + +func (e APIError) Error() string { + if e.Description != "" { + return fmt.Sprintf("%s: %s", e.Title, e.Description) + } + return e.Title +} + +const ( + defaultRequestTimeout = time.Second * 5 + defaultMaxWait = 10 // seconds + defaultDatabase = "telegraf" +) + +type HTTPConfig struct { + URL *url.URL + Token string + Organization string + Bucket string + BucketTag string + ExcludeBucketTag bool + Timeout time.Duration + Headers map[string]string + Proxy *url.URL + UserAgent string + ContentEncoding string + TLSConfig *tls.Config + + Serializer *influx.Serializer +} + +type httpClient struct { + ContentEncoding string + Timeout time.Duration + Headers map[string]string + Organization string + Bucket string + BucketTag string + ExcludeBucketTag bool + + client *http.Client + serializer *influx.Serializer + url *url.URL + retryTime time.Time +} + +func NewHTTPClient(config *HTTPConfig) (*httpClient, error) { + if config.URL == nil { + return nil, ErrMissingURL + } + + timeout := config.Timeout + if timeout == 0 { + timeout = defaultRequestTimeout + } + + userAgent := config.UserAgent + if userAgent == "" { + userAgent = internal.ProductToken() + } + + var headers = make(map[string]string, len(config.Headers)+2) + headers["User-Agent"] = userAgent + headers["Authorization"] = "Token " + config.Token + for k, v := range config.Headers { + headers[k] = v + } + + var proxy func(*http.Request) (*url.URL, error) + if config.Proxy != nil { + proxy = http.ProxyURL(config.Proxy) + } else { + proxy = http.ProxyFromEnvironment 
+ } + + serializer := config.Serializer + if serializer == nil { + serializer = influx.NewSerializer() + } + + var transport *http.Transport + switch config.URL.Scheme { + case "http", "https": + transport = &http.Transport{ + Proxy: proxy, + TLSClientConfig: config.TLSConfig, + } + case "unix": + transport = &http.Transport{ + Dial: func(_, _ string) (net.Conn, error) { + return net.DialTimeout( + config.URL.Scheme, + config.URL.Path, + timeout, + ) + }, + } + default: + return nil, fmt.Errorf("unsupported scheme %q", config.URL.Scheme) + } + + client := &httpClient{ + serializer: serializer, + client: &http.Client{ + Timeout: timeout, + Transport: transport, + }, + url: config.URL, + ContentEncoding: config.ContentEncoding, + Timeout: timeout, + Headers: headers, + Organization: config.Organization, + Bucket: config.Bucket, + BucketTag: config.BucketTag, + ExcludeBucketTag: config.ExcludeBucketTag, + } + return client, nil +} + +// URL returns the origin URL that this client connects too. +func (c *httpClient) URL() string { + return c.url.String() +} + +type genericRespError struct { + Code string + Message string + Line *int32 + MaxLength *int32 +} + +func (g genericRespError) Error() string { + errString := fmt.Sprintf("%s: %s", g.Code, g.Message) + if g.Line != nil { + return fmt.Sprintf("%s - line[%d]", errString, g.Line) + } else if g.MaxLength != nil { + return fmt.Sprintf("%s - maxlen[%d]", errString, g.MaxLength) + } + return errString +} + +func (c *httpClient) Write(ctx context.Context, metrics []telegraf.Metric) error { + if c.retryTime.After(time.Now()) { + return errors.New("Retry time has not elapsed") + } + + batches := make(map[string][]telegraf.Metric) + if c.BucketTag == "" { + err := c.writeBatch(ctx, c.Bucket, metrics) + if err != nil { + return err + } + } else { + for _, metric := range metrics { + bucket, ok := metric.GetTag(c.BucketTag) + if !ok { + bucket = c.Bucket + } + + if _, ok := batches[bucket]; !ok { + batches[bucket] = make([]telegraf.Metric, 0) + } + + if c.ExcludeBucketTag { + // Avoid modifying the metric in case we need to retry the request. + metric = metric.Copy() + metric.Accept() + metric.RemoveTag(c.BucketTag) + } + + batches[bucket] = append(batches[bucket], metric) + } + + for bucket, batch := range batches { + err := c.writeBatch(ctx, bucket, batch) + if err != nil { + return err + } + } + } + return nil +} + +func (c *httpClient) writeBatch(ctx context.Context, bucket string, metrics []telegraf.Metric) error { + loc, err := makeWriteURL(*c.url, c.Organization, bucket) + if err != nil { + return err + } + + reader, err := c.requestBodyReader(metrics) + if err != nil { + return err + } + defer reader.Close() + + req, err := c.makeWriteRequest(loc, reader) + if err != nil { + return err + } + + resp, err := c.client.Do(req.WithContext(ctx)) + if err != nil { + internal.OnClientError(c.client, err) + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNoContent { + return nil + } + + writeResp := &genericRespError{} + err = json.NewDecoder(resp.Body).Decode(writeResp) + desc := writeResp.Error() + if err != nil { + desc = resp.Status + } + + switch resp.StatusCode { + case http.StatusBadRequest, http.StatusRequestEntityTooLarge: + log.Printf("E! 
[outputs.influxdb_v2] Failed to write metric: %s\n", desc) + return nil + case http.StatusUnauthorized, http.StatusForbidden: + return fmt.Errorf("failed to write metric: %s", desc) + case http.StatusTooManyRequests: + retryAfter := resp.Header.Get("Retry-After") + retry, err := strconv.Atoi(retryAfter) + if err != nil { + return errors.New("rate limit exceeded") + } + if retry > defaultMaxWait { + retry = defaultMaxWait + } + c.retryTime = time.Now().Add(time.Duration(retry) * time.Second) + return fmt.Errorf("waiting %ds for server before sending metric again", retry) + case http.StatusServiceUnavailable: + retryAfter := resp.Header.Get("Retry-After") + retry, err := strconv.Atoi(retryAfter) + if err != nil { + return errors.New("server responded: service unavailable") + } + if retry > defaultMaxWait { + retry = defaultMaxWait + } + c.retryTime = time.Now().Add(time.Duration(retry) * time.Second) + return fmt.Errorf("waiting %ds for server before sending metric again", retry) + } + + // This is only until platform spec is fully implemented. As of the + // time of writing, there is no error body returned. + if xErr := resp.Header.Get("X-Influx-Error"); xErr != "" { + desc = fmt.Sprintf("%s; %s", desc, xErr) + } + + return &APIError{ + StatusCode: resp.StatusCode, + Title: resp.Status, + Description: desc, + } +} + +func (c *httpClient) makeWriteRequest(url string, body io.Reader) (*http.Request, error) { + var err error + + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "text/plain; charset=utf-8") + c.addHeaders(req) + + if c.ContentEncoding == "gzip" { + req.Header.Set("Content-Encoding", "gzip") + } + + return req, nil +} + +// requestBodyReader warp io.Reader from influx.NewReader to io.ReadCloser, which is usefully to fast close the write +// side of the connection in case of error +func (c *httpClient) requestBodyReader(metrics []telegraf.Metric) (io.ReadCloser, error) { + reader := influx.NewReader(metrics, c.serializer) + + if c.ContentEncoding == "gzip" { + rc, err := internal.CompressWithGzip(reader) + if err != nil { + return nil, err + } + + return rc, nil + } + + return ioutil.NopCloser(reader), nil +} + +func (c *httpClient) addHeaders(req *http.Request) { + for header, value := range c.Headers { + req.Header.Set(header, value) + } +} + +func makeWriteURL(loc url.URL, org, bucket string) (string, error) { + params := url.Values{} + params.Set("bucket", bucket) + params.Set("org", org) + + switch loc.Scheme { + case "unix": + loc.Scheme = "http" + loc.Host = "127.0.0.1" + loc.Path = "/api/v2/write" + case "http", "https": + loc.Path = path.Join(loc.Path, "/api/v2/write") + default: + return "", fmt.Errorf("unsupported scheme: %q", loc.Scheme) + } + loc.RawQuery = params.Encode() + return loc.String(), nil +} + +func (c *httpClient) Close() { + c.client.CloseIdleConnections() +} diff --git a/plugins/outputs/influxdb_v2/http_internal_test.go b/plugins/outputs/influxdb_v2/http_internal_test.go new file mode 100644 index 000000000..e9685da12 --- /dev/null +++ b/plugins/outputs/influxdb_v2/http_internal_test.go @@ -0,0 +1,47 @@ +package influxdb_v2 + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func genURL(u string) *url.URL { + URL, _ := url.Parse(u) + return URL +} + +func TestMakeWriteURL(t *testing.T) { + tests := []struct { + err bool + url *url.URL + act string + }{ + { + url: genURL("http://localhost:9999"), + act: 
"http://localhost:9999/api/v2/write?bucket=telegraf&org=influx", + }, + { + url: genURL("unix://var/run/influxd.sock"), + act: "http://127.0.0.1/api/v2/write?bucket=telegraf&org=influx", + }, + { + err: true, + url: genURL("udp://localhost:9999"), + }, + } + + for i := range tests { + rURL, err := makeWriteURL(*tests[i].url, "influx", "telegraf") + if !tests[i].err { + require.NoError(t, err) + } else { + require.Error(t, err) + t.Log(err) + } + if err == nil { + require.Equal(t, tests[i].act, rURL) + } + } +} diff --git a/plugins/outputs/influxdb_v2/http_test.go b/plugins/outputs/influxdb_v2/http_test.go new file mode 100644 index 000000000..23c3ff05e --- /dev/null +++ b/plugins/outputs/influxdb_v2/http_test.go @@ -0,0 +1,113 @@ +package influxdb_v2_test + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/influxdata/telegraf" + influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func genURL(u string) *url.URL { + URL, _ := url.Parse(u) + return URL +} +func TestNewHTTPClient(t *testing.T) { + tests := []struct { + err bool + cfg *influxdb.HTTPConfig + }{ + { + err: true, + cfg: &influxdb.HTTPConfig{}, + }, + { + err: true, + cfg: &influxdb.HTTPConfig{ + URL: genURL("udp://localhost:9999"), + }, + }, + { + cfg: &influxdb.HTTPConfig{ + URL: genURL("unix://var/run/influxd.sock"), + }, + }, + } + + for i := range tests { + client, err := influxdb.NewHTTPClient(tests[i].cfg) + if !tests[i].err { + require.NoError(t, err) + } else { + require.Error(t, err) + t.Log(err) + } + if err == nil { + client.URL() + } + } +} + +func TestWriteBucketTagWorksOnRetry(t *testing.T) { + ts := httptest.NewServer( + http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/api/v2/write": + r.ParseForm() + require.Equal(t, r.Form["bucket"], []string{"foo"}) + + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + require.Contains(t, string(body), "cpu value=42") + + w.WriteHeader(http.StatusNoContent) + return + default: + w.WriteHeader(http.StatusNotFound) + return + } + }), + ) + defer ts.Close() + + addr := &url.URL{ + Scheme: "http", + Host: ts.Listener.Addr().String(), + } + + config := &influxdb.HTTPConfig{ + URL: addr, + Bucket: "telegraf", + BucketTag: "bucket", + ExcludeBucketTag: true, + } + + client, err := influxdb.NewHTTPClient(config) + require.NoError(t, err) + + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "bucket": "foo", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + } + + ctx := context.Background() + err = client.Write(ctx, metrics) + require.NoError(t, err) + err = client.Write(ctx, metrics) + require.NoError(t, err) +} diff --git a/plugins/outputs/influxdb_v2/influxdb.go b/plugins/outputs/influxdb_v2/influxdb.go new file mode 100644 index 000000000..4e2314691 --- /dev/null +++ b/plugins/outputs/influxdb_v2/influxdb.go @@ -0,0 +1,220 @@ +package influxdb_v2 + +import ( + "context" + "errors" + "fmt" + "log" + "math/rand" + "net/url" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers/influx" +) + +var ( + defaultURL = "http://localhost:9999" + + ErrMissingURL = errors.New("missing URL") +) + +var sampleConfig = 
` + ## The URLs of the InfluxDB cluster nodes. + ## + ## Multiple URLs can be specified for a single cluster, only ONE of the + ## urls will be written to each interval. + ## ex: urls = ["https://us-west-2-1.aws.cloud2.influxdata.com"] + urls = ["http://127.0.0.1:9999"] + + ## Token for authentication. + token = "" + + ## Organization is the name of the organization you wish to write to; must exist. + organization = "" + + ## Destination bucket to write into. + bucket = "" + + ## The value of this tag will be used to determine the bucket. If this + ## tag is not set the 'bucket' option is used as the default. + # bucket_tag = "" + + ## If true, the bucket tag will not be added to the metric. + # exclude_bucket_tag = false + + ## Timeout for HTTP messages. + # timeout = "5s" + + ## Additional HTTP headers + # http_headers = {"X-Special-Header" = "Special-Value"} + + ## HTTP Proxy override, if unset values the standard proxy environment + ## variables are consulted to determine which proxy, if any, should be used. + # http_proxy = "http://corporate.proxy:3128" + + ## HTTP User-Agent + # user_agent = "telegraf" + + ## Content-Encoding for write request body, can be set to "gzip" to + ## compress body or "identity" to apply no encoding. + # content_encoding = "gzip" + + ## Enable or disable uint support for writing uints influxdb 2.0. + # influx_uint_support = false + + ## Optional TLS Config for use on HTTP connections. + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +type Client interface { + Write(context.Context, []telegraf.Metric) error + + URL() string // for logging + Close() +} + +type InfluxDB struct { + URLs []string `toml:"urls"` + Token string `toml:"token"` + Organization string `toml:"organization"` + Bucket string `toml:"bucket"` + BucketTag string `toml:"bucket_tag"` + ExcludeBucketTag bool `toml:"exclude_bucket_tag"` + Timeout internal.Duration `toml:"timeout"` + HTTPHeaders map[string]string `toml:"http_headers"` + HTTPProxy string `toml:"http_proxy"` + UserAgent string `toml:"user_agent"` + ContentEncoding string `toml:"content_encoding"` + UintSupport bool `toml:"influx_uint_support"` + tls.ClientConfig + + clients []Client +} + +func (i *InfluxDB) Connect() error { + ctx := context.Background() + + if len(i.URLs) == 0 { + i.URLs = append(i.URLs, defaultURL) + } + + for _, u := range i.URLs { + parts, err := url.Parse(u) + if err != nil { + return fmt.Errorf("error parsing url [%q]: %v", u, err) + } + + var proxy *url.URL + if len(i.HTTPProxy) > 0 { + proxy, err = url.Parse(i.HTTPProxy) + if err != nil { + return fmt.Errorf("error parsing proxy_url [%s]: %v", i.HTTPProxy, err) + } + } + + switch parts.Scheme { + case "http", "https", "unix": + c, err := i.getHTTPClient(ctx, parts, proxy) + if err != nil { + return err + } + + i.clients = append(i.clients, c) + default: + return fmt.Errorf("unsupported scheme [%q]: %q", u, parts.Scheme) + } + } + + return nil +} + +func (i *InfluxDB) Close() error { + for _, client := range i.clients { + client.Close() + } + return nil +} + +func (i *InfluxDB) Description() string { + return "Configuration for sending metrics to InfluxDB" +} + +func (i *InfluxDB) SampleConfig() string { + return sampleConfig +} + +// Write sends metrics to one of the configured servers, logging each +// unsuccessful. If all servers fail, return an error. 
+func (i *InfluxDB) Write(metrics []telegraf.Metric) error { + ctx := context.Background() + + var err error + p := rand.Perm(len(i.clients)) + for _, n := range p { + client := i.clients[n] + err = client.Write(ctx, metrics) + if err == nil { + return nil + } + + log.Printf("E! [outputs.influxdb_v2] when writing to [%s]: %v", client.URL(), err) + } + + return err +} + +func (i *InfluxDB) getHTTPClient(ctx context.Context, url *url.URL, proxy *url.URL) (Client, error) { + tlsConfig, err := i.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + config := &HTTPConfig{ + URL: url, + Token: i.Token, + Organization: i.Organization, + Bucket: i.Bucket, + BucketTag: i.BucketTag, + ExcludeBucketTag: i.ExcludeBucketTag, + Timeout: i.Timeout.Duration, + Headers: i.HTTPHeaders, + Proxy: proxy, + UserAgent: i.UserAgent, + ContentEncoding: i.ContentEncoding, + TLSConfig: tlsConfig, + Serializer: i.newSerializer(), + } + + c, err := NewHTTPClient(config) + if err != nil { + return nil, fmt.Errorf("error creating HTTP client [%s]: %v", url, err) + } + + return c, nil +} + +func (i *InfluxDB) newSerializer() *influx.Serializer { + serializer := influx.NewSerializer() + if i.UintSupport { + serializer.SetFieldTypeSupport(influx.UintSupport) + } + + return serializer +} + +func init() { + outputs.Add("influxdb_v2", func() telegraf.Output { + return &InfluxDB{ + Timeout: internal.Duration{Duration: time.Second * 5}, + ContentEncoding: "gzip", + } + }) +} diff --git a/plugins/outputs/influxdb_v2/influxdb_test.go b/plugins/outputs/influxdb_v2/influxdb_test.go new file mode 100644 index 000000000..3702b4309 --- /dev/null +++ b/plugins/outputs/influxdb_v2/influxdb_test.go @@ -0,0 +1,103 @@ +package influxdb_v2_test + +import ( + "testing" + + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/outputs" + influxdb "github.com/influxdata/telegraf/plugins/outputs/influxdb_v2" + "github.com/stretchr/testify/require" +) + +func TestDefaultURL(t *testing.T) { + output := influxdb.InfluxDB{} + err := output.Connect() + require.NoError(t, err) + if len(output.URLs) < 1 { + t.Fatal("Default URL failed to get set") + } + require.Equal(t, "http://localhost:9999", output.URLs[0]) +} +func TestConnect(t *testing.T) { + tests := []struct { + err bool + out influxdb.InfluxDB + }{ + { + out: influxdb.InfluxDB{ + URLs: []string{"http://localhost:1234"}, + HTTPProxy: "http://localhost:9999", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{"!@#$qwert"}, + HTTPProxy: "http://localhost:9999", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{"http://localhost:1234"}, + HTTPProxy: "!@#$%^&*()_+", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{"!@#$%^&*()_+"}, + HTTPProxy: "http://localhost:9999", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{":::@#$qwert"}, + HTTPProxy: "http://localhost:9999", + HTTPHeaders: map[string]string{ + "x": "y", + }, + }, + }, + { + err: true, + out: influxdb.InfluxDB{ + URLs: []string{"https://localhost:8080"}, + ClientConfig: tls.ClientConfig{ + TLSCA: "thing", + }, + }, + }, + } + + for i := range tests { + err := tests[i].out.Connect() + if !tests[i].err { + require.NoError(t, err) + } else { + require.Error(t, err) + t.Log(err) + } + } +} + +func TestUnused(t 
*testing.T) { + thing := influxdb.InfluxDB{} + thing.Close() + thing.Description() + thing.SampleConfig() + outputs.Outputs["influxdb_v2"]() +} diff --git a/plugins/outputs/instrumental/README.md b/plugins/outputs/instrumental/README.md index 128599ee8..f8b48fd1e 100644 --- a/plugins/outputs/instrumental/README.md +++ b/plugins/outputs/instrumental/README.md @@ -20,6 +20,6 @@ by whitespace. The `increment` type is only used if the metric comes in as a cou template = "host.tags.measurement.field" ## Timeout in seconds to connect timeout = "2s" - ## Debug true - Print communcation to Instrumental + ## Debug true - Print communication to Instrumental debug = false ``` diff --git a/plugins/outputs/instrumental/instrumental.go b/plugins/outputs/instrumental/instrumental.go index 117c9d434..e5decbf7f 100644 --- a/plugins/outputs/instrumental/instrumental.go +++ b/plugins/outputs/instrumental/instrumental.go @@ -27,6 +27,7 @@ type Instrumental struct { Prefix string DataFormat string Template string + Templates []string Timeout internal.Duration Debug bool @@ -50,7 +51,7 @@ var sampleConfig = ` template = "host.tags.measurement.field" ## Timeout in seconds to connect timeout = "2s" - ## Display Communcation to Instrumental + ## Display Communication to Instrumental debug = false ` @@ -85,7 +86,7 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { } } - s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false) + s, err := serializers.NewGraphiteSerializer(i.Prefix, i.Template, false, ".", i.Templates) if err != nil { return err } @@ -110,7 +111,8 @@ func (i *Instrumental) Write(metrics []telegraf.Metric) error { buf, err := s.Serialize(m) if err != nil { - log.Printf("E! Error serializing a metric to Instrumental: %s", err) + log.Printf("D! [outputs.instrumental] Could not serialize metric: %v", err) + continue } switch metricType { diff --git a/plugins/outputs/kafka/README.md b/plugins/outputs/kafka/README.md index 196e2e914..d1cc9f0cb 100644 --- a/plugins/outputs/kafka/README.md +++ b/plugins/outputs/kafka/README.md @@ -10,6 +10,22 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## Kafka topic for producer messages topic = "telegraf" + ## The value of this tag will be used as the topic. If not set the 'topic' + ## option is used. + # topic_tag = "" + + ## If true, the 'topic_tag' will be removed from the metric. + # exclude_topic_tag = false + + ## Optional Client id + # client_id = "Telegraf" + + ## Set the minimal supported Kafka version. Setting this enables the use of new + ## Kafka features and APIs. Of particular interest, lz4 compression + ## requires at least version 0.10.0.0. + ## ex: version = "1.1.0" + # version = "" + ## Optional topic suffix configuration. ## If the section is omitted, no suffix is used. 
## Following topic suffix methods are supported: @@ -17,7 +33,7 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm ## tags - suffix equals to separator + specified tags' values ## interleaved with separator - ## Suffix equals to "_" + measurement's name + ## Suffix equals to "_" + measurement name # [outputs.kafka.topic_suffix] # method = "measurement" # separator = "_" @@ -37,15 +53,31 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm # keys = ["foo", "bar"] # separator = "_" - ## Telegraf tag to use as a routing key - ## ie, if this tag exists, its value will be used as the routing key + ## The routing tag specifies a tag key on the metric whose value is used as + ## the message key. The message key is used to determine which partition to + ## send the message to. This tag is preferred over the routing_key option. routing_tag = "host" + ## The routing key is set as the message key and used to determine which + ## partition to send the message to. This value is only used when no + ## routing_tag is set or as a fallback when the tag specified in routing tag + ## is not found. + ## + ## If set to "random", a random value will be generated for each message. + ## + ## When unset, no message key is added and each message is routed to a random + ## partition. + ## + ## ex: routing_key = "random" + ## routing_key = "telegraf" + # routing_key = "" + ## CompressionCodec represents the various compression codecs recognized by ## Kafka in messages. ## 0 : No compression ## 1 : Gzip compression ## 2 : Snappy compression + ## 3 : LZ4 compression # compression_codec = 0 ## RequiredAcks is used in Produce Requests to tell the broker how many @@ -79,6 +111,9 @@ This plugin writes to a [Kafka Broker](http://kafka.apache.org/07/quickstart.htm # sasl_username = "kafka" # sasl_password = "secret" + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + ## Data format to output. 
## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/kafka/kafka.go b/plugins/outputs/kafka/kafka.go index 716e06c44..406febc28 100644 --- a/plugins/outputs/kafka/kafka.go +++ b/plugins/outputs/kafka/kafka.go @@ -3,14 +3,17 @@ package kafka import ( "crypto/tls" "fmt" + "log" "strings" - - "github.com/influxdata/telegraf" - tlsint "github.com/influxdata/telegraf/internal/tls" - "github.com/influxdata/telegraf/plugins/outputs" - "github.com/influxdata/telegraf/plugins/serializers" + "time" "github.com/Shopify/sarama" + "github.com/gofrs/uuid" + "github.com/influxdata/telegraf" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/common/kafka" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/serializers" ) var ValidTopicSuffixMethods = []string{ @@ -19,22 +22,24 @@ var ValidTopicSuffixMethods = []string{ "tags", } +var zeroTime = time.Unix(0, 0) + type ( Kafka struct { - // Kafka brokers to send metrics to - Brokers []string - // Kafka topic - Topic string - // Kafka topic suffix option - TopicSuffix TopicSuffix `toml:"topic_suffix"` - // Routing Key Tag - RoutingTag string `toml:"routing_tag"` - // Compression Codec Tag - CompressionCodec int - // RequiredAcks Tag - RequiredAcks int - // MaxRetry Tag - MaxRetry int + Brokers []string `toml:"brokers"` + Topic string `toml:"topic"` + TopicTag string `toml:"topic_tag"` + ExcludeTopicTag bool `toml:"exclude_topic_tag"` + ClientID string `toml:"client_id"` + TopicSuffix TopicSuffix `toml:"topic_suffix"` + RoutingTag string `toml:"routing_tag"` + RoutingKey string `toml:"routing_key"` + CompressionCodec int `toml:"compression_codec"` + RequiredAcks int `toml:"required_acks"` + MaxRetry int `toml:"max_retry"` + MaxMessageBytes int `toml:"max_message_bytes"` + + Version string `toml:"version"` // Legacy TLS config options // TLS client certificate @@ -44,15 +49,19 @@ type ( // TLS certificate authority CA string + EnableTLS *bool `toml:"enable_tls"` tlsint.ClientConfig - // SASL Username SASLUsername string `toml:"sasl_username"` - // SASL Password SASLPassword string `toml:"sasl_password"` + SASLVersion *int `toml:"sasl_version"` + + Log telegraf.Logger `toml:"-"` tlsConfig tls.Config - producer sarama.SyncProducer + + producerFunc func(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) + producer sarama.SyncProducer serializer serializers.Serializer } @@ -63,12 +72,48 @@ type ( } ) +// DebugLogger logs messages from sarama at the debug level. +type DebugLogger struct { +} + +func (*DebugLogger) Print(v ...interface{}) { + args := make([]interface{}, 0, len(v)+1) + args = append(append(args, "D! [sarama] "), v...) + log.Print(args...) +} + +func (*DebugLogger) Printf(format string, v ...interface{}) { + log.Printf("D! [sarama] "+format, v...) +} + +func (*DebugLogger) Println(v ...interface{}) { + args := make([]interface{}, 0, len(v)+1) + args = append(append(args, "D! [sarama] "), v...) + log.Println(args...) +} + var sampleConfig = ` ## URLs of kafka brokers brokers = ["localhost:9092"] ## Kafka topic for producer messages topic = "telegraf" + ## The value of this tag will be used as the topic. If not set the 'topic' + ## option is used. + # topic_tag = "" + + ## If true, the 'topic_tag' will be removed from the metric. + # exclude_topic_tag = false + + ## Optional Client id + # client_id = "Telegraf" + + ## Set the minimal supported Kafka version. 
Setting this enables the use of new + ## Kafka features and APIs. Of particular interest, lz4 compression + ## requires at least version 0.10.0.0. + ## ex: version = "1.1.0" + # version = "" + ## Optional topic suffix configuration. ## If the section is omitted, no suffix is used. ## Following topic suffix methods are supported: @@ -96,15 +141,31 @@ var sampleConfig = ` # keys = ["foo", "bar"] # separator = "_" - ## Telegraf tag to use as a routing key - ## ie, if this tag exists, its value will be used as the routing key + ## The routing tag specifies a tagkey on the metric whose value is used as + ## the message key. The message key is used to determine which partition to + ## send the message to. This tag is prefered over the routing_key option. routing_tag = "host" + ## The routing key is set as the message key and used to determine which + ## partition to send the message to. This value is only used when no + ## routing_tag is set or as a fallback when the tag specified in routing tag + ## is not found. + ## + ## If set to "random", a random value will be generated for each message. + ## + ## When unset, no message key is added and each message is routed to a random + ## partition. + ## + ## ex: routing_key = "random" + ## routing_key = "telegraf" + # routing_key = "" + ## CompressionCodec represents the various compression codecs recognized by ## Kafka in messages. ## 0 : No compression ## 1 : Gzip compression ## 2 : Snappy compression + ## 3 : LZ4 compression # compression_codec = 0 ## RequiredAcks is used in Produce Requests to tell the broker how many @@ -127,7 +188,12 @@ var sampleConfig = ` ## until the next flush. # max_retry = 3 + ## The maximum permitted size of a message. Should be set equal to or + ## smaller than the broker's 'message.max.bytes'. + # max_message_bytes = 1000000 + ## Optional TLS Config + # enable_tls = true # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" # tls_key = "/etc/telegraf/key.pem" @@ -138,6 +204,9 @@ var sampleConfig = ` # sasl_username = "kafka" # sasl_password = "secret" + ## SASL protocol version. When connecting to Azure EventHub set to 0. + # sasl_version = 1 + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -154,14 +223,29 @@ func ValidateTopicSuffixMethod(method string) error { return fmt.Errorf("Unknown topic suffix method provided: %s", method) } -func (k *Kafka) GetTopicName(metric telegraf.Metric) string { +func (k *Kafka) GetTopicName(metric telegraf.Metric) (telegraf.Metric, string) { + topic := k.Topic + if k.TopicTag != "" { + if t, ok := metric.GetTag(k.TopicTag); ok { + topic = t + + // If excluding the topic tag, a copy is required to avoid modifying + // the metric buffer. 
+ if k.ExcludeTopicTag { + metric = metric.Copy() + metric.Accept() + metric.RemoveTag(k.TopicTag) + } + } + } + var topicName string switch k.TopicSuffix.Method { case "measurement": - topicName = k.Topic + k.TopicSuffix.Separator + metric.Name() + topicName = topic + k.TopicSuffix.Separator + metric.Name() case "tags": var topicNameComponents []string - topicNameComponents = append(topicNameComponents, k.Topic) + topicNameComponents = append(topicNameComponents, topic) for _, tag := range k.TopicSuffix.Keys { tagValue := metric.Tags()[tag] if tagValue != "" { @@ -170,9 +254,9 @@ func (k *Kafka) GetTopicName(metric telegraf.Metric) string { } topicName = strings.Join(topicNameComponents, k.TopicSuffix.Separator) default: - topicName = k.Topic + topicName = topic } - return topicName + return metric, topicName } func (k *Kafka) SetSerializer(serializer serializers.Serializer) { @@ -186,11 +270,29 @@ func (k *Kafka) Connect() error { } config := sarama.NewConfig() + if k.Version != "" { + version, err := sarama.ParseKafkaVersion(k.Version) + if err != nil { + return err + } + config.Version = version + } + + if k.ClientID != "" { + config.ClientID = k.ClientID + } else { + config.ClientID = "Telegraf" + } + config.Producer.RequiredAcks = sarama.RequiredAcks(k.RequiredAcks) config.Producer.Compression = sarama.CompressionCodec(k.CompressionCodec) config.Producer.Retry.Max = k.MaxRetry config.Producer.Return.Successes = true + if k.MaxMessageBytes > 0 { + config.Producer.MaxMessageBytes = k.MaxMessageBytes + } + // Legacy support ssl config if k.Certificate != "" { k.TLSCert = k.Certificate @@ -198,6 +300,10 @@ func (k *Kafka) Connect() error { k.TLSKey = k.Key } + if k.EnableTLS != nil && *k.EnableTLS { + config.Net.TLS.Enable = true + } + tlsConfig, err := k.ClientConfig.TLSConfig() if err != nil { return err @@ -205,16 +311,28 @@ func (k *Kafka) Connect() error { if tlsConfig != nil { config.Net.TLS.Config = tlsConfig - config.Net.TLS.Enable = true + + // To maintain backwards compatibility, if the enable_tls option is not + // set TLS is enabled if a non-default TLS config is used. 
+ if k.EnableTLS == nil { + k.Log.Warnf("Use of deprecated configuration: enable_tls should be set when using TLS") + config.Net.TLS.Enable = true + } } if k.SASLUsername != "" && k.SASLPassword != "" { config.Net.SASL.User = k.SASLUsername config.Net.SASL.Password = k.SASLPassword config.Net.SASL.Enable = true + + version, err := kafka.SASLVersion(config.Version, k.SASLVersion) + if err != nil { + return err + } + config.Net.SASL.Version = version } - producer, err := sarama.NewSyncProducer(k.Brokers, config) + producer, err := k.producerFunc(k.Brokers, config) if err != nil { return err } @@ -234,41 +352,86 @@ func (k *Kafka) Description() string { return "Configuration for the Kafka server to send metrics to" } -func (k *Kafka) Write(metrics []telegraf.Metric) error { - if len(metrics) == 0 { - return nil +func (k *Kafka) routingKey(metric telegraf.Metric) (string, error) { + if k.RoutingTag != "" { + key, ok := metric.GetTag(k.RoutingTag) + if ok { + return key, nil + } } + if k.RoutingKey == "random" { + u, err := uuid.NewV4() + if err != nil { + return "", err + } + return u.String(), nil + } + + return k.RoutingKey, nil +} + +func (k *Kafka) Write(metrics []telegraf.Metric) error { + msgs := make([]*sarama.ProducerMessage, 0, len(metrics)) for _, metric := range metrics { + metric, topic := k.GetTopicName(metric) + buf, err := k.serializer.Serialize(metric) if err != nil { - return err + k.Log.Debugf("Could not serialize metric: %v", err) + continue } - topicName := k.GetTopicName(metric) - m := &sarama.ProducerMessage{ - Topic: topicName, + Topic: topic, Value: sarama.ByteEncoder(buf), } - if h, ok := metric.Tags()[k.RoutingTag]; ok { - m.Key = sarama.StringEncoder(h) + + // Negative timestamps are not allowed by the Kafka protocol. + if !metric.Time().Before(zeroTime) { + m.Timestamp = metric.Time() } - _, _, err = k.producer.SendMessage(m) - + key, err := k.routingKey(metric) if err != nil { - return fmt.Errorf("FAILED to send kafka message: %s\n", err) + return fmt.Errorf("could not generate routing key: %v", err) } + + if key != "" { + m.Key = sarama.StringEncoder(key) + } + msgs = append(msgs, m) } + + err := k.producer.SendMessages(msgs) + if err != nil { + // We could have many errors, return only the first encountered. 
+ if errs, ok := err.(sarama.ProducerErrors); ok { + for _, prodErr := range errs { + if prodErr.Err == sarama.ErrMessageSizeTooLarge { + k.Log.Error("Message too large, consider increasing `max_message_bytes`; dropping batch") + return nil + } + if prodErr.Err == sarama.ErrInvalidTimestamp { + k.Log.Error("The timestamp of the message is out of acceptable range, consider increasing broker `message.timestamp.difference.max.ms`; dropping batch") + return nil + } + return prodErr + } + } + return err + } + return nil } func init() { + sarama.Logger = &DebugLogger{} outputs.Add("kafka", func() telegraf.Output { return &Kafka{ MaxRetry: 3, RequiredAcks: -1, + producerFunc: sarama.NewSyncProducer, } }) } diff --git a/plugins/outputs/kafka/kafka_test.go b/plugins/outputs/kafka/kafka_test.go index b18d9f15d..070eea3f9 100644 --- a/plugins/outputs/kafka/kafka_test.go +++ b/plugins/outputs/kafka/kafka_test.go @@ -2,7 +2,11 @@ package kafka import ( "testing" + "time" + "github.com/Shopify/sarama" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" @@ -78,7 +82,7 @@ func TestTopicSuffixes(t *testing.T) { TopicSuffix: topicSuffix, } - topic := k.GetTopicName(metric) + _, topic := k.GetTopicName(metric) require.Equal(t, expectedTopic, topic) } } @@ -96,3 +100,203 @@ func TestValidateTopicSuffixMethod(t *testing.T) { require.NoError(t, err, "Topic suffix method used should be valid.") } } + +func TestRoutingKey(t *testing.T) { + tests := []struct { + name string + kafka *Kafka + metric telegraf.Metric + check func(t *testing.T, routingKey string) + }{ + { + name: "static routing key", + kafka: &Kafka{ + RoutingKey: "static", + }, + metric: func() telegraf.Metric { + m, _ := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ) + return m + }(), + check: func(t *testing.T, routingKey string) { + require.Equal(t, "static", routingKey) + }, + }, + { + name: "random routing key", + kafka: &Kafka{ + RoutingKey: "random", + }, + metric: func() telegraf.Metric { + m, _ := metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ) + return m + }(), + check: func(t *testing.T, routingKey string) { + require.Equal(t, 36, len(routingKey)) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + key, err := tt.kafka.routingKey(tt.metric) + require.NoError(t, err) + tt.check(t, key) + }) + } +} + +type MockProducer struct { + sent []*sarama.ProducerMessage +} + +func (p *MockProducer) SendMessage(msg *sarama.ProducerMessage) (partition int32, offset int64, err error) { + p.sent = append(p.sent, msg) + return 0, 0, nil +} + +func (p *MockProducer) SendMessages(msgs []*sarama.ProducerMessage) error { + p.sent = append(p.sent, msgs...) 
+ return nil +} + +func (p *MockProducer) Close() error { + return nil +} + +func NewMockProducer(addrs []string, config *sarama.Config) (sarama.SyncProducer, error) { + return &MockProducer{}, nil +} + +func TestTopicTag(t *testing.T) { + tests := []struct { + name string + plugin *Kafka + input []telegraf.Metric + topic string + value string + }{ + { + name: "static topic", + plugin: &Kafka{ + Brokers: []string{"127.0.0.1"}, + Topic: "telegraf", + producerFunc: NewMockProducer, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + topic: "telegraf", + value: "cpu time_idle=42 0\n", + }, + { + name: "topic tag overrides static topic", + plugin: &Kafka{ + Brokers: []string{"127.0.0.1"}, + Topic: "telegraf", + TopicTag: "topic", + producerFunc: NewMockProducer, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "topic": "xyzzy", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + topic: "xyzzy", + value: "cpu,topic=xyzzy time_idle=42 0\n", + }, + { + name: "missing topic tag falls back to static topic", + plugin: &Kafka{ + Brokers: []string{"127.0.0.1"}, + Topic: "telegraf", + TopicTag: "topic", + producerFunc: NewMockProducer, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + topic: "telegraf", + value: "cpu time_idle=42 0\n", + }, + { + name: "exclude topic tag removes tag", + plugin: &Kafka{ + Brokers: []string{"127.0.0.1"}, + Topic: "telegraf", + TopicTag: "topic", + ExcludeTopicTag: true, + producerFunc: NewMockProducer, + }, + input: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "topic": "xyzzy", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + topic: "xyzzy", + value: "cpu time_idle=42 0\n", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := serializers.NewInfluxSerializer() + require.NoError(t, err) + tt.plugin.SetSerializer(s) + + err = tt.plugin.Connect() + require.NoError(t, err) + + producer := &MockProducer{} + tt.plugin.producer = producer + + err = tt.plugin.Write(tt.input) + require.NoError(t, err) + + require.Equal(t, tt.topic, producer.sent[0].Topic) + + encoded, err := producer.sent[0].Value.Encode() + require.NoError(t, err) + require.Equal(t, tt.value, string(encoded)) + }) + } +} diff --git a/plugins/outputs/kinesis/README.md b/plugins/outputs/kinesis/README.md index 809bb7790..1931dacb9 100644 --- a/plugins/outputs/kinesis/README.md +++ b/plugins/outputs/kinesis/README.md @@ -51,7 +51,7 @@ solution to scale out. ### use_random_partitionkey [DEPRECATED] -When true a random UUID will be generated and used as the partitionkey when sending data to Kinesis. This allows data to evenly spread across multiple shards in the stream. Due to using a random paritionKey there can be no guarantee of ordering when consuming the data off the shards. +When true a random UUID will be generated and used as the partitionkey when sending data to Kinesis. This allows data to evenly spread across multiple shards in the stream. Due to using a random partitionKey there can be no guarantee of ordering when consuming the data off the shards. If true then the partitionkey option will be ignored. 
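As an illustration of the behavior described above, the sketch below shows how a random partition key can be generated with the gofrs/uuid package that this change adopts elsewhere; the helper name and the fallback value are hypothetical and only demonstrate the idea, they are not part of the plugin.

```go
package main

import (
	"fmt"

	"github.com/gofrs/uuid"
)

// randomPartitionKey returns a fresh UUIDv4 string so records spread evenly
// across shards; the fallback value is an assumption for this sketch only,
// not plugin behavior.
func randomPartitionKey(fallback string) string {
	u, err := uuid.NewV4()
	if err != nil {
		return fallback
	}
	return u.String()
}

func main() {
	fmt.Println(randomPartitionKey("telegraf"))
}
```

Because each record gets a different key, there is no ordering guarantee across records, which is the trade-off noted above.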
### partition @@ -70,8 +70,8 @@ All metrics will be mapped to the same shard which may limit throughput. #### tag -This will take the value of the specified tag from each metric as the paritionKey. -If the tag is not found an empty string will be used. +This will take the value of the specified tag from each metric as the partitionKey. +If the tag is not found the `default` value will be used or `telegraf` if unspecified #### measurement diff --git a/plugins/outputs/kinesis/kinesis.go b/plugins/outputs/kinesis/kinesis.go index d77ff08a5..88620fa70 100644 --- a/plugins/outputs/kinesis/kinesis.go +++ b/plugins/outputs/kinesis/kinesis.go @@ -2,28 +2,27 @@ package kinesis import ( "log" - "os" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/kinesis" - "github.com/satori/go.uuid" - + "github.com/gofrs/uuid" "github.com/influxdata/telegraf" - internalaws "github.com/influxdata/telegraf/internal/config/aws" + internalaws "github.com/influxdata/telegraf/config/aws" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" ) type ( KinesisOutput struct { - Region string `toml:"region"` - AccessKey string `toml:"access_key"` - SecretKey string `toml:"secret_key"` - RoleARN string `toml:"role_arn"` - Profile string `toml:"profile"` - Filename string `toml:"shared_credential_file"` - Token string `toml:"token"` + Region string `toml:"region"` + AccessKey string `toml:"access_key"` + SecretKey string `toml:"secret_key"` + RoleARN string `toml:"role_arn"` + Profile string `toml:"profile"` + Filename string `toml:"shared_credential_file"` + Token string `toml:"token"` + EndpointURL string `toml:"endpoint_url"` StreamName string `toml:"streamname"` PartitionKey string `toml:"partitionkey"` @@ -36,8 +35,9 @@ type ( } Partition struct { - Method string `toml:"method"` - Key string `toml:"key"` + Method string `toml:"method"` + Key string `toml:"key"` + Default string `toml:"default"` } ) @@ -60,11 +60,17 @@ var sampleConfig = ` #profile = "" #shared_credential_file = "" + ## Endpoint to make request against, the correct endpoint is automatically + ## determined and this option should only be set if you wish to override the + ## default. + ## ex: endpoint_url = "http://localhost:8000" + # endpoint_url = "" + ## Kinesis StreamName must exist prior to starting telegraf. streamname = "StreamName" ## DEPRECATED: PartitionKey as used for sharding data. partitionkey = "PartitionKey" - ## DEPRECATED: If set the paritionKey will be a random UUID on every put. + ## DEPRECATED: If set the partitionKey will be a random UUID on every put. ## This allows for scaling across multiple shards in a stream. ## This will cause issues with ordering. use_random_partitionkey = false @@ -84,10 +90,11 @@ var sampleConfig = ` # method = "measurement" # ## Use the value of a tag for all writes, if the tag is not set the empty - ## string will be used: + ## default option will be used. When no default, defaults to "telegraf" # [outputs.kinesis.partition] # method = "tag" # key = "host" + # default = "mykey" ## Data format to output. @@ -108,58 +115,34 @@ func (k *KinesisOutput) Description() string { return "Configuration for the AWS Kinesis output." } -func checkstream(l []*string, s string) bool { - // Check if the StreamName exists in the slice returned from the ListStreams API request. - for _, stream := range l { - if *stream == s { - return true - } - } - return false -} - func (k *KinesisOutput) Connect() error { + if k.Partition == nil { + log.Print("E! 
kinesis : Deprecated partitionkey configuration in use, please consider using outputs.kinesis.partition") + } + // We attempt first to create a session to Kinesis using an IAMS role, if that fails it will fall through to using // environment variables, and then Shared Credentials. if k.Debug { - log.Printf("E! kinesis: Establishing a connection to Kinesis in %+v", k.Region) + log.Printf("I! kinesis: Establishing a connection to Kinesis in %s", k.Region) } credentialConfig := &internalaws.CredentialConfig{ - Region: k.Region, - AccessKey: k.AccessKey, - SecretKey: k.SecretKey, - RoleARN: k.RoleARN, - Profile: k.Profile, - Filename: k.Filename, - Token: k.Token, + Region: k.Region, + AccessKey: k.AccessKey, + SecretKey: k.SecretKey, + RoleARN: k.RoleARN, + Profile: k.Profile, + Filename: k.Filename, + Token: k.Token, + EndpointURL: k.EndpointURL, } configProvider := credentialConfig.Credentials() svc := kinesis.New(configProvider) - KinesisParams := &kinesis.ListStreamsInput{ - Limit: aws.Int64(100), - } - - resp, err := svc.ListStreams(KinesisParams) - - if err != nil { - log.Printf("E! kinesis: Error in ListSteams API call : %+v \n", err) - } - - if checkstream(resp.StreamNames, k.StreamName) { - if k.Debug { - log.Printf("E! kinesis: Stream Exists") - } - k.svc = svc - return nil - } else { - log.Printf("E! kinesis : You have configured a StreamName %+v which does not exist. exiting.", k.StreamName) - os.Exit(1) - } - if k.Partition == nil { - log.Print("E! kinesis : Deprecated paritionkey configuration in use, please consider using outputs.kinesis.partition") - } + _, err := svc.DescribeStreamSummary(&kinesis.DescribeStreamSummaryInput{ + StreamName: aws.String(k.StreamName), + }) + k.svc = svc return err } @@ -181,14 +164,14 @@ func writekinesis(k *KinesisOutput, r []*kinesis.PutRecordsRequestEntry) time.Du if k.Debug { resp, err := k.svc.PutRecords(payload) if err != nil { - log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error()) + log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) } - log.Printf("E! %+v \n", resp) + log.Printf("I! Wrote: '%+v'", resp) } else { _, err := k.svc.PutRecords(payload) if err != nil { - log.Printf("E! kinesis: Unable to write to Kinesis : %+v \n", err.Error()) + log.Printf("E! kinesis: Unable to write to Kinesis : %s", err.Error()) } } return time.Since(start) @@ -200,21 +183,30 @@ func (k *KinesisOutput) getPartitionKey(metric telegraf.Metric) string { case "static": return k.Partition.Key case "random": - u := uuid.NewV4() + u, err := uuid.NewV4() + if err != nil { + return k.Partition.Default + } return u.String() case "measurement": return metric.Name() case "tag": - if metric.HasTag(k.Partition.Key) { - return metric.Tags()[k.Partition.Key] + if t, ok := metric.GetTag(k.Partition.Key); ok { + return t + } else if len(k.Partition.Default) > 0 { + return k.Partition.Default } - log.Printf("E! kinesis : You have configured a Partition using tag %+v which does not exist.", k.Partition.Key) + // Default partition name if default is not set + return "telegraf" default: - log.Printf("E! kinesis : You have configured a Partition method of %+v which is not supported", k.Partition.Method) + log.Printf("E! 
kinesis : You have configured a Partition method of '%s' which is not supported", k.Partition.Method) } } if k.RandomPartitionKey { - u := uuid.NewV4() + u, err := uuid.NewV4() + if err != nil { + return k.Partition.Default + } return u.String() } return k.PartitionKey @@ -234,7 +226,8 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { values, err := k.serializer.Serialize(metric) if err != nil { - return err + log.Printf("D! [outputs.kinesis] Could not serialize metric: %v", err) + continue } partitionKey := k.getPartitionKey(metric) @@ -249,7 +242,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { if sz == 500 { // Max Messages Per PutRecordRequest is 500 elapsed := writekinesis(k, r) - log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed) + log.Printf("D! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) sz = 0 r = nil } @@ -257,7 +250,7 @@ func (k *KinesisOutput) Write(metrics []telegraf.Metric) error { } if sz > 0 { elapsed := writekinesis(k, r) - log.Printf("E! Wrote a %+v point batch to Kinesis in %+v.\n", sz, elapsed) + log.Printf("D! Wrote a %d point batch to Kinesis in %+v.", sz, elapsed) } return nil diff --git a/plugins/outputs/kinesis/kinesis_test.go b/plugins/outputs/kinesis/kinesis_test.go index 3c6321abd..9d4f6729b 100644 --- a/plugins/outputs/kinesis/kinesis_test.go +++ b/plugins/outputs/kinesis/kinesis_test.go @@ -3,8 +3,8 @@ package kinesis import ( "testing" + "github.com/gofrs/uuid" "github.com/influxdata/telegraf/testutil" - uuid "github.com/satori/go.uuid" "github.com/stretchr/testify/assert" ) @@ -29,13 +29,22 @@ func TestPartitionKey(t *testing.T) { } assert.Equal(testPoint.Tags()["tag1"], k.getPartitionKey(testPoint), "PartitionKey should be value of 'tag1'") + k = KinesisOutput{ + Partition: &Partition{ + Method: "tag", + Key: "doesnotexist", + Default: "somedefault", + }, + } + assert.Equal("somedefault", k.getPartitionKey(testPoint), "PartitionKey should use default") + k = KinesisOutput{ Partition: &Partition{ Method: "tag", Key: "doesnotexist", }, } - assert.Equal("", k.getPartitionKey(testPoint), "PartitionKey should be value of ''") + assert.Equal("telegraf", k.getPartitionKey(testPoint), "PartitionKey should be telegraf") k = KinesisOutput{ Partition: &Partition{ diff --git a/plugins/outputs/librato/librato.go b/plugins/outputs/librato/librato.go index 0603394ec..53bb8c124 100644 --- a/plugins/outputs/librato/librato.go +++ b/plugins/outputs/librato/librato.go @@ -32,7 +32,7 @@ type Librato struct { var reUnacceptedChar = regexp.MustCompile("[^.a-zA-Z0-9_-]") var sampleConfig = ` - ## Librator API Docs + ## Librato API Docs ## http://dev.librato.com/v1/metrics-authentication ## Librato API user api_user = "telegraf@influxdb.com" # required. diff --git a/plugins/outputs/mqtt/README.md b/plugins/outputs/mqtt/README.md index 14c166f9e..aa028e056 100644 --- a/plugins/outputs/mqtt/README.md +++ b/plugins/outputs/mqtt/README.md @@ -37,6 +37,9 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt ## metrics are written one metric per MQTT message. # batch = false + ## When true, messages will have RETAIN flag set. + # retain = false + ## Data format to output. # data_format = "influx" ``` @@ -50,10 +53,11 @@ This plugin writes to a [MQTT Broker](http://http://mqtt.org/) acting as a mqtt ### Optional parameters: * `username`: The username to connect MQTT server. * `password`: The password to connect MQTT server. -* `client_id`: The unique client id to connect MQTT server. 
If this paramater is not set then a random ID is generated. +* `client_id`: The unique client id to connect MQTT server. If this parameter is not set then a random ID is generated. * `timeout`: Timeout for write operations. default: 5s * `tls_ca`: TLS CA * `tls_cert`: TLS CERT * `tls_key`: TLS key * `insecure_skip_verify`: Use TLS but skip chain & host verification (default: false) +* `retain`: Set `retain` flag when publishing * `data_format`: [About Telegraf data formats](https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md) diff --git a/plugins/outputs/mqtt/mqtt.go b/plugins/outputs/mqtt/mqtt.go index 18e22daa6..13785cd68 100644 --- a/plugins/outputs/mqtt/mqtt.go +++ b/plugins/outputs/mqtt/mqtt.go @@ -2,17 +2,17 @@ package mqtt import ( "fmt" + "log" "strings" "sync" "time" + paho "github.com/eclipse/paho.mqtt.golang" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" - - paho "github.com/eclipse/paho.mqtt.golang" ) var sampleConfig = ` @@ -50,6 +50,10 @@ var sampleConfig = ` ## metrics are written one metric per MQTT message. # batch = false + ## When true, metric will have RETAIN flag set, making broker cache entries until someone + ## actually reads it + # retain = false + ## Data format to output. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -68,6 +72,7 @@ type MQTT struct { ClientID string `toml:"client_id"` tls.ClientConfig BatchMessage bool `toml:"batch"` + Retain bool `toml:"retain"` client paho.Client opts *paho.ClientOptions @@ -146,9 +151,9 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { metricsmap[topic] = append(metricsmap[topic], metric) } else { buf, err := m.serializer.Serialize(metric) - if err != nil { - return err + log.Printf("D! [outputs.mqtt] Could not serialize metric: %v", err) + continue } err = m.publish(topic, buf) @@ -174,7 +179,7 @@ func (m *MQTT) Write(metrics []telegraf.Metric) error { } func (m *MQTT) publish(topic string, body []byte) error { - token := m.client.Publish(topic, byte(m.QoS), false, body) + token := m.client.Publish(topic, byte(m.QoS), m.Retain, body) token.WaitTimeout(m.Timeout.Duration) if token.Error() != nil { return token.Error() @@ -218,7 +223,7 @@ func (m *MQTT) createOpts() (*paho.ClientOptions, error) { } if len(m.Servers) == 0 { - return opts, fmt.Errorf("could not get host infomations") + return opts, fmt.Errorf("could not get host informations") } for _, host := range m.Servers { server := fmt.Sprintf("%s://%s", scheme, host) diff --git a/plugins/outputs/nats/README.md b/plugins/outputs/nats/README.md index d9462650a..c5539900b 100644 --- a/plugins/outputs/nats/README.md +++ b/plugins/outputs/nats/README.md @@ -2,18 +2,27 @@ This plugin writes to a (list of) specified NATS instance(s). 
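For illustration, here is a minimal standalone publisher built with the same `nats.go` options API the plugin uses below. This is only a sketch: the server URL, credentials path, subject, and payload are placeholders, and it assumes the nats.go v1 client.

```go
package main

import (
	"log"
	"strings"

	"github.com/nats-io/nats.go"
)

func main() {
	servers := []string{"nats://localhost:4222"} // placeholder server list

	// Mirror the plugin's connection options: reconnect forever and,
	// optionally, authenticate with a NATS 2.0 credentials file.
	opts := []nats.Option{
		nats.MaxReconnects(-1),
		nats.UserCredentials("/etc/telegraf/nats.creds"), // placeholder path
		// nats.UserInfo("user", "password") is the username/password alternative.
	}

	nc, err := nats.Connect(strings.Join(servers, ","), opts...)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// Publish a single serialized metric to the subject.
	if err := nc.Publish("telegraf", []byte("cpu usage_idle=90")); err != nil {
		log.Fatal(err)
	}
	nc.Flush()
}
```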
-``` +```toml [[outputs.nats]] ## URLs of NATS servers servers = ["nats://localhost:4222"] ## Optional credentials # username = "" # password = "" + + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + ## NATS subject for producer messages subject = "telegraf" + + ## Use Transport Layer Security + # secure = false + ## Optional TLS Config - ## CA certificate used to self-sign NATS server(s) TLS certificate(s) # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" ## Use TLS but skip chain & host verification # insecure_skip_verify = false @@ -23,15 +32,3 @@ This plugin writes to a (list of) specified NATS instance(s). ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md data_format = "influx" ``` - -### Required parameters: - -* `servers`: List of strings, this is for NATS clustering support. Each URL should start with `nats://`. -* `subject`: The NATS subject to publish to. - -### Optional parameters: - -* `username`: Username for NATS -* `password`: Password for NATS -* `tls_ca`: TLS CA -* `insecure_skip_verify`: Use SSL but skip chain & host verification (default: false) diff --git a/plugins/outputs/nats/nats.go b/plugins/outputs/nats/nats.go index d9fdb0e88..620ac8b44 100644 --- a/plugins/outputs/nats/nats.go +++ b/plugins/outputs/nats/nats.go @@ -2,38 +2,47 @@ package nats import ( "fmt" - - nats_client "github.com/nats-io/go-nats" + "log" + "strings" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" + "github.com/nats-io/nats.go" ) type NATS struct { - // Servers is the NATS server pool to connect to - Servers []string - // Credentials - Username string - Password string - // NATS subject to publish metrics to - Subject string + Servers []string `toml:"servers"` + Secure bool `toml:"secure"` + Username string `toml:"username"` + Password string `toml:"password"` + Credentials string `toml:"credentials"` + Subject string `toml:"subject"` + tls.ClientConfig - conn *nats_client.Conn + conn *nats.Conn serializer serializers.Serializer } var sampleConfig = ` ## URLs of NATS servers servers = ["nats://localhost:4222"] + ## Optional credentials # username = "" # password = "" + + ## Optional NATS 2.0 and NATS NGS compatible user credentials + # credentials = "/etc/telegraf/nats.creds" + ## NATS subject for producer messages subject = "telegraf" + ## Use Transport Layer Security + # secure = false + ## Optional TLS Config # tls_ca = "/etc/telegraf/ca.pem" # tls_cert = "/etc/telegraf/cert.pem" @@ -55,34 +64,26 @@ func (n *NATS) SetSerializer(serializer serializers.Serializer) { func (n *NATS) Connect() error { var err error - // set default NATS connection options - opts := nats_client.DefaultOptions - - // override max reconnection tries - opts.MaxReconnect = -1 - - // override servers, if any were specified - opts.Servers = n.Servers + opts := []nats.Option{ + nats.MaxReconnects(-1), + } // override authentication, if any was specified if n.Username != "" { - opts.User = n.Username - opts.Password = n.Password + opts = append(opts, nats.UserInfo(n.Username, n.Password)) } - // override TLS, if it was specified - tlsConfig, err := n.ClientConfig.TLSConfig() - if err != nil { - return err - } - if tlsConfig != nil { - // set NATS connection TLS options - opts.Secure = true - opts.TLSConfig = tlsConfig + if n.Secure { + 
tlsConfig, err := n.ClientConfig.TLSConfig() + if err != nil { + return err + } + + opts = append(opts, nats.Secure(tlsConfig)) } // try and connect - n.conn, err = opts.Connect() + n.conn, err = nats.Connect(strings.Join(n.Servers, ","), opts...) return err } @@ -108,7 +109,8 @@ func (n *NATS) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := n.serializer.Serialize(metric) if err != nil { - return err + log.Printf("D! [outputs.nats] Could not serialize metric: %v", err) + continue } err = n.conn.Publish(n.Subject, buf) diff --git a/plugins/outputs/newrelic/README.md b/plugins/outputs/newrelic/README.md new file mode 100644 index 000000000..ae056ed2f --- /dev/null +++ b/plugins/outputs/newrelic/README.md @@ -0,0 +1,21 @@ +# New Relic output plugin + +This plugin writes to New Relic Insights using the [Metrics API][]. + +To use this plugin you must first obtain an [Insights API Key][]. + +### Configuration +```toml +[[outputs.newrelic]] + ## New Relic Insights API key + insights_key = "insights api key" + + ## Prefix to add to the metric name for easy identification. + # metric_prefix = "" + + ## Timeout for writes to the New Relic API. + # timeout = "15s" +``` + +[Metrics API]: https://docs.newrelic.com/docs/data-ingest-apis/get-data-new-relic/metric-api/introduction-metric-api + [Insights API Key]: https://docs.newrelic.com/docs/apis/get-started/intro-apis/types-new-relic-api-keys#user-api-key diff --git a/plugins/outputs/newrelic/newrelic.go b/plugins/outputs/newrelic/newrelic.go new file mode 100644 index 000000000..da000c222 --- /dev/null +++ b/plugins/outputs/newrelic/newrelic.go @@ -0,0 +1,158 @@ +package newrelic + +// newrelic.go +import ( + "context" + "fmt" + "net/http" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" + "github.com/newrelic/newrelic-telemetry-sdk-go/cumulative" + "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" +) + +// NewRelic is the top-level structure for the New Relic output plugin +type NewRelic struct { + InsightsKey string `toml:"insights_key"` + MetricPrefix string `toml:"metric_prefix"` + Timeout internal.Duration `toml:"timeout"` + + harvestor *telemetry.Harvester + dc *cumulative.DeltaCalculator + savedErrors map[int]interface{} + errorCount int + Client http.Client `toml:"-"` +} + +// Description returns a one-sentence description of the Output +func (nr *NewRelic) Description() string { + return "Send metrics to New Relic metrics endpoint" +} + +// SampleConfig returns the default configuration of the Output +func (nr *NewRelic) SampleConfig() string { + return ` + ## New Relic Insights API key + insights_key = "insights api key" + + ## Prefix to add to the metric name for easy identification. + # metric_prefix = "" + + ## Timeout for writes to the New Relic API.
+ # timeout = "15s" +` +} + +// Connect to the Output +func (nr *NewRelic) Connect() error { + if nr.InsightsKey == "" { + return fmt.Errorf("InsightsKey is required for newrelic") + } + var err error + nr.harvestor, err = telemetry.NewHarvester(telemetry.ConfigAPIKey(nr.InsightsKey), + telemetry.ConfigHarvestPeriod(0), + func(cfg *telemetry.Config) { + cfg.Product = "NewRelic-Telegraf-Plugin" + cfg.ProductVersion = "1.0" + cfg.HarvestTimeout = nr.Timeout.Duration + cfg.Client = &nr.Client + cfg.ErrorLogger = func(e map[string]interface{}) { + var errorString string + for k, v := range e { + errorString += fmt.Sprintf("%s = %v ", k, v) + } + nr.errorCount++ + nr.savedErrors[nr.errorCount] = errorString + } + }) + if err != nil { + return fmt.Errorf("unable to connect to newrelic: %v", err) + } + + nr.dc = cumulative.NewDeltaCalculator() + return nil +} + +// Close any connections to the Output +func (nr *NewRelic) Close() error { + nr.errorCount = 0 + nr.Client.CloseIdleConnections() + return nil +} + +// Write takes a group of points to be written to the Output +func (nr *NewRelic) Write(metrics []telegraf.Metric) error { + nr.errorCount = 0 + nr.savedErrors = make(map[int]interface{}) + + for _, metric := range metrics { + // create tag map + tags := make(map[string]interface{}) + for _, tag := range metric.TagList() { + tags[tag.Key] = tag.Value + } + for _, field := range metric.FieldList() { + var mvalue float64 + var mname string + if nr.MetricPrefix != "" { + mname = nr.MetricPrefix + "." + metric.Name() + "." + field.Key + } else { + mname = metric.Name() + "." + field.Key + } + switch n := field.Value.(type) { + case int64: + mvalue = float64(n) + case uint64: + mvalue = float64(n) + case float64: + mvalue = float64(n) + case bool: + mvalue = float64(0) + if n { + mvalue = float64(1) + } + case string: + // Do not log every time we encounter a string field; + // we just skip it + continue + default: + return fmt.Errorf("undefined field type: %T", field.Value) + } + + switch metric.Type() { + case telegraf.Counter: + if counter, ok := nr.dc.CountMetric(mname, tags, mvalue, metric.Time()); ok { + nr.harvestor.RecordMetric(counter) + } + default: + nr.harvestor.RecordMetric(telemetry.Gauge{ + Timestamp: metric.Time(), + Value: mvalue, + Name: mname, + Attributes: tags}) + } + } + } + // By default, the Harvester sends metrics and spans to the New Relic + // backend every 5 seconds. You can force data to be sent at any time + // using HarvestNow.
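+	// Note: the harvester above is created with ConfigHarvestPeriod(0), which disables
+	// the SDK's periodic background harvest, so metrics are only sent when HarvestNow
+	// is called here.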
+ nr.harvestor.HarvestNow(context.Background()) + + //Check if we encountered errors + if nr.errorCount != 0 { + return fmt.Errorf("unable to harvest metrics %s ", nr.savedErrors[nr.errorCount]) + } + return nil +} + +func init() { + outputs.Add("newrelic", func() telegraf.Output { + return &NewRelic{ + Timeout: internal.Duration{Duration: time.Second * 15}, + Client: http.Client{}, + } + }) +} diff --git a/plugins/outputs/newrelic/newrelic_test.go b/plugins/outputs/newrelic/newrelic_test.go new file mode 100644 index 000000000..aa23950c7 --- /dev/null +++ b/plugins/outputs/newrelic/newrelic_test.go @@ -0,0 +1,180 @@ +package newrelic + +import ( + "math" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" + "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBasic(t *testing.T) { + nr := &NewRelic{ + MetricPrefix: "Test", + InsightsKey: "12345", + Timeout: internal.Duration{Duration: time.Second * 5}, + } + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + err := nr.Connect() + require.NoError(t, err) + + err = nr.Write(testutil.MockMetrics()) + assert.Contains(t, err.Error(), "unable to harvest metrics") +} + +func TestNewRelic_Write(t *testing.T) { + type args struct { + metrics []telegraf.Metric + } + tests := []struct { + name string + metrics []telegraf.Metric + auditMessage string + wantErr bool + }{ + { + name: "Test: Basic mock metric write", + metrics: testutil.MockMetrics(), + wantErr: false, + auditMessage: `"metrics":[{"name":"test1.value","type":"gauge","value":1,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`, + }, + { + name: "Test: Test string ", + metrics: []telegraf.Metric{ + testutil.TestMetric("value1", "test_String"), + }, + wantErr: false, + auditMessage: "", + }, + { + name: "Test: Test int64 ", + metrics: []telegraf.Metric{ + testutil.TestMetric(int64(15), "test_int64"), + }, + wantErr: false, + auditMessage: `"metrics":[{"name":"test_int64.value","type":"gauge","value":15,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`, + }, + { + name: "Test: Test uint64 ", + metrics: []telegraf.Metric{ + testutil.TestMetric(uint64(20), "test_uint64"), + }, + wantErr: false, + auditMessage: `"metrics":[{"name":"test_uint64.value","type":"gauge","value":20,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`, + }, + { + name: "Test: Test bool true ", + metrics: []telegraf.Metric{ + testutil.TestMetric(bool(true), "test_bool_true"), + }, + wantErr: false, + auditMessage: `"metrics":[{"name":"test_bool_true.value","type":"gauge","value":1,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`, + }, + { + name: "Test: Test bool false ", + metrics: []telegraf.Metric{ + testutil.TestMetric(bool(false), "test_bool_false"), + }, + wantErr: false, + auditMessage: `"metrics":[{"name":"test_bool_false.value","type":"gauge","value":0,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`, + }, + { + name: "Test: Test max float64 ", + metrics: []telegraf.Metric{ + testutil.TestMetric(math.MaxFloat64, "test_maxfloat64"), + }, + wantErr: false, + auditMessage: `"metrics":[{"name":"test_maxfloat64.value","type":"gauge","value":1.7976931348623157e+308,"timestamp":1257894000000,"attributes":{"tag1":"value1"}}]`, + }, + { + name: "Test: Test NAN ", + metrics: []telegraf.Metric{ + testutil.TestMetric(math.NaN, "test_NaN"), + }, + wantErr: false, + 
auditMessage: ``, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var auditLog map[string]interface{} + nr := &NewRelic{} + nr.harvestor, _ = telemetry.NewHarvester( + telemetry.ConfigHarvestPeriod(0), + func(cfg *telemetry.Config) { + cfg.APIKey = "dummyTestKey" + cfg.HarvestPeriod = 0 + cfg.HarvestTimeout = 0 + cfg.AuditLogger = func(e map[string]interface{}) { + auditLog = e + } + }) + err := nr.Write(tt.metrics) + assert.NoError(t, err) + if auditLog["data"] != nil { + assert.Contains(t, auditLog["data"], tt.auditMessage) + } else { + assert.Contains(t, "", tt.auditMessage) + } + + if (err != nil) != tt.wantErr { + t.Errorf("NewRelic.Write() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestNewRelic_Connect(t *testing.T) { + tests := []struct { + name string + newrelic *NewRelic + wantErr bool + }{ + { + name: "Test: No Insights key", + newrelic: &NewRelic{ + MetricPrefix: "prefix", + }, + wantErr: true, + }, + { + name: "Test: Insights key", + newrelic: &NewRelic{ + InsightsKey: "12312133", + MetricPrefix: "prefix", + }, + wantErr: false, + }, + { + name: "Test: Only Insights key", + newrelic: &NewRelic{ + InsightsKey: "12312133", + }, + wantErr: false, + }, + { + name: "Test: Insights key and Timeout", + newrelic: &NewRelic{ + InsightsKey: "12312133", + Timeout: internal.Duration{Duration: time.Second * 5}, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + nr := tt.newrelic + if err := nr.Connect(); (err != nil) != tt.wantErr { + t.Errorf("NewRelic.Connect() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/plugins/outputs/nsq/nsq.go b/plugins/outputs/nsq/nsq.go index c826ab648..a9e2d94ac 100644 --- a/plugins/outputs/nsq/nsq.go +++ b/plugins/outputs/nsq/nsq.go @@ -2,12 +2,12 @@ package nsq import ( "fmt" - - "github.com/nsqio/go-nsq" + "log" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/serializers" + "github.com/nsqio/go-nsq" ) type NSQ struct { @@ -68,7 +68,8 @@ func (n *NSQ) Write(metrics []telegraf.Metric) error { for _, metric := range metrics { buf, err := n.serializer.Serialize(metric) if err != nil { - return err + log.Printf("D! 
[outputs.nsq] Could not serialize metric: %v", err) + continue } err = n.producer.Publish(n.Topic, buf) diff --git a/plugins/outputs/opentsdb/opentsdb.go b/plugins/outputs/opentsdb/opentsdb.go index 964b1768f..766c7a304 100644 --- a/plugins/outputs/opentsdb/opentsdb.go +++ b/plugins/outputs/opentsdb/opentsdb.go @@ -16,14 +16,14 @@ import ( var ( allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`) - hypenChars = strings.NewReplacer( + hyphenChars = strings.NewReplacer( "@", "-", "*", "-", `%`, "-", "#", "-", "$", "-") defaultHttpPath = "/api/put" - defaultSeperator = "_" + defaultSeparator = "_" ) type OpenTSDB struct { @@ -213,7 +213,10 @@ func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error { func cleanTags(tags map[string]string) map[string]string { tagSet := make(map[string]string, len(tags)) for k, v := range tags { - tagSet[sanitize(k)] = sanitize(v) + val := sanitize(v) + if val != "" { + tagSet[sanitize(k)] = val + } } return tagSet } @@ -258,8 +261,8 @@ func (o *OpenTSDB) Close() error { } func sanitize(value string) string { - // Apply special hypenation rules to preserve backwards compatibility - value = hypenChars.Replace(value) + // Apply special hyphenation rules to preserve backwards compatibility + value = hyphenChars.Replace(value) // Replace any remaining illegal chars return allowedChars.ReplaceAllLiteralString(value, "_") } @@ -268,7 +271,7 @@ func init() { outputs.Add("opentsdb", func() telegraf.Output { return &OpenTSDB{ HttpPath: defaultHttpPath, - Separator: defaultSeperator, + Separator: defaultSeparator, } }) } diff --git a/plugins/outputs/prometheus_client/README.md b/plugins/outputs/prometheus_client/README.md index 6cb0cc59e..7d4fe09b1 100644 --- a/plugins/outputs/prometheus_client/README.md +++ b/plugins/outputs/prometheus_client/README.md @@ -1,30 +1,54 @@ -# Prometheus Client Service Output Plugin +# Prometheus Output Plugin -This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes all metrics on `/metrics` (default) to be polled by a Prometheus server. +This plugin starts a [Prometheus](https://prometheus.io/) Client, it exposes +all metrics on `/metrics` (default) to be polled by a Prometheus server. ## Configuration -``` +```toml # Publish all metrics to /metrics for Prometheus to scrape [[outputs.prometheus_client]] - # Address to listen on + ## Address to listen on. listen = ":9273" - # Use TLS - tls_cert = "/etc/ssl/telegraf.crt" - tls_key = "/etc/ssl/telegraf.key" + ## Metric version controls the mapping from Telegraf metrics into + ## Prometheus format. When using the prometheus input, use the same value in + ## both plugins to ensure metrics are round-tripped without modification. + ## + ## example: metric_version = 1; deprecated in 1.13 + ## metric_version = 2; recommended version + # metric_version = 1 - # Use http basic authentication - basic_username = "Foo" - basic_password = "Bar" + ## Use HTTP Basic Authentication. + # basic_username = "Foo" + # basic_password = "Bar" - # Path to publish the metrics on, defaults to /metrics - path = "/metrics" + ## If set, the IP Ranges which are allowed to access metrics. + ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + # ip_range = [] - # Expiration interval for each metric. 0 == no expiration - expiration_interval = "60s" + ## Path to publish the metrics on. + # path = "/metrics" - # Send string metrics as Prometheus labels. - # Unless set to false all string metrics will be sent as labels. 
- string_as_label = true + ## Expiration interval for each metric. 0 == no expiration + # expiration_interval = "60s" + + ## Collectors to enable, valid entries are "gocollector" and "process". + ## If unset, both are enabled. + # collectors_exclude = ["gocollector", "process"] + + ## Send string metrics as Prometheus labels. + ## Unless set to false all string metrics will be sent as labels. + # string_as_label = true + + ## If set, enable TLS with the given certificate. + # tls_cert = "/etc/ssl/telegraf.crt" + # tls_key = "/etc/ssl/telegraf.key" + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Export metric collection time. + # export_timestamp = false ``` diff --git a/plugins/outputs/prometheus_client/prometheus_client.go b/plugins/outputs/prometheus_client/prometheus_client.go index b82c72cf0..57cb1a510 100644 --- a/plugins/outputs/prometheus_client/prometheus_client.go +++ b/plugins/outputs/prometheus_client/prometheus_client.go @@ -1,120 +1,113 @@ -package prometheus_client +package prometheus import ( "context" - "crypto/subtle" + "crypto/tls" "fmt" - "log" + "net" "net/http" - "os" - "regexp" - "sort" - "strconv" - "strings" + "net/url" "sync" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + tlsint "github.com/influxdata/telegraf/internal/tls" "github.com/influxdata/telegraf/plugins/outputs" + "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v1" + "github.com/influxdata/telegraf/plugins/outputs/prometheus_client/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" ) -var invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`) - -// SampleID uniquely identifies a Sample -type SampleID string - -// Sample represents the current value of a series. -type Sample struct { - // Labels are the Prometheus labels. - Labels map[string]string - // Value is the value in the Prometheus output. Only one of these will populated. - Value float64 - HistogramValue map[float64]uint64 - SummaryValue map[float64]float64 - // Histograms and Summaries need a count and a sum - Count uint64 - Sum float64 - // Expiration is the deadline that this Sample is valid until. - Expiration time.Time -} - -// MetricFamily contains the data required to build valid prometheus Metrics. -type MetricFamily struct { - // Samples are the Sample belonging to this MetricFamily. - Samples map[SampleID]*Sample - // Need the telegraf ValueType because there isn't a Prometheus ValueType - // representing Histogram or Summary - TelegrafValueType telegraf.ValueType - // LabelSet is the label counts for all Samples. - LabelSet map[string]int -} - -type PrometheusClient struct { - Listen string - TLSCert string `toml:"tls_cert"` - TLSKey string `toml:"tls_key"` - BasicUsername string `toml:"basic_username"` - BasicPassword string `toml:"basic_password"` - ExpirationInterval internal.Duration `toml:"expiration_interval"` - Path string `toml:"path"` - CollectorsExclude []string `toml:"collectors_exclude"` - StringAsLabel bool `toml:"string_as_label"` - - server *http.Server - - sync.Mutex - // fam is the non-expired MetricFamily by Prometheus metric name. - fam map[string]*MetricFamily - // now returns the current time. 
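+// The rewritten plugin moves metric translation out of this file: a version-specific
+// collector (v1 or v2) implements the Collector interface declared below, Init registers
+// that collector with a private Prometheus registry, Connect serves the registry over
+// HTTP(S) via promhttp, and Write simply forwards metrics to the collector's Add method.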
- now func() time.Time -} +var ( + defaultListen = ":9273" + defaultPath = "/metrics" + defaultExpirationInterval = internal.Duration{Duration: 60 * time.Second} +) var sampleConfig = ` ## Address to listen on - # listen = ":9273" + listen = ":9273" - ## Use TLS - #tls_cert = "/etc/ssl/telegraf.crt" - #tls_key = "/etc/ssl/telegraf.key" + ## Metric version controls the mapping from Telegraf metrics into + ## Prometheus format. When using the prometheus input, use the same value in + ## both plugins to ensure metrics are round-tripped without modification. + ## + ## example: metric_version = 1; deprecated in 1.13 + ## metric_version = 2; recommended version + # metric_version = 1 - ## Use http basic authentication - #basic_username = "Foo" - #basic_password = "Bar" + ## Use HTTP Basic Authentication. + # basic_username = "Foo" + # basic_password = "Bar" - ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration + ## If set, the IP Ranges which are allowed to access metrics. + ## ex: ip_range = ["192.168.0.0/24", "192.168.1.0/30"] + # ip_range = [] + + ## Path to publish the metrics on. + # path = "/metrics" + + ## Expiration interval for each metric. 0 == no expiration # expiration_interval = "60s" ## Collectors to enable, valid entries are "gocollector" and "process". ## If unset, both are enabled. - collectors_exclude = ["gocollector", "process"] + # collectors_exclude = ["gocollector", "process"] - # Send string metrics as Prometheus labels. - # Unless set to false all string metrics will be sent as labels. - string_as_label = true + ## Send string metrics as Prometheus labels. + ## Unless set to false all string metrics will be sent as labels. + # string_as_label = true + + ## If set, enable TLS with the given certificate. + # tls_cert = "/etc/ssl/telegraf.crt" + # tls_key = "/etc/ssl/telegraf.key" + + ## Set one or more allowed client CA certificate file names to + ## enable mutually authenticated TLS connections + # tls_allowed_cacerts = ["/etc/telegraf/clientca.pem"] + + ## Export metric collection time. 
+ # export_timestamp = false ` -func (p *PrometheusClient) basicAuth(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if p.BasicUsername != "" && p.BasicPassword != "" { - w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) - - username, password, ok := r.BasicAuth() - if !ok || - subtle.ConstantTimeCompare([]byte(username), []byte(p.BasicUsername)) != 1 || - subtle.ConstantTimeCompare([]byte(password), []byte(p.BasicPassword)) != 1 { - http.Error(w, "Not authorized", 401) - return - } - } - - h.ServeHTTP(w, r) - }) +type Collector interface { + Describe(ch chan<- *prometheus.Desc) + Collect(ch chan<- prometheus.Metric) + Add(metrics []telegraf.Metric) error } -func (p *PrometheusClient) Start() error { +type PrometheusClient struct { + Listen string `toml:"listen"` + MetricVersion int `toml:"metric_version"` + BasicUsername string `toml:"basic_username"` + BasicPassword string `toml:"basic_password"` + IPRange []string `toml:"ip_range"` + ExpirationInterval internal.Duration `toml:"expiration_interval"` + Path string `toml:"path"` + CollectorsExclude []string `toml:"collectors_exclude"` + StringAsLabel bool `toml:"string_as_label"` + ExportTimestamp bool `toml:"export_timestamp"` + tlsint.ServerConfig + + Log telegraf.Logger `toml:"-"` + + server *http.Server + url *url.URL + collector Collector + wg sync.WaitGroup +} + +func (p *PrometheusClient) Description() string { + return "Configuration for the Prometheus client to spawn" +} + +func (p *PrometheusClient) SampleConfig() string { + return sampleConfig +} + +func (p *PrometheusClient) Init() error { defaultCollectors := map[string]bool{ "gocollector": true, "process": true, @@ -124,363 +117,146 @@ func (p *PrometheusClient) Start() error { } registry := prometheus.NewRegistry() - for collector, _ := range defaultCollectors { + for collector := range defaultCollectors { switch collector { case "gocollector": registry.Register(prometheus.NewGoCollector()) case "process": - registry.Register(prometheus.NewProcessCollector(os.Getpid(), "")) + registry.Register(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{})) default: return fmt.Errorf("unrecognized collector %s", collector) } } - registry.Register(p) - - if p.Listen == "" { - p.Listen = "localhost:9273" + switch p.MetricVersion { + default: + fallthrough + case 1: + p.Log.Warnf("Use of deprecated configuration: metric_version = 1; please update to metric_version = 2") + p.collector = v1.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.Log) + err := registry.Register(p.collector) + if err != nil { + return err + } + case 2: + p.collector = v2.NewCollector(p.ExpirationInterval.Duration, p.StringAsLabel, p.ExportTimestamp) + err := registry.Register(p.collector) + if err != nil { + return err + } } - if p.Path == "" { - p.Path = "/metrics" + ipRange := make([]*net.IPNet, 0, len(p.IPRange)) + for _, cidr := range p.IPRange { + _, ipNet, err := net.ParseCIDR(cidr) + if err != nil { + return fmt.Errorf("error parsing ip_range: %v", err) + } + + ipRange = append(ipRange, ipNet) } + authHandler := internal.AuthHandler(p.BasicUsername, p.BasicPassword, "prometheus", onAuthError) + rangeHandler := internal.IPRangeHandler(ipRange, onError) + promHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorHandling: promhttp.ContinueOnError}) + mux := http.NewServeMux() - mux.Handle(p.Path, p.basicAuth(promhttp.HandlerFor( - registry, promhttp.HandlerOpts{ErrorHandling: 
promhttp.ContinueOnError}))) + if p.Path == "" { + p.Path = "/" + } + mux.Handle(p.Path, authHandler(rangeHandler(promHandler))) - p.server = &http.Server{ - Addr: p.Listen, - Handler: mux, + tlsConfig, err := p.TLSConfig() + if err != nil { + return err } + p.server = &http.Server{ + Addr: p.Listen, + Handler: mux, + TLSConfig: tlsConfig, + } + + return nil +} + +func (p *PrometheusClient) listen() (net.Listener, error) { + if p.server.TLSConfig != nil { + return tls.Listen("tcp", p.Listen, p.server.TLSConfig) + } else { + return net.Listen("tcp", p.Listen) + } +} + +func (p *PrometheusClient) Connect() error { + listener, err := p.listen() + if err != nil { + return err + } + + scheme := "http" + if p.server.TLSConfig != nil { + scheme = "https" + } + + p.url = &url.URL{ + Scheme: scheme, + Host: listener.Addr().String(), + Path: p.Path, + } + + p.Log.Infof("Listening on %s", p.URL()) + + p.wg.Add(1) go func() { - var err error - if p.TLSCert != "" && p.TLSKey != "" { - err = p.server.ListenAndServeTLS(p.TLSCert, p.TLSKey) - } else { - err = p.server.ListenAndServe() - } + defer p.wg.Done() + err := p.server.Serve(listener) if err != nil && err != http.ErrServerClosed { - log.Printf("E! Error creating prometheus metric endpoint, err: %s\n", - err.Error()) + p.Log.Errorf("Server error: %v", err) } }() return nil } -func (p *PrometheusClient) Stop() { - // plugin gets cleaned up in Close() already. +func onAuthError(_ http.ResponseWriter) { } -func (p *PrometheusClient) Connect() error { - // This service output does not need to make any further connections - return nil +func onError(rw http.ResponseWriter, code int) { + http.Error(rw, http.StatusText(code), code) +} + +// Address returns the address the plugin is listening on. If not listening +// an empty string is returned. +func (p *PrometheusClient) URL() string { + if p.url != nil { + return p.url.String() + } + return "" } func (p *PrometheusClient) Close() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() + err := p.server.Shutdown(ctx) - prometheus.Unregister(p) + p.wg.Wait() + p.url = nil + prometheus.Unregister(p.collector) return err } -func (p *PrometheusClient) SampleConfig() string { - return sampleConfig -} - -func (p *PrometheusClient) Description() string { - return "Configuration for the Prometheus client to spawn" -} - -// Implements prometheus.Collector -func (p *PrometheusClient) Describe(ch chan<- *prometheus.Desc) { - prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(ch) -} - -// Expire removes Samples that have expired. 
-func (p *PrometheusClient) Expire() { - now := p.now() - for name, family := range p.fam { - for key, sample := range family.Samples { - if p.ExpirationInterval.Duration != 0 && now.After(sample.Expiration) { - for k, _ := range sample.Labels { - family.LabelSet[k]-- - } - delete(family.Samples, key) - - if len(family.Samples) == 0 { - delete(p.fam, name) - } - } - } - } -} - -// Collect implements prometheus.Collector -func (p *PrometheusClient) Collect(ch chan<- prometheus.Metric) { - p.Lock() - defer p.Unlock() - - p.Expire() - - for name, family := range p.fam { - // Get list of all labels on MetricFamily - var labelNames []string - for k, v := range family.LabelSet { - if v > 0 { - labelNames = append(labelNames, k) - } - } - desc := prometheus.NewDesc(name, "Telegraf collected metric", labelNames, nil) - - for _, sample := range family.Samples { - // Get labels for this sample; unset labels will be set to the - // empty string - var labels []string - for _, label := range labelNames { - v := sample.Labels[label] - labels = append(labels, v) - } - - var metric prometheus.Metric - var err error - switch family.TelegrafValueType { - case telegraf.Summary: - metric, err = prometheus.NewConstSummary(desc, sample.Count, sample.Sum, sample.SummaryValue, labels...) - case telegraf.Histogram: - metric, err = prometheus.NewConstHistogram(desc, sample.Count, sample.Sum, sample.HistogramValue, labels...) - default: - metric, err = prometheus.NewConstMetric(desc, getPromValueType(family.TelegrafValueType), sample.Value, labels...) - } - if err != nil { - log.Printf("E! Error creating prometheus metric, "+ - "key: %s, labels: %v,\nerr: %s\n", - name, labels, err.Error()) - } - - ch <- metric - } - } -} - -func sanitize(value string) string { - return invalidNameCharRE.ReplaceAllString(value, "_") -} - -func getPromValueType(tt telegraf.ValueType) prometheus.ValueType { - switch tt { - case telegraf.Counter: - return prometheus.CounterValue - case telegraf.Gauge: - return prometheus.GaugeValue - default: - return prometheus.UntypedValue - } -} - -// CreateSampleID creates a SampleID based on the tags of a telegraf.Metric. -func CreateSampleID(tags map[string]string) SampleID { - pairs := make([]string, 0, len(tags)) - for k, v := range tags { - pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) - } - sort.Strings(pairs) - return SampleID(strings.Join(pairs, ",")) -} - -func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) { - - for k, _ := range sample.Labels { - fam.LabelSet[k]++ - } - - fam.Samples[sampleID] = sample -} - -func (p *PrometheusClient) addMetricFamily(point telegraf.Metric, sample *Sample, mname string, sampleID SampleID) { - var fam *MetricFamily - var ok bool - if fam, ok = p.fam[mname]; !ok { - fam = &MetricFamily{ - Samples: make(map[SampleID]*Sample), - TelegrafValueType: point.Type(), - LabelSet: make(map[string]int), - } - p.fam[mname] = fam - } - - addSample(fam, sample, sampleID) -} - func (p *PrometheusClient) Write(metrics []telegraf.Metric) error { - p.Lock() - defer p.Unlock() - - now := p.now() - - for _, point := range metrics { - tags := point.Tags() - sampleID := CreateSampleID(tags) - - labels := make(map[string]string) - for k, v := range tags { - labels[sanitize(k)] = v - } - - // Prometheus doesn't have a string value type, so convert string - // fields to labels if enabled. 
- if p.StringAsLabel { - for fn, fv := range point.Fields() { - switch fv := fv.(type) { - case string: - labels[sanitize(fn)] = fv - } - } - } - - switch point.Type() { - case telegraf.Summary: - var mname string - var sum float64 - var count uint64 - summaryvalue := make(map[float64]float64) - for fn, fv := range point.Fields() { - var value float64 - switch fv := fv.(type) { - case int64: - value = float64(fv) - case uint64: - value = float64(fv) - case float64: - value = fv - default: - continue - } - - switch fn { - case "sum": - sum = value - case "count": - count = uint64(value) - default: - limit, err := strconv.ParseFloat(fn, 64) - if err == nil { - summaryvalue[limit] = value - } - } - } - sample := &Sample{ - Labels: labels, - SummaryValue: summaryvalue, - Count: count, - Sum: sum, - Expiration: now.Add(p.ExpirationInterval.Duration), - } - mname = sanitize(point.Name()) - - p.addMetricFamily(point, sample, mname, sampleID) - - case telegraf.Histogram: - var mname string - var sum float64 - var count uint64 - histogramvalue := make(map[float64]uint64) - for fn, fv := range point.Fields() { - var value float64 - switch fv := fv.(type) { - case int64: - value = float64(fv) - case uint64: - value = float64(fv) - case float64: - value = fv - default: - continue - } - - switch fn { - case "sum": - sum = value - case "count": - count = uint64(value) - default: - limit, err := strconv.ParseFloat(fn, 64) - if err == nil { - histogramvalue[limit] = uint64(value) - } - } - } - sample := &Sample{ - Labels: labels, - HistogramValue: histogramvalue, - Count: count, - Sum: sum, - Expiration: now.Add(p.ExpirationInterval.Duration), - } - mname = sanitize(point.Name()) - - p.addMetricFamily(point, sample, mname, sampleID) - - default: - for fn, fv := range point.Fields() { - // Ignore string and bool fields. - var value float64 - switch fv := fv.(type) { - case int64: - value = float64(fv) - case uint64: - value = float64(fv) - case float64: - value = fv - default: - continue - } - - sample := &Sample{ - Labels: labels, - Value: value, - Expiration: now.Add(p.ExpirationInterval.Duration), - } - - // Special handling of value field; supports passthrough from - // the prometheus input. 
- var mname string - switch point.Type() { - case telegraf.Counter: - if fn == "counter" { - mname = sanitize(point.Name()) - } - case telegraf.Gauge: - if fn == "gauge" { - mname = sanitize(point.Name()) - } - } - if mname == "" { - if fn == "value" { - mname = sanitize(point.Name()) - } else { - mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn)) - } - } - - p.addMetricFamily(point, sample, mname, sampleID) - - } - } - } - return nil + return p.collector.Add(metrics) } func init() { outputs.Add("prometheus_client", func() telegraf.Output { return &PrometheusClient{ - ExpirationInterval: internal.Duration{Duration: time.Second * 60}, + Listen: defaultListen, + Path: defaultPath, + ExpirationInterval: defaultExpirationInterval, StringAsLabel: true, - fam: make(map[string]*MetricFamily), - now: time.Now, } }) } diff --git a/plugins/outputs/prometheus_client/prometheus_client_test.go b/plugins/outputs/prometheus_client/prometheus_client_test.go deleted file mode 100644 index bd2398a23..000000000 --- a/plugins/outputs/prometheus_client/prometheus_client_test.go +++ /dev/null @@ -1,693 +0,0 @@ -package prometheus_client - -import ( - "testing" - "time" - - "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/internal" - "github.com/influxdata/telegraf/metric" - prometheus_input "github.com/influxdata/telegraf/plugins/inputs/prometheus" - "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/require" -) - -func setUnixTime(client *PrometheusClient, sec int64) { - client.now = func() time.Time { - return time.Unix(sec, 0) - } -} - -// NewClient initializes a PrometheusClient. -func NewClient() *PrometheusClient { - return &PrometheusClient{ - ExpirationInterval: internal.Duration{Duration: time.Second * 60}, - StringAsLabel: true, - fam: make(map[string]*MetricFamily), - now: time.Now, - } -} - -func TestWrite_Basic(t *testing.T) { - now := time.Now() - pt1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 0.0}, - now) - var metrics = []telegraf.Metric{ - pt1, - } - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, telegraf.Untyped, fam.TelegrafValueType) - require.Equal(t, map[string]int{}, fam.LabelSet) - - sample, ok := fam.Samples[CreateSampleID(pt1.Tags())] - require.True(t, ok) - - require.Equal(t, 0.0, sample.Value) - require.True(t, now.Before(sample.Expiration)) -} - -func TestWrite_IntField(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 42}, - time.Now()) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - for _, v := range fam.Samples { - require.Equal(t, 42.0, v.Value) - } - -} - -func TestWrite_FieldNotValue(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"howdy": 0.0}, - time.Now()) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo_howdy"] - require.True(t, ok) - for _, v := range fam.Samples { - require.Equal(t, 0.0, v.Value) - } -} - -func TestWrite_SkipNonNumberField(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": "howdy"}, - time.Now()) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - _, ok := 
client.fam["foo"] - require.False(t, ok) -} - -func TestWrite_Counters(t *testing.T) { - type args struct { - measurement string - tags map[string]string - fields map[string]interface{} - valueType telegraf.ValueType - } - var tests = []struct { - name string - args args - err error - metricName string - valueType telegraf.ValueType - }{ - { - name: "field named value is not added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"value": 42}, - valueType: telegraf.Counter, - }, - metricName: "foo", - valueType: telegraf.Counter, - }, - { - name: "field named counter is not added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"counter": 42}, - valueType: telegraf.Counter, - }, - metricName: "foo", - valueType: telegraf.Counter, - }, - { - name: "field with any other name is added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"other": 42}, - valueType: telegraf.Counter, - }, - metricName: "foo_other", - valueType: telegraf.Counter, - }, - { - name: "uint64 fields are output", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"value": uint64(42)}, - valueType: telegraf.Counter, - }, - metricName: "foo", - valueType: telegraf.Counter, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m, err := metric.New( - tt.args.measurement, - tt.args.tags, - tt.args.fields, - time.Now(), - tt.args.valueType, - ) - client := NewClient() - err = client.Write([]telegraf.Metric{m}) - require.Equal(t, tt.err, err) - - fam, ok := client.fam[tt.metricName] - require.True(t, ok) - require.Equal(t, tt.valueType, fam.TelegrafValueType) - }) - } -} - -func TestWrite_Sanitize(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo.bar", - map[string]string{"tag-with-dash": "localhost.local"}, - map[string]interface{}{"field-with-dash": 42}, - time.Now(), - telegraf.Counter) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo_bar_field_with_dash"] - require.True(t, ok) - require.Equal(t, map[string]int{"tag_with_dash": 1}, fam.LabelSet) - - sample1, ok := fam.Samples[CreateSampleID(p1.Tags())] - require.True(t, ok) - - require.Equal(t, map[string]string{ - "tag_with_dash": "localhost.local"}, sample1.Labels) -} - -func TestWrite_Gauge(t *testing.T) { - type args struct { - measurement string - tags map[string]string - fields map[string]interface{} - valueType telegraf.ValueType - } - var tests = []struct { - name string - args args - err error - metricName string - valueType telegraf.ValueType - }{ - { - name: "field named value is not added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"value": 42}, - valueType: telegraf.Gauge, - }, - metricName: "foo", - valueType: telegraf.Gauge, - }, - { - name: "field named gauge is not added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"gauge": 42}, - valueType: telegraf.Gauge, - }, - metricName: "foo", - valueType: telegraf.Gauge, - }, - { - name: "field with any other name is added to metric name", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"other": 42}, - valueType: telegraf.Gauge, - }, - metricName: "foo_other", - valueType: telegraf.Gauge, - }, - { - name: "uint64 fields are output", - args: args{ - measurement: "foo", - fields: map[string]interface{}{"value": uint64(42)}, - valueType: telegraf.Counter, - }, - metricName: "foo", - valueType: 
telegraf.Counter, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - m, err := metric.New( - tt.args.measurement, - tt.args.tags, - tt.args.fields, - time.Now(), - tt.args.valueType, - ) - client := NewClient() - err = client.Write([]telegraf.Metric{m}) - require.Equal(t, tt.err, err) - - fam, ok := client.fam[tt.metricName] - require.True(t, ok) - require.Equal(t, tt.valueType, fam.TelegrafValueType) - - }) - } -} - -func TestWrite_Summary(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4}, - time.Now(), - telegraf.Summary) - - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 1, len(fam.Samples)) - - sample1, ok := fam.Samples[CreateSampleID(p1.Tags())] - require.True(t, ok) - - require.Equal(t, 84.0, sample1.Sum) - require.Equal(t, uint64(42), sample1.Count) - require.Equal(t, 3, len(sample1.SummaryValue)) -} - -func TestWrite_Histogram(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"sum": 84, "count": 42, "0": 2, "0.5": 3, "1": 4}, - time.Now(), - telegraf.Histogram) - - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 1, len(fam.Samples)) - - sample1, ok := fam.Samples[CreateSampleID(p1.Tags())] - require.True(t, ok) - - require.Equal(t, 84.0, sample1.Sum) - require.Equal(t, uint64(42), sample1.Count) - require.Equal(t, 3, len(sample1.HistogramValue)) -} - -func TestWrite_MixedValueType(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0}, - now, - telegraf.Counter) - p2, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 2.0}, - now, - telegraf.Gauge) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 1, len(fam.Samples)) -} - -func TestWrite_MixedValueTypeUpgrade(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - map[string]string{"a": "x"}, - map[string]interface{}{"value": 1.0}, - now, - telegraf.Untyped) - p2, err := metric.New( - "foo", - map[string]string{"a": "y"}, - map[string]interface{}{"value": 2.0}, - now, - telegraf.Gauge) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 2, len(fam.Samples)) -} - -func TestWrite_MixedValueTypeDowngrade(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - map[string]string{"a": "x"}, - map[string]interface{}{"value": 1.0}, - now, - telegraf.Gauge) - p2, err := metric.New( - "foo", - map[string]string{"a": "y"}, - map[string]interface{}{"value": 2.0}, - now, - telegraf.Untyped) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 2, len(fam.Samples)) -} - -func TestWrite_Tags(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0}, - now) - p2, err := 
metric.New( - "foo", - map[string]string{"host": "localhost"}, - map[string]interface{}{"value": 2.0}, - now) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, telegraf.Untyped, fam.TelegrafValueType) - - require.Equal(t, map[string]int{"host": 1}, fam.LabelSet) - - sample1, ok := fam.Samples[CreateSampleID(p1.Tags())] - require.True(t, ok) - - require.Equal(t, 1.0, sample1.Value) - require.True(t, now.Before(sample1.Expiration)) - - sample2, ok := fam.Samples[CreateSampleID(p2.Tags())] - require.True(t, ok) - - require.Equal(t, 2.0, sample2.Value) - require.True(t, now.Before(sample2.Expiration)) -} - -func TestWrite_StringFields(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0, "status": "good"}, - now, - telegraf.Counter) - p2, err := metric.New( - "bar", - make(map[string]string), - map[string]interface{}{"status": "needs numeric field"}, - now, - telegraf.Gauge) - var metrics = []telegraf.Metric{p1, p2} - - client := NewClient() - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 1, fam.LabelSet["status"]) - - fam, ok = client.fam["bar"] - require.False(t, ok) -} - -func TestDoNotWrite_StringFields(t *testing.T) { - now := time.Now() - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0, "status": "good"}, - now, - telegraf.Counter) - p2, err := metric.New( - "bar", - make(map[string]string), - map[string]interface{}{"status": "needs numeric field"}, - now, - telegraf.Gauge) - var metrics = []telegraf.Metric{p1, p2} - - client := &PrometheusClient{ - ExpirationInterval: internal.Duration{Duration: time.Second * 60}, - StringAsLabel: false, - fam: make(map[string]*MetricFamily), - now: time.Now, - } - - err = client.Write(metrics) - require.NoError(t, err) - - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 0, fam.LabelSet["status"]) - - fam, ok = client.fam["bar"] - require.False(t, ok) -} - -func TestExpire(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0}, - time.Now()) - setUnixTime(client, 0) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - p2, err := metric.New( - "bar", - make(map[string]string), - map[string]interface{}{"value": 2.0}, - time.Now()) - setUnixTime(client, 1) - err = client.Write([]telegraf.Metric{p2}) - - setUnixTime(client, 61) - require.Equal(t, 2, len(client.fam)) - client.Expire() - require.Equal(t, 1, len(client.fam)) -} - -func TestExpire_TagsNoDecrement(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 1.0}, - time.Now()) - setUnixTime(client, 0) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - p2, err := metric.New( - "foo", - map[string]string{"host": "localhost"}, - map[string]interface{}{"value": 2.0}, - time.Now()) - setUnixTime(client, 1) - err = client.Write([]telegraf.Metric{p2}) - - setUnixTime(client, 61) - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 2, len(fam.Samples)) - client.Expire() - require.Equal(t, 1, len(fam.Samples)) - - require.Equal(t, map[string]int{"host": 1}, fam.LabelSet) -} - -func 
TestExpire_TagsWithDecrement(t *testing.T) { - client := NewClient() - - p1, err := metric.New( - "foo", - map[string]string{"host": "localhost"}, - map[string]interface{}{"value": 1.0}, - time.Now()) - setUnixTime(client, 0) - err = client.Write([]telegraf.Metric{p1}) - require.NoError(t, err) - - p2, err := metric.New( - "foo", - make(map[string]string), - map[string]interface{}{"value": 2.0}, - time.Now()) - setUnixTime(client, 1) - err = client.Write([]telegraf.Metric{p2}) - - setUnixTime(client, 61) - fam, ok := client.fam["foo"] - require.True(t, ok) - require.Equal(t, 2, len(fam.Samples)) - client.Expire() - require.Equal(t, 1, len(fam.Samples)) - - require.Equal(t, map[string]int{"host": 0}, fam.LabelSet) -} - -var pTesting *PrometheusClient - -func TestPrometheusWritePointEmptyTag(t *testing.T) { - if testing.Short() { - t.Skip("Skipping integration test in short mode") - } - - pClient, p, err := setupPrometheus() - require.NoError(t, err) - defer pClient.Stop() - - now := time.Now() - tags := make(map[string]string) - pt1, _ := metric.New( - "test_point_1", - tags, - map[string]interface{}{"value": 0.0}, - now) - pt2, _ := metric.New( - "test_point_2", - tags, - map[string]interface{}{"value": 1.0}, - now) - var metrics = []telegraf.Metric{ - pt1, - pt2, - } - require.NoError(t, pClient.Write(metrics)) - - expected := []struct { - name string - value float64 - tags map[string]string - }{ - {"test_point_1", 0.0, tags}, - {"test_point_2", 1.0, tags}, - } - - var acc testutil.Accumulator - - require.NoError(t, p.Gather(&acc)) - for _, e := range expected { - acc.AssertContainsFields(t, e.name, - map[string]interface{}{"value": e.value}) - } - - tags = make(map[string]string) - tags["testtag"] = "testvalue" - pt3, _ := metric.New( - "test_point_3", - tags, - map[string]interface{}{"value": 0.0}, - now) - pt4, _ := metric.New( - "test_point_4", - tags, - map[string]interface{}{"value": 1.0}, - now) - metrics = []telegraf.Metric{ - pt3, - pt4, - } - require.NoError(t, pClient.Write(metrics)) - - expected2 := []struct { - name string - value float64 - }{ - {"test_point_3", 0.0}, - {"test_point_4", 1.0}, - } - - require.NoError(t, p.Gather(&acc)) - for _, e := range expected2 { - acc.AssertContainsFields(t, e.name, - map[string]interface{}{"value": e.value}) - } -} - -func setupPrometheus() (*PrometheusClient, *prometheus_input.Prometheus, error) { - if pTesting == nil { - pTesting = NewClient() - pTesting.Listen = "localhost:9127" - pTesting.Path = "/metrics" - err := pTesting.Start() - if err != nil { - return nil, nil, err - } - } else { - pTesting.fam = make(map[string]*MetricFamily) - } - - time.Sleep(time.Millisecond * 200) - - p := &prometheus_input.Prometheus{ - URLs: []string{"http://localhost:9127/metrics"}, - } - - return pTesting, p, nil -} diff --git a/plugins/outputs/prometheus_client/prometheus_client_v1_test.go b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go new file mode 100644 index 000000000..adf18c9f0 --- /dev/null +++ b/plugins/outputs/prometheus_client/prometheus_client_v1_test.go @@ -0,0 +1,402 @@ +package prometheus + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + inputs "github.com/influxdata/telegraf/plugins/inputs/prometheus" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestMetricVersion1(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} + tests := []struct { + name string + 
output *PrometheusClient + metrics []telegraf.Metric + expected []byte + }{ + { + name: "simple", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "prometheus untyped", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "prometheus counter", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "counter": 42.0, + }, + time.Unix(0, 0), + telegraf.Counter, + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle counter +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "replace characters when using string as label", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + StringAsLabel: true, + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{}, + map[string]interface{}{ + "host:name": "example.org", + "counter": 42.0, + }, + time.Unix(0, 0), + telegraf.Counter, + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle counter +cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "prometheus gauge", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu_time_idle", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "gauge": 42.0, + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "prometheus histogram", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "http_request_duration_seconds", + map[string]string{}, + map[string]interface{}{ + "sum": 53423, + "0.05": 24054, + "0.1": 33444, + "0.2": 100392, + "0.5": 129389, + "1": 133988, + "+Inf": 144320, + "count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + }, + expected: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram 
+http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_count 144320 +`), + }, + { + name: "prometheus summary", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 1, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "rpc_duration_seconds", + map[string]string{}, + map[string]interface{}{ + "0.01": 3102, + "0.05": 3272, + "0.5": 4773, + "0.9": 9001, + "0.99": 76656, + "count": 2693, + "sum": 17560473, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + }, + expected: []byte(` +# HELP rpc_duration_seconds Telegraf collected metric +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +rpc_duration_seconds_sum 1.7560473e+07 +rpc_duration_seconds_count 2693 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.output.Init() + require.NoError(t, err) + + err = tt.output.Connect() + require.NoError(t, err) + + defer func() { + err := tt.output.Close() + require.NoError(t, err) + }() + + err = tt.output.Write(tt.metrics) + require.NoError(t, err) + + resp, err := http.Get(tt.output.URL()) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(body))) + }) + } +} + +func TestRoundTripMetricVersion1(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} + tests := []struct { + name string + data []byte + }{ + { + name: "untyped", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "counter", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle counter +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "gauge", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "multi", + data: []byte(` +# HELP cpu_time_guest Telegraf collected metric +# TYPE cpu_time_guest gauge +cpu_time_guest{host="one.example.org"} 42 +cpu_time_guest{host="two.example.org"} 42 +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="one.example.org"} 42 +cpu_time_idle{host="two.example.org"} 42 +`), + }, + { + name: "histogram", + data: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_sum 53423 
+http_request_duration_seconds_count 144320 +`), + }, + { + name: "summary", + data: []byte(` +# HELP rpc_duration_seconds Telegraf collected metric +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +rpc_duration_seconds_sum 1.7560473e+07 +rpc_duration_seconds_count 2693 +`), + }, + } + + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + url := fmt.Sprintf("http://%s", ts.Listener.Addr()) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write(tt.data) + }) + + input := &inputs.Prometheus{ + URLs: []string{url}, + URLTag: "", + MetricVersion: 1, + } + var acc testutil.Accumulator + err := input.Start(&acc) + require.NoError(t, err) + err = input.Gather(&acc) + require.NoError(t, err) + input.Stop() + + metrics := acc.GetTelegrafMetrics() + + output := &PrometheusClient{ + Listen: "127.0.0.1:0", + Path: defaultPath, + MetricVersion: 1, + Log: Logger, + CollectorsExclude: []string{"gocollector", "process"}, + } + err = output.Init() + require.NoError(t, err) + err = output.Connect() + require.NoError(t, err) + defer func() { + err = output.Close() + require.NoError(t, err) + }() + err = output.Write(metrics) + require.NoError(t, err) + + resp, err := http.Get(output.URL()) + require.NoError(t, err) + + actual, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.data)), + strings.TrimSpace(string(actual))) + }) + } +} diff --git a/plugins/outputs/prometheus_client/prometheus_client_v2_test.go b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go new file mode 100644 index 000000000..3404ab2ed --- /dev/null +++ b/plugins/outputs/prometheus_client/prometheus_client_v2_test.go @@ -0,0 +1,404 @@ +package prometheus + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + inputs "github.com/influxdata/telegraf/plugins/inputs/prometheus" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestMetricVersion2(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} + tests := []struct { + name string + output *PrometheusClient + metrics []telegraf.Metric + expected []byte + }{ + { + name: "untyped telegraf metric", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "when export timestamp is true timestamp is present in the metric", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + ExportTimestamp: true, + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 
42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 0 +`), + }, + { + name: "strings as labels", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + StringAsLabel: true, + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "host": "example.org", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "when strings as labels is false string fields are discarded", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + StringAsLabel: false, + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "host": "example.org", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle 42 +`), + }, + { + name: "untype prometheus metric", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "cpu_time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "telegraf histogram", + output: &PrometheusClient{ + Listen: ":0", + MetricVersion: 2, + CollectorsExclude: []string{"gocollector", "process"}, + Path: "/metrics", + Log: Logger, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + }, + map[string]interface{}{ + "usage_idle_sum": 2000.0, + "usage_idle_count": 20.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + "le": "0.0", + }, + map[string]interface{}{ + "usage_idle_bucket": 0.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + "le": "50.0", + }, + map[string]interface{}{ + "usage_idle_bucket": 7.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + "le": "100.0", + }, + map[string]interface{}{ + "usage_idle_bucket": 20.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + "le": "+Inf", + }, + map[string]interface{}{ + "usage_idle_bucket": 20.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + }, + expected: []byte(` +# HELP cpu_usage_idle Telegraf collected metric +# TYPE cpu_usage_idle histogram +cpu_usage_idle_bucket{cpu="cpu1",le="0"} 0 +cpu_usage_idle_bucket{cpu="cpu1",le="50"} 7 +cpu_usage_idle_bucket{cpu="cpu1",le="100"} 20 +cpu_usage_idle_bucket{cpu="cpu1",le="+Inf"} 20 +cpu_usage_idle_sum{cpu="cpu1"} 2000 +cpu_usage_idle_count{cpu="cpu1"} 20 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := 
tt.output.Init() + require.NoError(t, err) + + err = tt.output.Connect() + require.NoError(t, err) + + defer func() { + err := tt.output.Close() + require.NoError(t, err) + }() + + err = tt.output.Write(tt.metrics) + require.NoError(t, err) + + resp, err := http.Get(tt.output.URL()) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(body))) + }) + } +} + +func TestRoundTripMetricVersion2(t *testing.T) { + Logger := testutil.Logger{Name: "outputs.prometheus_client"} + tests := []struct { + name string + data []byte + }{ + { + name: "untyped", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "counter", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle counter +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "gauge", + data: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "multi", + data: []byte(` +# HELP cpu_time_guest Telegraf collected metric +# TYPE cpu_time_guest gauge +cpu_time_guest{host="one.example.org"} 42 +cpu_time_guest{host="two.example.org"} 42 +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle gauge +cpu_time_idle{host="one.example.org"} 42 +cpu_time_idle{host="two.example.org"} 42 +`), + }, + { + name: "histogram", + data: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_count 144320 +`), + }, + { + name: "summary", + data: []byte(` +# HELP rpc_duration_seconds Telegraf collected metric +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +rpc_duration_seconds_sum 1.7560473e+07 +rpc_duration_seconds_count 2693 +`), + }, + } + + ts := httptest.NewServer(http.NotFoundHandler()) + defer ts.Close() + + url := fmt.Sprintf("http://%s", ts.Listener.Addr()) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write(tt.data) + }) + + input := &inputs.Prometheus{ + URLs: []string{url}, + URLTag: "", + MetricVersion: 2, + } + var acc testutil.Accumulator + err := input.Start(&acc) + require.NoError(t, err) + err = input.Gather(&acc) + require.NoError(t, err) + input.Stop() + + metrics := acc.GetTelegrafMetrics() + + output := &PrometheusClient{ + Listen: "127.0.0.1:0", + Path: defaultPath, + MetricVersion: 2, + Log: Logger, + CollectorsExclude: []string{"gocollector", "process"}, + } + err = output.Init() + require.NoError(t, err) + err = output.Connect() + require.NoError(t, err) + defer func() { + err = output.Close() + 
require.NoError(t, err) + }() + err = output.Write(metrics) + require.NoError(t, err) + + resp, err := http.Get(output.URL()) + require.NoError(t, err) + + actual, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.data)), + strings.TrimSpace(string(actual))) + }) + } +} diff --git a/plugins/outputs/prometheus_client/v1/collector.go b/plugins/outputs/prometheus_client/v1/collector.go new file mode 100644 index 000000000..7932bbc59 --- /dev/null +++ b/plugins/outputs/prometheus_client/v1/collector.go @@ -0,0 +1,392 @@ +package v1 + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/influxdata/telegraf" + serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/prometheus/client_golang/prometheus" +) + +var ( + invalidNameCharRE = regexp.MustCompile(`[^a-zA-Z0-9_:]`) + validNameCharRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*`) +) + +// SampleID uniquely identifies a Sample +type SampleID string + +// Sample represents the current value of a series. +type Sample struct { + // Labels are the Prometheus labels. + Labels map[string]string + // Value is the value in the Prometheus output. Only one of these will populated. + Value float64 + HistogramValue map[float64]uint64 + SummaryValue map[float64]float64 + // Histograms and Summaries need a count and a sum + Count uint64 + Sum float64 + // Metric timestamp + Timestamp time.Time + // Expiration is the deadline that this Sample is valid until. + Expiration time.Time +} + +// MetricFamily contains the data required to build valid prometheus Metrics. +type MetricFamily struct { + // Samples are the Sample belonging to this MetricFamily. + Samples map[SampleID]*Sample + // Need the telegraf ValueType because there isn't a Prometheus ValueType + // representing Histogram or Summary + TelegrafValueType telegraf.ValueType + // LabelSet is the label counts for all Samples. + LabelSet map[string]int +} + +type Collector struct { + ExpirationInterval time.Duration + StringAsLabel bool + ExportTimestamp bool + Log telegraf.Logger + + sync.Mutex + fam map[string]*MetricFamily +} + +func NewCollector(expire time.Duration, stringsAsLabel bool, logger telegraf.Logger) *Collector { + return &Collector{ + ExpirationInterval: expire, + StringAsLabel: stringsAsLabel, + Log: logger, + fam: make(map[string]*MetricFamily), + } +} + +func (c *Collector) Describe(ch chan<- *prometheus.Desc) { + prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(ch) +} + +func (c *Collector) Collect(ch chan<- prometheus.Metric) { + c.Lock() + defer c.Unlock() + + c.Expire(time.Now(), c.ExpirationInterval) + + for name, family := range c.fam { + // Get list of all labels on MetricFamily + var labelNames []string + for k, v := range family.LabelSet { + if v > 0 { + labelNames = append(labelNames, k) + } + } + desc := prometheus.NewDesc(name, "Telegraf collected metric", labelNames, nil) + + for _, sample := range family.Samples { + // Get labels for this sample; unset labels will be set to the + // empty string + var labels []string + for _, label := range labelNames { + v := sample.Labels[label] + labels = append(labels, v) + } + + var metric prometheus.Metric + var err error + switch family.TelegrafValueType { + case telegraf.Summary: + metric, err = prometheus.NewConstSummary(desc, sample.Count, sample.Sum, sample.SummaryValue, labels...) 
+ case telegraf.Histogram: + metric, err = prometheus.NewConstHistogram(desc, sample.Count, sample.Sum, sample.HistogramValue, labels...) + default: + metric, err = prometheus.NewConstMetric(desc, getPromValueType(family.TelegrafValueType), sample.Value, labels...) + } + if err != nil { + c.Log.Errorf("Error creating prometheus metric: "+ + "key: %s, labels: %v, err: %v", + name, labels, err) + continue + } + + if c.ExportTimestamp { + metric = prometheus.NewMetricWithTimestamp(sample.Timestamp, metric) + } + ch <- metric + } + } +} + +func sanitize(value string) string { + return invalidNameCharRE.ReplaceAllString(value, "_") +} + +func isValidTagName(tag string) bool { + return validNameCharRE.MatchString(tag) +} + +func getPromValueType(tt telegraf.ValueType) prometheus.ValueType { + switch tt { + case telegraf.Counter: + return prometheus.CounterValue + case telegraf.Gauge: + return prometheus.GaugeValue + default: + return prometheus.UntypedValue + } +} + +// CreateSampleID creates a SampleID based on the tags of a telegraf.Metric. +func CreateSampleID(tags map[string]string) SampleID { + pairs := make([]string, 0, len(tags)) + for k, v := range tags { + pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(pairs) + return SampleID(strings.Join(pairs, ",")) +} + +func addSample(fam *MetricFamily, sample *Sample, sampleID SampleID) { + + for k := range sample.Labels { + fam.LabelSet[k]++ + } + + fam.Samples[sampleID] = sample +} + +func (c *Collector) addMetricFamily(point telegraf.Metric, sample *Sample, mname string, sampleID SampleID) { + var fam *MetricFamily + var ok bool + if fam, ok = c.fam[mname]; !ok { + fam = &MetricFamily{ + Samples: make(map[SampleID]*Sample), + TelegrafValueType: point.Type(), + LabelSet: make(map[string]int), + } + c.fam[mname] = fam + } + + addSample(fam, sample, sampleID) +} + +// Sorted returns a copy of the metrics in time ascending order. A copy is +// made to avoid modifying the input metric slice since doing so is not +// allowed. +func sorted(metrics []telegraf.Metric) []telegraf.Metric { + batch := make([]telegraf.Metric, 0, len(metrics)) + for i := len(metrics) - 1; i >= 0; i-- { + batch = append(batch, metrics[i]) + } + sort.Slice(batch, func(i, j int) bool { + return batch[i].Time().Before(batch[j].Time()) + }) + return batch +} + +func (c *Collector) Add(metrics []telegraf.Metric) error { + c.Lock() + defer c.Unlock() + + now := time.Now() + + for _, point := range sorted(metrics) { + tags := point.Tags() + sampleID := CreateSampleID(tags) + + labels := make(map[string]string) + for k, v := range tags { + name, ok := serializer.SanitizeLabelName(k) + if !ok { + continue + } + labels[name] = v + } + + // Prometheus doesn't have a string value type, so convert string + // fields to labels if enabled. 
+ if c.StringAsLabel { + for fn, fv := range point.Fields() { + switch fv := fv.(type) { + case string: + name, ok := serializer.SanitizeLabelName(fn) + if !ok { + continue + } + labels[name] = fv + } + } + } + + switch point.Type() { + case telegraf.Summary: + var mname string + var sum float64 + var count uint64 + summaryvalue := make(map[float64]float64) + for fn, fv := range point.Fields() { + var value float64 + switch fv := fv.(type) { + case int64: + value = float64(fv) + case uint64: + value = float64(fv) + case float64: + value = fv + default: + continue + } + + switch fn { + case "sum": + sum = value + case "count": + count = uint64(value) + default: + limit, err := strconv.ParseFloat(fn, 64) + if err == nil { + summaryvalue[limit] = value + } + } + } + sample := &Sample{ + Labels: labels, + SummaryValue: summaryvalue, + Count: count, + Sum: sum, + Timestamp: point.Time(), + Expiration: now.Add(c.ExpirationInterval), + } + mname = sanitize(point.Name()) + + if !isValidTagName(mname) { + continue + } + + c.addMetricFamily(point, sample, mname, sampleID) + + case telegraf.Histogram: + var mname string + var sum float64 + var count uint64 + histogramvalue := make(map[float64]uint64) + for fn, fv := range point.Fields() { + var value float64 + switch fv := fv.(type) { + case int64: + value = float64(fv) + case uint64: + value = float64(fv) + case float64: + value = fv + default: + continue + } + + switch fn { + case "sum": + sum = value + case "count": + count = uint64(value) + default: + limit, err := strconv.ParseFloat(fn, 64) + if err == nil { + histogramvalue[limit] = uint64(value) + } + } + } + sample := &Sample{ + Labels: labels, + HistogramValue: histogramvalue, + Count: count, + Sum: sum, + Timestamp: point.Time(), + Expiration: now.Add(c.ExpirationInterval), + } + mname = sanitize(point.Name()) + + if !isValidTagName(mname) { + continue + } + + c.addMetricFamily(point, sample, mname, sampleID) + + default: + for fn, fv := range point.Fields() { + // Ignore string and bool fields. + var value float64 + switch fv := fv.(type) { + case int64: + value = float64(fv) + case uint64: + value = float64(fv) + case float64: + value = fv + default: + continue + } + + sample := &Sample{ + Labels: labels, + Value: value, + Timestamp: point.Time(), + Expiration: now.Add(c.ExpirationInterval), + } + + // Special handling of value field; supports passthrough from + // the prometheus input. 
+ var mname string + switch point.Type() { + case telegraf.Counter: + if fn == "counter" { + mname = sanitize(point.Name()) + } + case telegraf.Gauge: + if fn == "gauge" { + mname = sanitize(point.Name()) + } + } + if mname == "" { + if fn == "value" { + mname = sanitize(point.Name()) + } else { + mname = sanitize(fmt.Sprintf("%s_%s", point.Name(), fn)) + } + } + if !isValidTagName(mname) { + continue + } + c.addMetricFamily(point, sample, mname, sampleID) + + } + } + } + return nil +} + +func (c *Collector) Expire(now time.Time, age time.Duration) { + if age == 0 { + return + } + + for name, family := range c.fam { + for key, sample := range family.Samples { + if age != 0 && now.After(sample.Expiration) { + for k := range sample.Labels { + family.LabelSet[k]-- + } + delete(family.Samples, key) + + if len(family.Samples) == 0 { + delete(c.fam, name) + } + } + } + } +} diff --git a/plugins/outputs/prometheus_client/v2/collector.go b/plugins/outputs/prometheus_client/v2/collector.go new file mode 100644 index 000000000..b28a4deab --- /dev/null +++ b/plugins/outputs/prometheus_client/v2/collector.go @@ -0,0 +1,101 @@ +package v2 + +import ( + "sync" + "time" + + "github.com/influxdata/telegraf" + serializer "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +type Metric struct { + family *dto.MetricFamily + metric *dto.Metric +} + +func (m *Metric) Desc() *prometheus.Desc { + labelNames := make([]string, 0, len(m.metric.Label)) + for _, label := range m.metric.Label { + labelNames = append(labelNames, *label.Name) + } + + desc := prometheus.NewDesc(*m.family.Name, *m.family.Help, labelNames, nil) + + return desc +} + +func (m *Metric) Write(out *dto.Metric) error { + out.Label = m.metric.Label + out.Counter = m.metric.Counter + out.Untyped = m.metric.Untyped + out.Gauge = m.metric.Gauge + out.Histogram = m.metric.Histogram + out.Summary = m.metric.Summary + out.TimestampMs = m.metric.TimestampMs + return nil +} + +type Collector struct { + sync.Mutex + expireDuration time.Duration + coll *serializer.Collection +} + +func NewCollector(expire time.Duration, stringsAsLabel bool, exportTimestamp bool) *Collector { + config := serializer.FormatConfig{} + if stringsAsLabel { + config.StringHandling = serializer.StringAsLabel + } + + if exportTimestamp { + config.TimestampExport = serializer.ExportTimestamp + } + + return &Collector{ + expireDuration: expire, + coll: serializer.NewCollection(config), + } +} + +func (c *Collector) Describe(ch chan<- *prometheus.Desc) { + // Sending no descriptor at all marks the Collector as "unchecked", + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + return +} + +func (c *Collector) Collect(ch chan<- prometheus.Metric) { + c.Lock() + defer c.Unlock() + + // Expire metrics, doing this on Collect ensure metrics are removed even if no + // new metrics are added to the output. 
+ if c.expireDuration != 0 { + c.coll.Expire(time.Now(), c.expireDuration) + } + + for _, family := range c.coll.GetProto() { + for _, metric := range family.Metric { + ch <- &Metric{family: family, metric: metric} + } + } +} + +func (c *Collector) Add(metrics []telegraf.Metric) error { + c.Lock() + defer c.Unlock() + + for _, metric := range metrics { + c.coll.Add(metric, time.Now()) + } + + // Expire metrics, doing this on Add ensure metrics are removed even if no + // one is querying the data. + if c.expireDuration != 0 { + c.coll.Expire(time.Now(), c.expireDuration) + } + + return nil +} diff --git a/plugins/outputs/socket_writer/README.md b/plugins/outputs/socket_writer/README.md index 149cda2a6..5dc9d0246 100644 --- a/plugins/outputs/socket_writer/README.md +++ b/plugins/outputs/socket_writer/README.md @@ -32,6 +32,11 @@ It can output data in any of the [supported output formats](https://github.com/i ## Defaults to the OS configuration. # keep_alive_period = "5m" + ## Content encoding for message payloads, can be set to "gzip" or to + ## "identity" to apply no encoding. + ## + # content_encoding = "identity" + ## Data format to generate. ## Each data format has its own unique set of configuration options, read ## more about them here: diff --git a/plugins/outputs/socket_writer/socket_writer.go b/plugins/outputs/socket_writer/socket_writer.go index 8b0f56acc..eb286d919 100644 --- a/plugins/outputs/socket_writer/socket_writer.go +++ b/plugins/outputs/socket_writer/socket_writer.go @@ -1,13 +1,12 @@ package socket_writer import ( + "crypto/tls" "fmt" "log" "net" "strings" - "crypto/tls" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" tlsint "github.com/influxdata/telegraf/internal/tls" @@ -16,12 +15,15 @@ import ( ) type SocketWriter struct { + ContentEncoding string `toml:"content_encoding"` Address string KeepAlivePeriod *internal.Duration tlsint.ClientConfig serializers.Serializer + encoder internal.ContentEncoder + net.Conn } @@ -56,6 +58,11 @@ func (sw *SocketWriter) SampleConfig() string { ## Defaults to the OS configuration. # keep_alive_period = "5m" + ## Content encoding for packet-based connections (i.e. UDP, unixgram). + ## Can be set to "gzip" or to "identity" to apply no encoding. + ## + # content_encoding = "identity" + ## Data format to generate. ## Each data format has its own unique set of configuration options, read ## more about them here: @@ -92,6 +99,11 @@ func (sw *SocketWriter) Connect() error { if err := sw.setKeepAlive(c); err != nil { log.Printf("unable to configure keep alive (%s): %s", sw.Address, err) } + //set encoder + sw.encoder, err = internal.NewContentEncoder(sw.ContentEncoding) + if err != nil { + return err + } sw.Conn = c return nil @@ -128,9 +140,16 @@ func (sw *SocketWriter) Write(metrics []telegraf.Metric) error { for _, m := range metrics { bs, err := sw.Serialize(m) if err != nil { - //TODO log & keep going with remaining metrics - return err + log.Printf("D! [outputs.socket_writer] Could not serialize metric: %v", err) + continue } + + bs, err = sw.encoder.Encode(bs) + if err != nil { + log.Printf("D! 
[outputs.socket_writer] Could not encode metric: %v", err) + continue + } + if _, err := sw.Conn.Write(bs); err != nil { //TODO log & keep going with remaining strings if err, ok := err.(net.Error); !ok || !err.Temporary() { diff --git a/plugins/outputs/socket_writer/socket_writer_test.go b/plugins/outputs/socket_writer/socket_writer_test.go index 4d93469fa..14b25e6c5 100644 --- a/plugins/outputs/socket_writer/socket_writer_test.go +++ b/plugins/outputs/socket_writer/socket_writer_test.go @@ -2,7 +2,6 @@ package socket_writer import ( "bufio" - "bytes" "io/ioutil" "net" "os" @@ -49,7 +48,7 @@ func TestSocketWriter_unix(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_writer.TestSocketWriter_unix.sock") + sock := filepath.Join(tmpdir, "sw.TestSocketWriter_unix.sock") listener, err := net.Listen("unix", sock) require.NoError(t, err) @@ -70,7 +69,7 @@ func TestSocketWriter_unixgram(t *testing.T) { tmpdir, err := ioutil.TempDir("", "telegraf") require.NoError(t, err) defer os.RemoveAll(tmpdir) - sock := filepath.Join(tmpdir, "socket_writer.TestSocketWriter_unixgram.sock") + sock := filepath.Join(tmpdir, "sw.TSW_unixgram.sock") listener, err := net.ListenPacket("unixgram", sock) require.NoError(t, err) @@ -88,8 +87,10 @@ func testSocketWriter_stream(t *testing.T, sw *SocketWriter, lconn net.Conn) { metrics := []telegraf.Metric{} metrics = append(metrics, testutil.TestMetric(1, "test")) mbs1out, _ := sw.Serialize(metrics[0]) + mbs1out, _ = sw.encoder.Encode(mbs1out) metrics = append(metrics, testutil.TestMetric(2, "test")) mbs2out, _ := sw.Serialize(metrics[1]) + mbs2out, _ = sw.encoder.Encode(mbs2out) err := sw.Write(metrics) require.NoError(t, err) @@ -108,8 +109,12 @@ func testSocketWriter_packet(t *testing.T, sw *SocketWriter, lconn net.PacketCon metrics := []telegraf.Metric{} metrics = append(metrics, testutil.TestMetric(1, "test")) mbs1out, _ := sw.Serialize(metrics[0]) + mbs1out, _ = sw.encoder.Encode(mbs1out) + mbs1str := string(mbs1out) metrics = append(metrics, testutil.TestMetric(2, "test")) mbs2out, _ := sw.Serialize(metrics[1]) + mbs2out, _ = sw.encoder.Encode(mbs2out) + mbs2str := string(mbs2out) err := sw.Write(metrics) require.NoError(t, err) @@ -119,17 +124,12 @@ func testSocketWriter_packet(t *testing.T, sw *SocketWriter, lconn net.PacketCon for len(mstrins) < 2 { n, _, err := lconn.ReadFrom(buf) require.NoError(t, err) - for _, bs := range bytes.Split(buf[:n], []byte{'\n'}) { - if len(bs) == 0 { - continue - } - mstrins = append(mstrins, string(bs)+"\n") - } + mstrins = append(mstrins, string(buf[:n])) } require.Len(t, mstrins, 2) - assert.Equal(t, string(mbs1out), mstrins[0]) - assert.Equal(t, string(mbs2out), mstrins[1]) + assert.Equal(t, mbs1str, mstrins[0]) + assert.Equal(t, mbs2str, mstrins[1]) } func TestSocketWriter_Write_err(t *testing.T) { @@ -195,3 +195,17 @@ func TestSocketWriter_Write_reconnect(t *testing.T) { require.NoError(t, err) assert.Equal(t, string(mbsout), string(buf[:n])) } + +func TestSocketWriter_udp_gzip(t *testing.T) { + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + + sw := newSocketWriter() + sw.Address = "udp://" + listener.LocalAddr().String() + sw.ContentEncoding = "gzip" + + err = sw.Connect() + require.NoError(t, err) + + testSocketWriter_packet(t, sw, listener) +} diff --git a/plugins/outputs/stackdriver/README.md b/plugins/outputs/stackdriver/README.md new file mode 100644 index 000000000..27ef3a09f --- 
/dev/null +++ b/plugins/outputs/stackdriver/README.md @@ -0,0 +1,56 @@ +# Stackdriver Google Cloud Monitoring Output Plugin + +This plugin writes to the [Google Cloud Monitoring API][stackdriver] (formerly +Stackdriver) and requires [authentication][] with Google Cloud using either a +service account or user credentials. + +This plugin accesses APIs which are [chargeable][pricing]; you might incur +costs. + +Requires `project` to specify where Stackdriver metrics will be delivered. + +Metrics are grouped by the `namespace` variable and metric key - e.g. `custom.googleapis.com/telegraf/system/load5` + +[Resource type](https://cloud.google.com/monitoring/api/resources) is configured by the `resource_type` variable (default `global`). + +Additional resource labels can be configured by `resource_labels`. By default the required `project_id` label is always set to the `project` variable. + +### Configuration + +```toml +[[outputs.stackdriver]] + ## GCP Project + project = "erudite-bloom-151019" + + ## The namespace for the metric descriptor + namespace = "telegraf" + + ## Custom resource type + # resource_type = "generic_node" + + ## Additional resource labels + # [outputs.stackdriver.resource_labels] + # node_id = "$HOSTNAME" + # namespace = "myapp" + # location = "eu-north0" +``` + +### Restrictions + +Stackdriver does not support string values in custom metrics; any string +fields will not be written. + +The Stackdriver API does not allow writing points which are out of order, +older than 24 hours, or written more frequently than one point per +minute. Since Telegraf writes the newest points first and moves backwards +through the metric buffer, it may not be possible to write historical data +after an interruption. + +Points collected more frequently than once per minute may need to be +aggregated before they can be written. Consider using the [basicstats][] +aggregator to do this. + +[basicstats]: /plugins/aggregators/basicstats/README.md +[stackdriver]: https://cloud.google.com/monitoring/api/v3/ +[authentication]: https://cloud.google.com/docs/authentication/getting-started +[pricing]: https://cloud.google.com/stackdriver/pricing#stackdriver_monitoring_services diff --git a/plugins/outputs/stackdriver/stackdriver.go b/plugins/outputs/stackdriver/stackdriver.go new file mode 100644 index 000000000..3bd38614b --- /dev/null +++ b/plugins/outputs/stackdriver/stackdriver.go @@ -0,0 +1,394 @@ +package stackdriver + +import ( + "context" + "fmt" + "hash/fnv" + "log" + "path" + "sort" + "strings" + + monitoring "cloud.google.com/go/monitoring/apiv3" // Imports the Stackdriver Monitoring client package. + googlepb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/outputs" + "google.golang.org/api/option" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +// Stackdriver is the Google Stackdriver config info. +type Stackdriver struct { + Project string + Namespace string + ResourceType string `toml:"resource_type"` + ResourceLabels map[string]string `toml:"resource_labels"` + + client *monitoring.MetricClient +} + +const ( + // QuotaLabelsPerMetricDescriptor is the limit + // to labels (tags) per metric descriptor.
+ QuotaLabelsPerMetricDescriptor = 10 + // QuotaStringLengthForLabelKey is the limit + // to string length for label key. + QuotaStringLengthForLabelKey = 100 + // QuotaStringLengthForLabelValue is the limit + // to string length for label value. + QuotaStringLengthForLabelValue = 1024 + + // StartTime for cumulative metrics. + StartTime = int64(1) + // MaxInt is the max int64 value. + MaxInt = int(^uint(0) >> 1) + + errStringPointsOutOfOrder = "One or more of the points specified had an older end time than the most recent point" + errStringPointsTooOld = "Data points cannot be written more than 24h in the past" + errStringPointsTooFrequent = "One or more points were written more frequently than the maximum sampling period configured for the metric" +) + +var sampleConfig = ` + ## GCP Project + project = "erudite-bloom-151019" + + ## The namespace for the metric descriptor + namespace = "telegraf" + + ## Custom resource type + # resource_type = "generic_node" + + ## Additional resource labels + # [outputs.stackdriver.resource_labels] + # node_id = "$HOSTNAME" + # namespace = "myapp" + # location = "eu-north0" +` + +// Connect initiates the primary connection to the GCP project. +func (s *Stackdriver) Connect() error { + if s.Project == "" { + return fmt.Errorf("Project is a required field for stackdriver output") + } + + if s.Namespace == "" { + return fmt.Errorf("Namespace is a required field for stackdriver output") + } + + if s.ResourceType == "" { + s.ResourceType = "global" + } + + if s.ResourceLabels == nil { + s.ResourceLabels = make(map[string]string, 1) + } + + s.ResourceLabels["project_id"] = s.Project + + if s.client == nil { + ctx := context.Background() + client, err := monitoring.NewMetricClient(ctx, option.WithUserAgent(internal.ProductToken())) + if err != nil { + return err + } + s.client = client + } + + return nil +} + +// Sorted returns a copy of the metrics in time ascending order. A copy is +// made to avoid modifying the input metric slice since doing so is not +// allowed. +func sorted(metrics []telegraf.Metric) []telegraf.Metric { + batch := make([]telegraf.Metric, 0, len(metrics)) + for i := len(metrics) - 1; i >= 0; i-- { + batch = append(batch, metrics[i]) + } + sort.Slice(batch, func(i, j int) bool { + return batch[i].Time().Before(batch[j].Time()) + }) + return batch +} + +type timeSeriesBuckets map[uint64][]*monitoringpb.TimeSeries + +func (tsb timeSeriesBuckets) Add(m telegraf.Metric, f *telegraf.Field, ts *monitoringpb.TimeSeries) { + h := fnv.New64a() + h.Write([]byte(m.Name())) + h.Write([]byte{'\n'}) + h.Write([]byte(f.Key)) + h.Write([]byte{'\n'}) + for key, value := range m.Tags() { + h.Write([]byte(key)) + h.Write([]byte{'\n'}) + h.Write([]byte(value)) + h.Write([]byte{'\n'}) + } + k := h.Sum64() + + s := tsb[k] + s = append(s, ts) + tsb[k] = s +} + +// Write the metrics to Google Cloud Stackdriver. +func (s *Stackdriver) Write(metrics []telegraf.Metric) error { + ctx := context.Background() + + batch := sorted(metrics) + buckets := make(timeSeriesBuckets) + for _, m := range batch { + for _, f := range m.FieldList() { + value, err := getStackdriverTypedValue(f.Value) + if err != nil { + log.Printf("E! [outputs.stackdriver] get type failed: %s", err) + continue + } + + if value == nil { + continue + } + + metricKind, err := getStackdriverMetricKind(m.Type()) + if err != nil { + log.Printf("E! 
[outputs.stackdriver] get metric failed: %s", err) + continue + } + + timeInterval, err := getStackdriverTimeInterval(metricKind, StartTime, m.Time().Unix()) + if err != nil { + log.Printf("E! [outputs.stackdriver] get time interval failed: %s", err) + continue + } + + // Prepare an individual data point. + dataPoint := &monitoringpb.Point{ + Interval: timeInterval, + Value: value, + } + + // Prepare time series. + timeSeries := &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Type: path.Join("custom.googleapis.com", s.Namespace, m.Name(), f.Key), + Labels: getStackdriverLabels(m.TagList()), + }, + MetricKind: metricKind, + Resource: &monitoredrespb.MonitoredResource{ + Type: s.ResourceType, + Labels: s.ResourceLabels, + }, + Points: []*monitoringpb.Point{ + dataPoint, + }, + } + + buckets.Add(m, f, timeSeries) + } + } + + // process the buckets in order + keys := make([]uint64, 0, len(buckets)) + for k := range buckets { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + + for len(buckets) != 0 { + // can send up to 200 time series to stackdriver + timeSeries := make([]*monitoringpb.TimeSeries, 0, 200) + for i := 0; i < len(keys) && len(timeSeries) < cap(timeSeries); i++ { + k := keys[i] + s := buckets[k] + timeSeries = append(timeSeries, s[0]) + if len(s) == 1 { + delete(buckets, k) + keys = append(keys[:i], keys[i+1:]...) + i-- + continue + } + + s = s[1:] + buckets[k] = s + } + + // Prepare time series request. + timeSeriesRequest := &monitoringpb.CreateTimeSeriesRequest{ + Name: monitoring.MetricProjectPath(s.Project), + TimeSeries: timeSeries, + } + + // Create the time series in Stackdriver. + err := s.client.CreateTimeSeries(ctx, timeSeriesRequest) + if err != nil { + if strings.Contains(err.Error(), errStringPointsOutOfOrder) || + strings.Contains(err.Error(), errStringPointsTooOld) || + strings.Contains(err.Error(), errStringPointsTooFrequent) { + log.Printf("D! [outputs.stackdriver] unable to write to Stackdriver: %s", err) + return nil + } + log.Printf("E! 
[outputs.stackdriver] unable to write to Stackdriver: %s", err) + return err + } + } + + return nil +} + +func getStackdriverTimeInterval( + m metricpb.MetricDescriptor_MetricKind, + start int64, + end int64, +) (*monitoringpb.TimeInterval, error) { + switch m { + case metricpb.MetricDescriptor_GAUGE: + return &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: end, + }, + }, nil + case metricpb.MetricDescriptor_CUMULATIVE: + return &monitoringpb.TimeInterval{ + StartTime: &googlepb.Timestamp{ + Seconds: start, + }, + EndTime: &googlepb.Timestamp{ + Seconds: end, + }, + }, nil + case metricpb.MetricDescriptor_DELTA, metricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED: + fallthrough + default: + return nil, fmt.Errorf("unsupported metric kind %T", m) + } +} + +func getStackdriverMetricKind(vt telegraf.ValueType) (metricpb.MetricDescriptor_MetricKind, error) { + switch vt { + case telegraf.Untyped: + return metricpb.MetricDescriptor_GAUGE, nil + case telegraf.Gauge: + return metricpb.MetricDescriptor_GAUGE, nil + case telegraf.Counter: + return metricpb.MetricDescriptor_CUMULATIVE, nil + case telegraf.Histogram, telegraf.Summary: + fallthrough + default: + return metricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, fmt.Errorf("unsupported telegraf value type") + } +} + +func getStackdriverTypedValue(value interface{}) (*monitoringpb.TypedValue, error) { + switch v := value.(type) { + case uint64: + if v <= uint64(MaxInt) { + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v), + }, + }, nil + } + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(MaxInt), + }, + }, nil + case int64: + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v), + }, + }, nil + case float64: + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: float64(v), + }, + }, nil + case bool: + return &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_BoolValue{ + BoolValue: bool(v), + }, + }, nil + case string: + // String value types are not available for custom metrics + return nil, nil + default: + return nil, fmt.Errorf("value type \"%T\" not supported for stackdriver custom metrics", v) + } +} + +func getStackdriverLabels(tags []*telegraf.Tag) map[string]string { + labels := make(map[string]string) + for _, t := range tags { + labels[t.Key] = t.Value + } + for k, v := range labels { + if len(k) > QuotaStringLengthForLabelKey { + log.Printf( + "W! [outputs.stackdriver] removing tag [%s] key exceeds string length for label key [%d]", + k, + QuotaStringLengthForLabelKey, + ) + delete(labels, k) + continue + } + if len(v) > QuotaStringLengthForLabelValue { + log.Printf( + "W! [outputs.stackdriver] removing tag [%s] value exceeds string length for label value [%d]", + k, + QuotaStringLengthForLabelValue, + ) + delete(labels, k) + continue + } + } + if len(labels) > QuotaLabelsPerMetricDescriptor { + excess := len(labels) - QuotaLabelsPerMetricDescriptor + log.Printf( + "W! [outputs.stackdriver] tag count [%d] exceeds quota for stackdriver labels [%d] removing [%d] random tags", + len(labels), + QuotaLabelsPerMetricDescriptor, + excess, + ) + for k := range labels { + if excess == 0 { + break + } + excess-- + delete(labels, k) + } + } + + return labels +} + +// Close will terminate the session to the backend, returning error if an issue arises. 
+func (s *Stackdriver) Close() error { + return s.client.Close() +} + +// SampleConfig returns the formatted sample configuration for the plugin. +func (s *Stackdriver) SampleConfig() string { + return sampleConfig +} + +// Description returns the human-readable function definition of the plugin. +func (s *Stackdriver) Description() string { + return "Configuration for Google Cloud Stackdriver to send metrics to" +} + +func newStackdriver() *Stackdriver { + return &Stackdriver{} +} + +func init() { + outputs.Add("stackdriver", func() telegraf.Output { + return newStackdriver() + }) +} diff --git a/plugins/outputs/stackdriver/stackdriver_test.go b/plugins/outputs/stackdriver/stackdriver_test.go new file mode 100644 index 000000000..7ddaa4485 --- /dev/null +++ b/plugins/outputs/stackdriver/stackdriver_test.go @@ -0,0 +1,436 @@ +package stackdriver + +import ( + "context" + "errors" + "fmt" + "log" + "net" + "os" + "strings" + "testing" + "time" + + monitoring "cloud.google.com/go/monitoring/apiv3" + "github.com/golang/protobuf/proto" + emptypb "github.com/golang/protobuf/ptypes/empty" + googlepb "github.com/golang/protobuf/ptypes/timestamp" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" + "google.golang.org/api/option" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var mockMetric mockMetricServer + +type mockMetricServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.MetricServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockMetricServer) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest) (*emptypb.Empty, error) { + md, _ := metadata.FromIncomingContext(ctx) + if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") { + return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg) + } + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*emptypb.Empty), nil +} + +func TestMain(m *testing.M) { + serv := grpc.NewServer() + monitoringpb.RegisterMetricServiceServer(serv, &mockMetric) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestWrite(t *testing.T) { + expectedResponse := &emptypb.Empty{} + mockMetric.err = nil + mockMetric.reqs = nil + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + client: c, + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(testutil.MockMetrics()) + require.NoError(t, err) + + request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) + require.Equal(t, request.TimeSeries[0].Resource.Type, "global") + require.Equal(t, request.TimeSeries[0].Resource.Labels["project_id"], "projects/[PROJECT]") +} + +func TestWriteResourceTypeAndLabels(t *testing.T) { + expectedResponse := &emptypb.Empty{} + mockMetric.err = nil + mockMetric.reqs = nil + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + ResourceType: "foo", + ResourceLabels: map[string]string{ + "mylabel": "myvalue", + }, + client: c, + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(testutil.MockMetrics()) + require.NoError(t, err) + + request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) + require.Equal(t, request.TimeSeries[0].Resource.Type, "foo") + require.Equal(t, request.TimeSeries[0].Resource.Labels["project_id"], "projects/[PROJECT]") + require.Equal(t, request.TimeSeries[0].Resource.Labels["mylabel"], "myvalue") +} + +func TestWriteAscendingTime(t *testing.T) { + expectedResponse := &emptypb.Empty{} + mockMetric.err = nil + mockMetric.reqs = nil + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + client: c, + } + + // Metrics in descending order of timestamp + metrics := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(2, 0), + ), + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "value": 43, + }, + time.Unix(1, 0), + ), + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(metrics) + require.NoError(t, err) + + require.Len(t, mockMetric.reqs, 
2) + request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) + require.Len(t, request.TimeSeries, 1) + ts := request.TimeSeries[0] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 1, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) + + request = mockMetric.reqs[1].(*monitoringpb.CreateTimeSeriesRequest) + require.Len(t, request.TimeSeries, 1) + ts = request.TimeSeries[0] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 2, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(42), + }, + }) +} + +func TestWriteBatchable(t *testing.T) { + expectedResponse := &emptypb.Empty{} + mockMetric.err = nil + mockMetric.reqs = nil + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + client: c, + } + + // Metrics in descending order of timestamp + metrics := []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(2, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "foo": "foo", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(3, 0), + ), + testutil.MustMetric("cpu", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(1, 0), + ), + testutil.MustMetric("ram", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 42, + }, + time.Unix(4, 0), + ), + testutil.MustMetric("ram", + map[string]string{ + "foo": "foo", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(5, 0), + ), + testutil.MustMetric("ram", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(3, 0), + ), + testutil.MustMetric("disk", + map[string]string{ + "foo": "foo", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(3, 0), + ), + testutil.MustMetric("disk", + map[string]string{ + "foo": "bar", + }, + map[string]interface{}{ + "value": 43, + }, + time.Unix(1, 0), + ), + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(metrics) + require.NoError(t, err) + + require.Len(t, mockMetric.reqs, 2) + request := mockMetric.reqs[0].(*monitoringpb.CreateTimeSeriesRequest) + require.Len(t, request.TimeSeries, 6) + ts := request.TimeSeries[0] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 3, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) + + ts = request.TimeSeries[1] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 1, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) + + ts = request.TimeSeries[2] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, 
&monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 3, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) + + ts = request.TimeSeries[4] + require.Len(t, ts.Points, 1) + require.Equal(t, ts.Points[0].Interval, &monitoringpb.TimeInterval{ + EndTime: &googlepb.Timestamp{ + Seconds: 5, + }, + }) + require.Equal(t, ts.Points[0].Value, &monitoringpb.TypedValue{ + Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(43), + }, + }) +} + +func TestWriteIgnoredErrors(t *testing.T) { + tests := []struct { + name string + err error + expectedErr bool + }{ + { + name: "points too old", + err: errors.New(errStringPointsTooOld), + }, + { + name: "points out of order", + err: errors.New(errStringPointsOutOfOrder), + }, + { + name: "points too frequent", + err: errors.New(errStringPointsTooFrequent), + }, + { + name: "other errors reported", + err: errors.New("test"), + expectedErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockMetric.err = tt.err + mockMetric.reqs = nil + + c, err := monitoring.NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + s := &Stackdriver{ + Project: fmt.Sprintf("projects/%s", "[PROJECT]"), + Namespace: "test", + client: c, + } + + err = s.Connect() + require.NoError(t, err) + err = s.Write(testutil.MockMetrics()) + if tt.expectedErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestGetStackdriverLabels(t *testing.T) { + tags := []*telegraf.Tag{ + {Key: "project", Value: "bar"}, + {Key: "discuss", Value: "revolutionary"}, + {Key: "marble", Value: "discount"}, + {Key: "applied", Value: "falsify"}, + {Key: "test", Value: "foo"}, + {Key: "porter", Value: "discount"}, + {Key: "play", Value: "tiger"}, + {Key: "fireplace", Value: "display"}, + {Key: "host", Value: "this"}, + {Key: "name", Value: "bat"}, + {Key: "device", Value: "local"}, + {Key: "reserve", Value: "publication"}, + {Key: "xpfqacltlmpguimhtjlou2qlmf9uqqwk3teajwlwqkoxtsppbnjksaxvzc1aa973pho9m96gfnl5op8ku7sv93rexyx42qe3zty12ityv", Value: "keyquota"}, + {Key: "valuequota", Value: "icym5wcpejnhljcvy2vwk15svmhrtueoppwlvix61vlbaeedufn1g6u4jgwjoekwew9s2dboxtgrkiyuircnl8h1lbzntt9gzcf60qunhxurhiz0g2bynzy1v6eyn4ravndeiiugobsrsj2bfaguahg4gxn7nx4irwfknunhkk6jdlldevawj8levebjajcrcbeugewd14fa8o34ycfwx2ymalyeqxhfqrsksxnii2deqq6cghrzi6qzwmittkzdtye3imoygqmjjshiskvnzz1e4ipd9c6wfor5jsygn1kvcg6jm4clnsl1fnxotbei9xp4swrkjpgursmfmkyvxcgq9hoy435nwnolo3ipnvdlhk6pmlzpdjn6gqi3v9gv7jn5ro2p1t5ufxzfsvqq1fyrgoi7gvmttil1banh3cftkph1dcoaqfhl7y0wkvhwwvrmslmmxp1wedyn8bacd7akmjgfwdvcmrymbzvmrzfvq1gs1xnmmg8rsfxci2h6r1ralo3splf4f3bdg4c7cy0yy9qbxzxhcmdpwekwc7tdjs8uj6wmofm2aor4hum8nwyfwwlxy3yvsnbjy32oucsrmhcnu6l2i8laujkrhvsr9fcix5jflygznlydbqw5uhw1rg1g5wiihqumwmqgggemzoaivm3ut41vjaff4uqtqyuhuwblmuiphfkd7si49vgeeswzg7tpuw0oxmkesgibkcjtev2h9ouxzjs3eb71jffhdacyiuyhuxwvm5bnrjewbm4x2kmhgbirz3eoj7ijgplggdkx5vixufg65ont8zi1jabsuxx0vsqgprunwkugqkxg2r7iy6fmgs4lob4dlseinowkst6gp6x1ejreauyzjz7atzm3hbmr5rbynuqp4lxrnhhcbuoun69mavvaaki0bdz5ybmbbbz5qdv0odtpjo2aezat5uosjuhzbvic05jlyclikynjgfhencdkz3qcqzbzhnsynj1zdke0sk4zfpvfyryzsxv9pu0qm"}, + } + + labels := getStackdriverLabels(tags) + require.Equal(t, QuotaLabelsPerMetricDescriptor, len(labels)) +} diff --git a/plugins/outputs/syslog/README.md b/plugins/outputs/syslog/README.md new file mode 100644 index 000000000..cb9bc8965 --- /dev/null +++ 
b/plugins/outputs/syslog/README.md @@ -0,0 +1,108 @@ +# Syslog Output Plugin + +The syslog output plugin sends syslog messages transmitted over +[UDP](https://tools.ietf.org/html/rfc5426), +[TCP](https://tools.ietf.org/html/rfc6587), or +[TLS](https://tools.ietf.org/html/rfc5425), with or without octet-counting framing. + +Syslog messages are formatted according to +[RFC 5424](https://tools.ietf.org/html/rfc5424). + +### Configuration + +```toml +[[outputs.syslog]] + ## URL to connect to + ## ex: address = "tcp://127.0.0.1:8094" + ## ex: address = "tcp4://127.0.0.1:8094" + ## ex: address = "tcp6://127.0.0.1:8094" + ## ex: address = "tcp6://[2001:db8::1]:8094" + ## ex: address = "udp://127.0.0.1:8094" + ## ex: address = "udp4://127.0.0.1:8094" + ## ex: address = "udp6://127.0.0.1:8094" + address = "tcp://127.0.0.1:8094" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. + ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" + + ## The framing technique used to transport messages (default = "octet-counting"). + ## Messages can be transported using the octet-counting technique + ## (RFC5425#section-4.3.1, RFC6587#section-3.4.1) or the non-transparent + ## framing technique (RFC6587#section-3.4.2). Must + ## be one of "octet-counting", "non-transparent". + # framing = "octet-counting" + + ## The trailer to be expected in case of non-transparent framing (default = "LF"). + ## Must be one of "LF", or "NUL". + # trailer = "LF" + + ## SD-PARAMs settings + ## Syslog messages can contain key/value pairs within zero or more + ## structured data sections. For each unrecognized metric tag/field, an + ## SD-PARAM is created. + ## + ## Example: + ## [[outputs.syslog]] + ## sdparam_separator = "_" + ## default_sdid = "default@32473" + ## sdids = ["foo@123", "bar@456"] + ## + ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 + ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] + + ## SD-PARAMs separator between the sdid and tag/field key (default = "_") + # sdparam_separator = "_" + + ## Default sdid used for tags/fields that don't contain a prefix defined in + ## the explicit sdids setting below. If no default is specified, no SD-PARAMs + ## will be used for unrecognized fields. + # default_sdid = "default@32473" + + ## List of explicit prefixes to extract from tag/field keys and use as the + ## SDID, if they match (see above example for more details): + # sdids = ["foo@123", "bar@456"] + + ## Default severity value. Severity and Facility are used to calculate the + ## message PRI value (RFC5424#section-6.2.1). Used when no metric field + ## with key "severity_code" is defined. If unset, 5 (notice) is the default. + # default_severity_code = 5 + + ## Default facility value. Facility and Severity are used to calculate the + ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with + ## key "facility_code" is defined. If unset, 1 (user-level) is the default. + # default_facility_code = 1 + + ## Default APP-NAME value (RFC5424#section-6.2.5) + ## Used when no metric tag with key "appname" is defined. + ## If unset, "Telegraf" is the default. + # default_appname = "Telegraf" +``` + +### Metric mapping +The output plugin expects syslog metric tags and fields to match up with those +created by the [syslog input][]. + +The following table shows the metric tags, fields, and defaults used to format syslog messages. + +| Syslog field | Metric Tag | Metric Field | Default value | +| --- | --- | --- | --- | +| APP-NAME | appname | - | default_appname = "Telegraf" | +| TIMESTAMP | - | timestamp | Metric's own timestamp | +| VERSION | - | version | 1 | +| PRI | - | severity_code + (8 * facility_code) | default_severity_code=5 (notice), default_facility_code=1 (user-level) | +| HOSTNAME | hostname OR source OR host | - | os.Hostname() | +| MSGID | - | msgid | Metric name | +| PROCID | - | procid | - | +| MSG | - | msg | - | + +[syslog input]: /plugins/inputs/syslog#metrics diff --git a/plugins/outputs/syslog/syslog.go b/plugins/outputs/syslog/syslog.go new file mode 100644 index 000000000..41833f464 --- /dev/null +++ b/plugins/outputs/syslog/syslog.go @@ -0,0 +1,249 @@ +package syslog + +import ( + "crypto/tls" + "fmt" + "log" + "net" + "strconv" + "strings" + + "github.com/influxdata/go-syslog/v2/nontransparent" + "github.com/influxdata/go-syslog/v2/rfc5424" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + framing "github.com/influxdata/telegraf/internal/syslog" + tlsint "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +type Syslog struct { + Address string + KeepAlivePeriod *internal.Duration + DefaultSdid string + DefaultSeverityCode uint8 + DefaultFacilityCode uint8 + DefaultAppname string + Sdids []string + Separator string `toml:"sdparam_separator"` + Framing framing.Framing + Trailer nontransparent.TrailerType + net.Conn + tlsint.ClientConfig + mapper *SyslogMapper +} + +var sampleConfig = ` + ## URL to connect to + ## ex: address = "tcp://127.0.0.1:8094" + ## ex: address = "tcp4://127.0.0.1:8094" + ## ex: address = "tcp6://127.0.0.1:8094" + ## ex: address = "tcp6://[2001:db8::1]:8094" + ## ex: address = "udp://127.0.0.1:8094" + ## ex: address = "udp4://127.0.0.1:8094" + ## ex: address = "udp6://127.0.0.1:8094" + address = "tcp://127.0.0.1:8094" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. + ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" + + ## The framing technique used to transport messages (default = "octet-counting"). + ## Messages can be transported using the octet-counting technique + ## (RFC5425#section-4.3.1, RFC6587#section-3.4.1) or the non-transparent + ## framing technique (RFC6587#section-3.4.2). Must + ## be one of "octet-counting", "non-transparent". + # framing = "octet-counting" + + ## The trailer to be expected in case of non-transparent framing (default = "LF"). + ## Must be one of "LF", or "NUL". + # trailer = "LF" + + ## SD-PARAMs settings + ## Syslog messages can contain key/value pairs within zero or more + ## structured data sections. For each unrecognized metric tag/field, an + ## SD-PARAM is created.
+ ## + ## Example: + ## [[outputs.syslog]] + ## sdparam_separator = "_" + ## default_sdid = "default@32473" + ## sdids = ["foo@123", "bar@456"] + ## + ## input => xyzzy,x=y foo@123_value=42,bar@456_value2=84,something_else=1 + ## output (structured data only) => [foo@123 value=42][bar@456 value2=84][default@32473 something_else=1 x=y] + + ## SD-PARAMs separator between the sdid and tag/field key (default = "_") + # sdparam_separator = "_" + + ## Default sdid used for tags/fields that don't contain a prefix defined in + ## the explicit sdids setting below If no default is specified, no SD-PARAMs + ## will be used for unrecognized field. + # default_sdid = "default@32473" + + ## List of explicit prefixes to extract from tag/field keys and use as the + ## SDID, if they match (see above example for more details): + # sdids = ["foo@123", "bar@456"] + + ## Default severity value. Severity and Facility are used to calculate the + ## message PRI value (RFC5424#section-6.2.1). Used when no metric field + ## with key "severity_code" is defined. If unset, 5 (notice) is the default + # default_severity_code = 5 + + ## Default facility value. Facility and Severity are used to calculate the + ## message PRI value (RFC5424#section-6.2.1). Used when no metric field with + ## key "facility_code" is defined. If unset, 1 (user-level) is the default + # default_facility_code = 1 + + ## Default APP-NAME value (RFC5424#section-6.2.5) + ## Used when no metric tag with key "appname" is defined. + ## If unset, "Telegraf" is the default + # default_appname = "Telegraf" +` + +func (s *Syslog) Connect() error { + s.initializeSyslogMapper() + + spl := strings.SplitN(s.Address, "://", 2) + if len(spl) != 2 { + return fmt.Errorf("invalid address: %s", s.Address) + } + + tlsCfg, err := s.ClientConfig.TLSConfig() + if err != nil { + return err + } + + var c net.Conn + if tlsCfg == nil { + c, err = net.Dial(spl[0], spl[1]) + } else { + c, err = tls.Dial(spl[0], spl[1], tlsCfg) + } + if err != nil { + return err + } + + if err := s.setKeepAlive(c); err != nil { + log.Printf("unable to configure keep alive (%s): %s", s.Address, err) + } + + s.Conn = c + return nil +} + +func (s *Syslog) setKeepAlive(c net.Conn) error { + if s.KeepAlivePeriod == nil { + return nil + } + tcpc, ok := c.(*net.TCPConn) + if !ok { + return fmt.Errorf("cannot set keep alive on a %s socket", strings.SplitN(s.Address, "://", 2)[0]) + } + if s.KeepAlivePeriod.Duration == 0 { + return tcpc.SetKeepAlive(false) + } + if err := tcpc.SetKeepAlive(true); err != nil { + return err + } + return tcpc.SetKeepAlivePeriod(s.KeepAlivePeriod.Duration) +} + +func (s *Syslog) Close() error { + if s.Conn == nil { + return nil + } + err := s.Conn.Close() + s.Conn = nil + return err +} + +func (s *Syslog) SampleConfig() string { + return sampleConfig +} + +func (s *Syslog) Description() string { + return "Configuration for Syslog server to send metrics to" +} + +func (s *Syslog) Write(metrics []telegraf.Metric) (err error) { + if s.Conn == nil { + // previous write failed with permanent error and socket was closed. + if err = s.Connect(); err != nil { + return err + } + } + for _, metric := range metrics { + var msg *rfc5424.SyslogMessage + if msg, err = s.mapper.MapMetricToSyslogMessage(metric); err != nil { + log.Printf("E! [outputs.syslog] Failed to create syslog message: %v", err) + continue + } + var msgBytesWithFraming []byte + if msgBytesWithFraming, err = s.getSyslogMessageBytesWithFraming(msg); err != nil { + log.Printf("E! 
[outputs.syslog] Failed to convert syslog message with framing: %v", err) + continue + } + if _, err = s.Conn.Write(msgBytesWithFraming); err != nil { + if netErr, ok := err.(net.Error); !ok || !netErr.Temporary() { + s.Close() + s.Conn = nil + return fmt.Errorf("closing connection: %v", netErr) + } + return err + } + } + return nil +} + +func (s *Syslog) getSyslogMessageBytesWithFraming(msg *rfc5424.SyslogMessage) ([]byte, error) { + var msgString string + var err error + if msgString, err = msg.String(); err != nil { + return nil, err + } + msgBytes := []byte(msgString) + + if s.Framing == framing.OctetCounting { + return append([]byte(strconv.Itoa(len(msgBytes))+" "), msgBytes...), nil + } + // Non-transparent framing + return append(msgBytes, byte(s.Trailer)), nil +} + +func (s *Syslog) initializeSyslogMapper() { + if s.mapper != nil { + return + } + s.mapper = newSyslogMapper() + s.mapper.DefaultFacilityCode = s.DefaultFacilityCode + s.mapper.DefaultSeverityCode = s.DefaultSeverityCode + s.mapper.DefaultAppname = s.DefaultAppname + s.mapper.Separator = s.Separator + s.mapper.DefaultSdid = s.DefaultSdid + s.mapper.Sdids = s.Sdids +} + +func newSyslog() *Syslog { + return &Syslog{ + Framing: framing.OctetCounting, + Trailer: nontransparent.LF, + Separator: "_", + DefaultSeverityCode: uint8(5), // notice + DefaultFacilityCode: uint8(1), // user-level + DefaultAppname: "Telegraf", + } +} + +func init() { + outputs.Add("syslog", func() telegraf.Output { return newSyslog() }) +} diff --git a/plugins/outputs/syslog/syslog_mapper.go b/plugins/outputs/syslog/syslog_mapper.go new file mode 100644 index 000000000..4e4848205 --- /dev/null +++ b/plugins/outputs/syslog/syslog_mapper.go @@ -0,0 +1,199 @@ +package syslog + +import ( + "errors" + "math" + "os" + "strconv" + "strings" + "time" + + "github.com/influxdata/go-syslog/v2/rfc5424" + "github.com/influxdata/telegraf" +) + +type SyslogMapper struct { + DefaultSdid string + DefaultSeverityCode uint8 + DefaultFacilityCode uint8 + DefaultAppname string + Sdids []string + Separator string + reservedKeys map[string]bool +} + +// MapMetricToSyslogMessage maps metrics tags/fields to syslog messages +func (sm *SyslogMapper) MapMetricToSyslogMessage(metric telegraf.Metric) (*rfc5424.SyslogMessage, error) { + msg := &rfc5424.SyslogMessage{} + + sm.mapPriority(metric, msg) + sm.mapStructuredData(metric, msg) + sm.mapAppname(metric, msg) + mapHostname(metric, msg) + mapTimestamp(metric, msg) + mapMsgID(metric, msg) + mapVersion(metric, msg) + mapProcID(metric, msg) + mapMsg(metric, msg) + + if !msg.Valid() { + return nil, errors.New("metric could not produce valid syslog message") + } + return msg, nil +} + +func (sm *SyslogMapper) mapStructuredData(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + for _, tag := range metric.TagList() { + sm.mapStructuredDataItem(tag.Key, tag.Value, msg) + } + for _, field := range metric.FieldList() { + sm.mapStructuredDataItem(field.Key, formatValue(field.Value), msg) + } +} + +func (sm *SyslogMapper) mapStructuredDataItem(key string, value string, msg *rfc5424.SyslogMessage) { + if sm.reservedKeys[key] { + return + } + isExplicitSdid := false + for _, sdid := range sm.Sdids { + k := strings.TrimLeft(key, sdid+sm.Separator) + if len(key) > len(k) { + isExplicitSdid = true + msg.SetParameter(sdid, k, value) + break + } + } + if !isExplicitSdid && len(sm.DefaultSdid) > 0 { + k := strings.TrimPrefix(key, sm.DefaultSdid+sm.Separator) + msg.SetParameter(sm.DefaultSdid, k, value) + } +} + +func (sm *SyslogMapper) 
mapAppname(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetTag("appname"); ok { + msg.SetAppname(formatValue(value)) + } else { + //Use default appname + msg.SetAppname(sm.DefaultAppname) + } +} + +func mapMsgID(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetField("msgid"); ok { + msg.SetMsgID(formatValue(value)) + } else { + // We default to metric name + msg.SetMsgID(metric.Name()) + } +} + +func mapVersion(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetField("version"); ok { + switch v := value.(type) { + case uint64: + msg.SetVersion(uint16(v)) + return + } + } + msg.SetVersion(1) +} + +func mapMsg(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetField("msg"); ok { + msg.SetMessage(formatValue(value)) + } +} + +func mapProcID(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + if value, ok := metric.GetField("procid"); ok { + msg.SetProcID(formatValue(value)) + } +} + +func (sm *SyslogMapper) mapPriority(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + severityCode := sm.DefaultSeverityCode + facilityCode := sm.DefaultFacilityCode + + if value, ok := getFieldCode(metric, "severity_code"); ok { + severityCode = *value + } + + if value, ok := getFieldCode(metric, "facility_code"); ok { + facilityCode = *value + } + + priority := (8 * facilityCode) + severityCode + msg.SetPriority(priority) +} + +func mapHostname(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + // Try with hostname, then with source, then with host tags, then take OS Hostname + if value, ok := metric.GetTag("hostname"); ok { + msg.SetHostname(formatValue(value)) + } else if value, ok := metric.GetTag("source"); ok { + msg.SetHostname(formatValue(value)) + } else if value, ok := metric.GetTag("host"); ok { + msg.SetHostname(formatValue(value)) + } else if value, err := os.Hostname(); err == nil { + msg.SetHostname(value) + } +} + +func mapTimestamp(metric telegraf.Metric, msg *rfc5424.SyslogMessage) { + timestamp := metric.Time() + if value, ok := metric.GetField("timestamp"); ok { + switch v := value.(type) { + case int64: + timestamp = time.Unix(0, v).UTC() + } + } + msg.SetTimestamp(timestamp.Format(time.RFC3339)) +} + +func formatValue(value interface{}) string { + switch v := value.(type) { + case string: + return v + case bool: + if v { + return "1" + } + return "0" + case uint64: + return strconv.FormatUint(v, 10) + case int64: + return strconv.FormatInt(v, 10) + case float64: + if math.IsNaN(v) { + return "" + } + + if math.IsInf(v, 0) { + return "" + } + return strconv.FormatFloat(v, 'f', -1, 64) + } + + return "" +} + +func getFieldCode(metric telegraf.Metric, fieldKey string) (*uint8, bool) { + if value, ok := metric.GetField(fieldKey); ok { + if v, err := strconv.ParseUint(formatValue(value), 10, 8); err == nil { + r := uint8(v) + return &r, true + } + } + return nil, false +} + +func newSyslogMapper() *SyslogMapper { + return &SyslogMapper{ + reservedKeys: map[string]bool{ + "version": true, "severity_code": true, "facility_code": true, + "procid": true, "msgid": true, "msg": true, "timestamp": true, "sdid": true, + "hostname": true, "source": true, "host": true, "severity": true, + "facility": true, "appname": true}, + } +} diff --git a/plugins/outputs/syslog/syslog_mapper_test.go b/plugins/outputs/syslog/syslog_mapper_test.go new file mode 100644 index 000000000..300d5fcab --- /dev/null +++ b/plugins/outputs/syslog/syslog_mapper_test.go @@ -0,0 +1,200 @@ 
+package syslog + +import ( + "os" + "testing" + "time" + + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSyslogMapperWithDefaults(t *testing.T) { + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{}, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + hostname, err := os.Hostname() + assert.NoError(t, err) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z "+hostname+" Telegraf - testmetric -", str, "Wrong syslog message") +} + +func TestSyslogMapperWithHostname(t *testing.T) { + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "hostname": "testhost", + "source": "sourcevalue", + "host": "hostvalue", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", str, "Wrong syslog message") +} +func TestSyslogMapperWithHostnameSourceFallback(t *testing.T) { + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "source": "sourcevalue", + "host": "hostvalue", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z sourcevalue Telegraf - testmetric -", str, "Wrong syslog message") +} + +func TestSyslogMapperWithHostnameHostFallback(t *testing.T) { + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "host": "hostvalue", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z hostvalue Telegraf - testmetric -", str, "Wrong syslog message") +} + +func TestSyslogMapperWithDefaultSdid(t *testing.T) { + s := newSyslog() + s.DefaultSdid = "default@32473" + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "appname": "testapp", + "hostname": "testhost", + "tag1": "bar", + "default@32473_tag2": "foobar", + }, + map[string]interface{}{ + "severity_code": uint64(3), + "facility_code": uint64(3), + "msg": "Test message", + "procid": uint64(25), + "version": uint16(2), + "msgid": int64(555), + "timestamp": time.Date(2010, time.November, 10, 23, 30, 0, 0, time.UTC).UnixNano(), + "value1": int64(2), + "default@32473_value2": "foo", + "value3": float64(1.2), + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<27>2 2010-11-10T23:30:00Z testhost testapp 25 555 [default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"foo\" value3=\"1.2\"] Test message", str, "Wrong syslog message") +} + 
+func TestSyslogMapperWithDefaultSdidAndOtherSdids(t *testing.T) { + s := newSyslog() + s.DefaultSdid = "default@32473" + s.Sdids = []string{"bar@123", "foo@456"} + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "appname": "testapp", + "hostname": "testhost", + "tag1": "bar", + "default@32473_tag2": "foobar", + "bar@123_tag3": "barfoobar", + }, + map[string]interface{}{ + "severity_code": uint64(1), + "facility_code": uint64(3), + "msg": "Test message", + "procid": uint64(25), + "version": uint16(2), + "msgid": int64(555), + "timestamp": time.Date(2010, time.November, 10, 23, 30, 0, 0, time.UTC).UnixNano(), + "value1": int64(2), + "default@32473_value2": "default", + "bar@123_value3": int64(2), + "foo@456_value4": "foo", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<25>2 2010-11-10T23:30:00Z testhost testapp 25 555 [bar@123 tag3=\"barfoobar\" value3=\"2\"][default@32473 tag1=\"bar\" tag2=\"foobar\" value1=\"2\" value2=\"default\"][foo@456 value4=\"foo\"] Test message", str, "Wrong syslog message") +} + +func TestSyslogMapperWithNoSdids(t *testing.T) { + // Init mapper + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "appname": "testapp", + "hostname": "testhost", + "tag1": "bar", + "default@32473_tag2": "foobar", + "bar@123_tag3": "barfoobar", + "foo@456_tag4": "foobarfoo", + }, + map[string]interface{}{ + "severity_code": uint64(2), + "facility_code": uint64(3), + "msg": "Test message", + "procid": uint64(25), + "version": uint16(2), + "msgid": int64(555), + "timestamp": time.Date(2010, time.November, 10, 23, 30, 0, 0, time.UTC).UnixNano(), + "value1": int64(2), + "default@32473_value2": "default", + "bar@123_value3": int64(2), + "foo@456_value4": "foo", + }, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + str, _ := syslogMessage.String() + assert.Equal(t, "<26>2 2010-11-10T23:30:00Z testhost testapp 25 555 - Test message", str, "Wrong syslog message") +} diff --git a/plugins/outputs/syslog/syslog_test.go b/plugins/outputs/syslog/syslog_test.go new file mode 100644 index 000000000..7581a7b53 --- /dev/null +++ b/plugins/outputs/syslog/syslog_test.go @@ -0,0 +1,205 @@ +package syslog + +import ( + "net" + "sync" + "testing" + "time" + + "github.com/influxdata/telegraf" + framing "github.com/influxdata/telegraf/internal/syslog" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestGetSyslogMessageWithFramingOctectCounting(t *testing.T) { + // Init plugin + s := newSyslog() + s.initializeSyslogMapper() + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "hostname": "testhost", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + + assert.Equal(t, "59 <13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -", string(messageBytesWithFraming), "Incorrect Octect counting framing") +} + 
+func TestGetSyslogMessageWithFramingNonTransparent(t *testing.T) { + // Init plugin + s := newSyslog() + s.initializeSyslogMapper() + s.Framing = framing.NonTransparent + + // Init metrics + m1, _ := metric.New( + "testmetric", + map[string]string{ + "hostname": "testhost", + }, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC), + ) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(m1) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + + assert.Equal(t, "<13>1 2010-11-10T23:00:00Z testhost Telegraf - testmetric -\x00", string(messageBytesWithFraming), "Incorrect Octect counting framing") +} + +func TestSyslogWriteWithTcp(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + s := newSyslog() + s.Address = "tcp://" + listener.Addr().String() + + err = s.Connect() + require.NoError(t, err) + + lconn, err := listener.Accept() + require.NoError(t, err) + + testSyslogWriteWithStream(t, s, lconn) +} + +func TestSyslogWriteWithUdp(t *testing.T) { + listener, err := net.ListenPacket("udp", "127.0.0.1:0") + require.NoError(t, err) + + s := newSyslog() + s.Address = "udp://" + listener.LocalAddr().String() + + err = s.Connect() + require.NoError(t, err) + + testSyslogWriteWithPacket(t, s, listener) +} + +func testSyslogWriteWithStream(t *testing.T, s *Syslog, lconn net.Conn) { + metrics := []telegraf.Metric{} + m1, _ := metric.New( + "testmetric", + map[string]string{}, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)) + + metrics = append(metrics, m1) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0]) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + + err = s.Write(metrics) + require.NoError(t, err) + + buf := make([]byte, 256) + n, err := lconn.Read(buf) + require.NoError(t, err) + assert.Equal(t, string(messageBytesWithFraming), string(buf[:n])) +} + +func testSyslogWriteWithPacket(t *testing.T, s *Syslog, lconn net.PacketConn) { + s.Framing = framing.NonTransparent + metrics := []telegraf.Metric{} + m1, _ := metric.New( + "testmetric", + map[string]string{}, + map[string]interface{}{}, + time.Date(2010, time.November, 10, 23, 0, 0, 0, time.UTC)) + + metrics = append(metrics, m1) + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0]) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + + err = s.Write(metrics) + require.NoError(t, err) + + buf := make([]byte, 256) + n, _, err := lconn.ReadFrom(buf) + require.NoError(t, err) + assert.Equal(t, string(messageBytesWithFraming), string(buf[:n])) +} + +func TestSyslogWriteErr(t *testing.T) { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + s := newSyslog() + s.Address = "tcp://" + listener.Addr().String() + + err = s.Connect() + require.NoError(t, err) + s.Conn.(*net.TCPConn).SetReadBuffer(256) + + lconn, err := listener.Accept() + require.NoError(t, err) + lconn.(*net.TCPConn).SetWriteBuffer(256) + + metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")} + + // close the socket to generate an error + lconn.Close() + s.Conn.Close() + err = s.Write(metrics) + require.Error(t, err) + assert.Nil(t, s.Conn) +} + +func TestSyslogWriteReconnect(t *testing.T) { + listener, err := net.Listen("tcp", 
"127.0.0.1:0") + require.NoError(t, err) + + s := newSyslog() + s.Address = "tcp://" + listener.Addr().String() + + err = s.Connect() + require.NoError(t, err) + s.Conn.(*net.TCPConn).SetReadBuffer(256) + + lconn, err := listener.Accept() + require.NoError(t, err) + lconn.(*net.TCPConn).SetWriteBuffer(256) + lconn.Close() + s.Conn = nil + + wg := sync.WaitGroup{} + wg.Add(1) + var lerr error + go func() { + lconn, lerr = listener.Accept() + wg.Done() + }() + + metrics := []telegraf.Metric{testutil.TestMetric(1, "testerr")} + err = s.Write(metrics) + require.NoError(t, err) + + wg.Wait() + assert.NoError(t, lerr) + + syslogMessage, err := s.mapper.MapMetricToSyslogMessage(metrics[0]) + require.NoError(t, err) + messageBytesWithFraming, err := s.getSyslogMessageBytesWithFraming(syslogMessage) + require.NoError(t, err) + buf := make([]byte, 256) + n, err := lconn.Read(buf) + require.NoError(t, err) + assert.Equal(t, string(messageBytesWithFraming), string(buf[:n])) +} diff --git a/plugins/outputs/warp10/README.md b/plugins/outputs/warp10/README.md new file mode 100644 index 000000000..07e6cd25b --- /dev/null +++ b/plugins/outputs/warp10/README.md @@ -0,0 +1,50 @@ +# Warp10 Output Plugin + +The `warp10` output plugin writes metrics to [Warp 10][]. + +### Configuration + +```toml +[[outputs.warp10]] + # Prefix to add to the measurement. + prefix = "telegraf." + + # URL of the Warp 10 server + warp_url = "http://localhost:8080" + + # Write token to access your app on warp 10 + token = "Token" + + # Warp 10 query timeout + # timeout = "15s" + + ## Print Warp 10 error body + # print_error_body = false + + ## Max string error size + # max_string_error_size = 511 + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +``` + +### Output Format + +Metrics are converted and sent using the [Geo Time Series][] (GTS) input format. + +The class name of the reading is produced by combining the value of the +`prefix` option, the measurement name, and the field key. A dot (`.`) +character is used as the joining character. + +The GTS form provides support for the Telegraf integer, float, boolean, and +string types directly. Unsigned integer fields will be capped to the largest +64-bit integer (2^63-1) in case of overflow. + +Timestamps are sent in microsecond precision. 
+ +[Warp 10]: https://www.warp10.io +[Geo Time Series]: https://www.warp10.io/content/03_Documentation/03_Interacting_with_Warp_10/03_Ingesting_data/02_GTS_input_format diff --git a/plugins/outputs/warp10/warp10.go b/plugins/outputs/warp10/warp10.go new file mode 100644 index 000000000..eead153e0 --- /dev/null +++ b/plugins/outputs/warp10/warp10.go @@ -0,0 +1,291 @@ +package warp10 + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/internal/tls" + "github.com/influxdata/telegraf/plugins/outputs" +) + +const ( + defaultClientTimeout = 15 * time.Second +) + +// Warp10 output plugin +type Warp10 struct { + Prefix string `toml:"prefix"` + WarpURL string `toml:"warp_url"` + Token string `toml:"token"` + Timeout internal.Duration `toml:"timeout"` + PrintErrorBody bool `toml:"print_error_body"` + MaxStringErrorSize int `toml:"max_string_error_size"` + client *http.Client + tls.ClientConfig +} + +var sampleConfig = ` + # Prefix to add to the measurement. + prefix = "telegraf." + + # URL of the Warp 10 server + warp_url = "http://localhost:8080" + + # Write token to access your app on warp 10 + token = "Token" + + # Warp 10 query timeout + # timeout = "15s" + + ## Print Warp 10 error body + # print_error_body = false + + ## Max string error size + # max_string_error_size = 511 + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +` + +// MetricLine Warp 10 metrics +type MetricLine struct { + Metric string + Timestamp int64 + Value string + Tags string +} + +func (w *Warp10) createClient() (*http.Client, error) { + tlsCfg, err := w.ClientConfig.TLSConfig() + if err != nil { + return nil, err + } + + if w.Timeout.Duration == 0 { + w.Timeout.Duration = defaultClientTimeout + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsCfg, + Proxy: http.ProxyFromEnvironment, + }, + Timeout: w.Timeout.Duration, + } + + return client, nil +} + +// Connect to warp10 +func (w *Warp10) Connect() error { + client, err := w.createClient() + if err != nil { + return err + } + + w.client = client + return nil +} + +// GenWarp10Payload compute Warp 10 metrics payload +func (w *Warp10) GenWarp10Payload(metrics []telegraf.Metric) string { + collectString := make([]string, 0) + for _, mm := range metrics { + + for _, field := range mm.FieldList() { + + metric := &MetricLine{ + Metric: fmt.Sprintf("%s%s", w.Prefix, mm.Name()+"."+field.Key), + Timestamp: mm.Time().UnixNano() / 1000, + } + + metricValue, err := buildValue(field.Value) + if err != nil { + log.Printf("E! 
[outputs.warp10] Could not encode value: %v", err) + continue + } + metric.Value = metricValue + + tagsSlice := buildTags(mm.TagList()) + metric.Tags = strings.Join(tagsSlice, ",") + + messageLine := fmt.Sprintf("%d// %s{%s} %s\n", metric.Timestamp, metric.Metric, metric.Tags, metric.Value) + + collectString = append(collectString, messageLine) + } + } + return fmt.Sprint(strings.Join(collectString, "")) +} + +// Write metrics to Warp10 +func (w *Warp10) Write(metrics []telegraf.Metric) error { + payload := w.GenWarp10Payload(metrics) + if payload == "" { + return nil + } + + req, err := http.NewRequest("POST", w.WarpURL+"/api/v0/update", bytes.NewBufferString(payload)) + req.Header.Set("X-Warp10-Token", w.Token) + req.Header.Set("Content-Type", "text/plain") + + resp, err := w.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + if w.PrintErrorBody { + body, _ := ioutil.ReadAll(resp.Body) + return fmt.Errorf(w.WarpURL + ": " + w.HandleError(string(body), w.MaxStringErrorSize)) + } + + if len(resp.Status) < w.MaxStringErrorSize { + return fmt.Errorf(w.WarpURL + ": " + resp.Status) + } + + return fmt.Errorf(w.WarpURL + ": " + resp.Status[0:w.MaxStringErrorSize]) + } + + return nil +} + +func buildTags(tags []*telegraf.Tag) []string { + + tagsString := make([]string, len(tags)+1) + indexSource := 0 + for index, tag := range tags { + tagsString[index] = fmt.Sprintf("%s=%s", tag.Key, tag.Value) + indexSource = index + } + indexSource++ + tagsString[indexSource] = fmt.Sprintf("source=telegraf") + sort.Strings(tagsString) + return tagsString +} + +func buildValue(v interface{}) (string, error) { + var retv string + switch p := v.(type) { + case int64: + retv = intToString(p) + case string: + retv = fmt.Sprintf("'%s'", strings.Replace(p, "'", "\\'", -1)) + case bool: + retv = boolToString(p) + case uint64: + if p <= uint64(math.MaxInt64) { + retv = strconv.FormatInt(int64(p), 10) + } else { + retv = strconv.FormatInt(math.MaxInt64, 10) + } + case float64: + retv = floatToString(float64(p)) + default: + return "", fmt.Errorf("unsupported type: %T", v) + } + return retv, nil +} + +func intToString(inputNum int64) string { + return strconv.FormatInt(inputNum, 10) +} + +func boolToString(inputBool bool) string { + return strconv.FormatBool(inputBool) +} + +func uIntToString(inputNum uint64) string { + return strconv.FormatUint(inputNum, 10) +} + +func floatToString(inputNum float64) string { + return strconv.FormatFloat(inputNum, 'f', 6, 64) +} + +// SampleConfig get config +func (w *Warp10) SampleConfig() string { + return sampleConfig +} + +// Description get description +func (w *Warp10) Description() string { + return "Write metrics to Warp 10" +} + +// Close close +func (w *Warp10) Close() error { + return nil +} + +// Init Warp10 struct +func (w *Warp10) Init() error { + if w.MaxStringErrorSize <= 0 { + w.MaxStringErrorSize = 511 + } + return nil +} + +func init() { + outputs.Add("warp10", func() telegraf.Output { + return &Warp10{} + }) +} + +// HandleError read http error body and return a corresponding error +func (w *Warp10) HandleError(body string, maxStringSize int) string { + if body == "" { + return "Empty return" + } + + if strings.Contains(body, "Invalid token") { + return "Invalid token" + } + + if strings.Contains(body, "Write token missing") { + return "Write token missing" + } + + if strings.Contains(body, "Token Expired") { + return "Token Expired" + } + + if strings.Contains(body, "Token revoked") { + return 
"Token revoked" + } + + if strings.Contains(body, "exceed your Monthly Active Data Streams limit") || strings.Contains(body, "exceed the Monthly Active Data Streams limit") { + return "Exceeded Monthly Active Data Streams limit" + } + + if strings.Contains(body, "Daily Data Points limit being already exceeded") { + return "Exceeded Daily Data Points limit" + } + + if strings.Contains(body, "Application suspended or closed") { + return "Application suspended or closed" + } + + if strings.Contains(body, "broken pipe") { + return "broken pipe" + } + + if len(body) < maxStringSize { + return body + } + return body[0:maxStringSize] +} diff --git a/plugins/outputs/warp10/warp10_test.go b/plugins/outputs/warp10/warp10_test.go new file mode 100644 index 000000000..5b543b34c --- /dev/null +++ b/plugins/outputs/warp10/warp10_test.go @@ -0,0 +1,105 @@ +package warp10 + +import ( + "fmt" + "testing" + + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +type ErrorTest struct { + Message string + Expected string +} + +func TestWriteWarp10(t *testing.T) { + w := Warp10{ + Prefix: "unit.test", + WarpURL: "http://localhost:8090", + Token: "WRITE", + } + + payload := w.GenWarp10Payload(testutil.MockMetrics()) + require.Exactly(t, "1257894000000000// unit.testtest1.value{source=telegraf,tag1=value1} 1.000000\n", payload) +} + +func TestHandleWarp10Error(t *testing.T) { + w := Warp10{ + Prefix: "unit.test", + WarpURL: "http://localhost:8090", + Token: "WRITE", + } + tests := [...]*ErrorTest{ + { + Message: ` + + + + Error 500 io.warp10.script.WarpScriptException: Invalid token. + +

HTTP ERROR 500
+ Problem accessing /api/v0/update. Reason:
+     io.warp10.script.WarpScriptException: Invalid token.
+ `, + Expected: fmt.Sprintf("Invalid token"), + }, + { + Message: ` + Error 500 io.warp10.script.WarpScriptException: Token Expired.
+ HTTP ERROR 500
+ Problem accessing /api/v0/update. Reason:
+     io.warp10.script.WarpScriptException: Token Expired.
+ `, + Expected: fmt.Sprintf("Token Expired"), + }, + { + Message: ` + Error 500 io.warp10.script.WarpScriptException: Token revoked.
+ HTTP ERROR 500
+ Problem accessing /api/v0/update. Reason:
+     io.warp10.script.WarpScriptException: Token revoked.
+ `, + Expected: fmt.Sprintf("Token revoked"), + }, + { + Message: ` + Error 500 io.warp10.script.WarpScriptException: Write token missing.
+ HTTP ERROR 500
+ Problem accessing /api/v0/update. Reason:
+     io.warp10.script.WarpScriptException: Write token missing.

+ + + `, + Expected: "Write token missing", + }, + { + Message: `Error 503: server unavailable`, + Expected: "Error 503: server unavailable", + }, + } + + for _, handledError := range tests { + payload := w.HandleError(handledError.Message, 511) + require.Exactly(t, handledError.Expected, payload) + } + +} diff --git a/plugins/outputs/wavefront/README.md b/plugins/outputs/wavefront/README.md index be8fcd7dc..2daca328c 100644 --- a/plugins/outputs/wavefront/README.md +++ b/plugins/outputs/wavefront/README.md @@ -6,44 +6,49 @@ This plugin writes to a [Wavefront](https://www.wavefront.com) proxy, in Wavefro ### Configuration: ```toml -# Configuration for Wavefront output -[[outputs.wavefront]] - ## DNS name of the wavefront proxy server - host = "wavefront.example.com" + ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy + ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878 + url = "https://metrics.wavefront.com" - ## Port that the Wavefront proxy server listens on - port = 2878 + ## Authentication Token for Wavefront. Only required if using Direct Ingestion + #token = "DUMMY_TOKEN" + + ## DNS name of the wavefront proxy server. Do not use if url is specified + #host = "wavefront.example.com" + + ## Port that the Wavefront proxy server listens on. Do not use if url is specified + #port = 2878 ## prefix for metrics keys #prefix = "my.specific.prefix." - ## wether to use "value" for name of simple fields. default is false + ## whether to use "value" for name of simple fields. default is false #simple_fields = false - ## character to use between metric and field name. default is . (dot) + ## character to use between metric and field name. default is . (dot) #metric_separator = "." - ## Convert metric name paths to use metricSeperator character - ## When true will convert all _ (underscore) chartacters in final metric name. default is true + ## Convert metric name paths to use metricSeparator character + ## When true will convert all _ (underscore) characters in final metric name. default is true #convert_paths = true + ## Use Strict rules to sanitize metric and tag names from invalid characters + ## When enabled forward slash (/) and comma (,) will be accepted + #use_strict = false + ## Use Regex to sanitize metric and tag names from invalid characters ## Regex is more thorough, but significantly slower. default is false #use_regex = false ## point tags to use as the source name for Wavefront (if none found, host will be used) - #source_override = ["hostname", "agent_host", "node_host"] + #source_override = ["hostname", "address", "agent_host", "node_host"] ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true #convert_bool = true - ## Define a mapping, namespaced by metric prefix, from string values to numeric values - ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for - ## any metrics beginning with "elasticsearch" - #[[outputs.wavefront.string_to_number.elasticsearch]] - # green = 1.0 - # yellow = 0.5 - # red = 0.0 + ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any + ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. + #truncate_tags = false ``` @@ -70,12 +75,11 @@ source of the metric. 
### Wavefront Data format The expected input for Wavefront is specified in the following way: ``` - [] = [tagk1=tagv1 ...tagkN=tagvN] + [] = [tagk1=tagv1 ...tagkN=tagvN] ``` More information about the Wavefront data format is available [here](https://community.wavefront.com/docs/DOC-1031) ### Allowed values for metrics -Wavefront allows `integers` and `floats` as input values. It will ignore most `strings`, but when configured -will map certain `strings` to numeric values. By default it also maps `bool` values to numeric, false -> 0.0, -true -> 1.0 \ No newline at end of file +Wavefront allows `integers` and `floats` as input values. By default it also maps `bool` values to numeric, false -> 0.0, +true -> 1.0. To map `strings` use the [enum](../../processors/enum) processor plugin. diff --git a/plugins/outputs/wavefront/wavefront.go b/plugins/outputs/wavefront/wavefront.go index 18c5a6495..79c998e25 100644 --- a/plugins/outputs/wavefront/wavefront.go +++ b/plugins/outputs/wavefront/wavefront.go @@ -1,31 +1,35 @@ package wavefront import ( - "bytes" "fmt" - "log" - "net" "regexp" - "strconv" "strings" - "time" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/plugins/outputs" + wavefront "github.com/wavefronthq/wavefront-sdk-go/senders" ) +const maxTagLength = 254 + type Wavefront struct { - Prefix string + Url string + Token string Host string Port int + Prefix string SimpleFields bool MetricSeparator string ConvertPaths bool ConvertBool bool UseRegex bool + UseStrict bool + TruncateTags bool SourceOverride []string StringToNumber map[string][]map[string]float64 + + sender wavefront.Sender + Log telegraf.Logger } // catch many of the invalid chars that could appear in a metric or tag name @@ -37,46 +41,68 @@ var sanitizedChars = strings.NewReplacer( "=", "-", ) +// catch many of the invalid chars that could appear in a metric or tag name +var strictSanitizedChars = strings.NewReplacer( + "!", "-", "@", "-", "#", "-", "$", "-", "%", "-", "^", "-", "&", "-", + "*", "-", "(", "-", ")", "-", "+", "-", "`", "-", "'", "-", "\"", "-", + "[", "-", "]", "-", "{", "-", "}", "-", ":", "-", ";", "-", "<", "-", + ">", "-", "?", "-", "\\", "-", "|", "-", " ", "-", "=", "-", +) + // instead of Replacer which may miss some special characters we can use a regex pattern, but this is significantly slower than Replacer var sanitizedRegex = regexp.MustCompile("[^a-zA-Z\\d_.-]") -var tagValueReplacer = strings.NewReplacer("\"", "\\\"", "*", "-") +var tagValueReplacer = strings.NewReplacer("*", "-") var pathReplacer = strings.NewReplacer("_", "_") var sampleConfig = ` - ## DNS name of the wavefront proxy server - host = "wavefront.example.com" + ## Url for Wavefront Direct Ingestion or using HTTP with Wavefront Proxy + ## If using Wavefront Proxy, also specify port. example: http://proxyserver:2878 + url = "https://metrics.wavefront.com" - ## Port that the Wavefront proxy server listens on - port = 2878 + ## Authentication Token for Wavefront. Only required if using Direct Ingestion + #token = "DUMMY_TOKEN" + + ## DNS name of the wavefront proxy server. Do not use if url is specified + #host = "wavefront.example.com" + + ## Port that the Wavefront proxy server listens on. Do not use if url is specified + #port = 2878 ## prefix for metrics keys #prefix = "my.specific.prefix." - ## whether to use "value" for name of simple fields + ## whether to use "value" for name of simple fields. default is false #simple_fields = false - ## character to use between metric and field name. defaults to . 
(dot) + ## character to use between metric and field name. default is . (dot) #metric_separator = "." - ## Convert metric name paths to use metricSeperator character - ## When true (default) will convert all _ (underscore) chartacters in final metric name + ## Convert metric name paths to use metricSeparator character + ## When true will convert all _ (underscore) characters in final metric name. default is true #convert_paths = true + ## Use Strict rules to sanitize metric and tag names from invalid characters + ## When enabled forward slash (/) and comma (,) will be accepted + #use_strict = false + ## Use Regex to sanitize metric and tag names from invalid characters - ## Regex is more thorough, but significantly slower + ## Regex is more thorough, but significantly slower. default is false #use_regex = false ## point tags to use as the source name for Wavefront (if none found, host will be used) - #source_override = ["hostname", "agent_host", "node_host"] + #source_override = ["hostname", "address", "agent_host", "node_host"] - ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default true + ## whether to convert boolean values to numeric values, with false -> 0.0 and true -> 1.0. default is true #convert_bool = true + ## Truncate metric tags to a total of 254 characters for the tag name value. Wavefront will reject any + ## data point exceeding this limit if not truncated. Defaults to 'false' to provide backwards compatibility. + #truncate_tags = false + ## Define a mapping, namespaced by metric prefix, from string values to numeric values - ## The example below maps "green" -> 1.0, "yellow" -> 0.5, "red" -> 0.0 for - ## any metrics beginning with "elasticsearch" + ## deprecated in 1.9; use the enum processor plugin #[[outputs.wavefront.string_to_number.elasticsearch]] # green = 1.0 # yellow = 0.5 @@ -92,52 +118,58 @@ type MetricPoint struct { } func (w *Wavefront) Connect() error { + + if len(w.StringToNumber) > 0 { + w.Log.Warn("The string_to_number option is deprecated; please use the enum processor instead") + } + + if w.Url != "" { + w.Log.Debug("connecting over http/https using Url: %s", w.Url) + sender, err := wavefront.NewDirectSender(&wavefront.DirectConfiguration{ + Server: w.Url, + Token: w.Token, + FlushIntervalSeconds: 5, + }) + if err != nil { + return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Url: %s", w.Url) + } + w.sender = sender + } else { + w.Log.Debug("connecting over tcp using Host: %s and Port: %d", w.Host, w.Port) + sender, err := wavefront.NewProxySender(&wavefront.ProxyConfiguration{ + Host: w.Host, + MetricsPort: w.Port, + FlushIntervalSeconds: 5, + }) + if err != nil { + return fmt.Errorf("Wavefront: Could not create Wavefront Sender for Host: %s and Port: %d", w.Host, w.Port) + } + w.sender = sender + } + if w.ConvertPaths && w.MetricSeparator == "_" { w.ConvertPaths = false } if w.ConvertPaths { pathReplacer = strings.NewReplacer("_", w.MetricSeparator) } - - // Test Connection to Wavefront proxy Server - uri := fmt.Sprintf("%s:%d", w.Host, w.Port) - _, err := net.ResolveTCPAddr("tcp", uri) - if err != nil { - return fmt.Errorf("Wavefront: TCP address cannot be resolved %s", err.Error()) - } - connection, err := net.Dial("tcp", uri) - if err != nil { - return fmt.Errorf("Wavefront: TCP connect fail %s", err.Error()) - } - defer connection.Close() return nil } func (w *Wavefront) Write(metrics []telegraf.Metric) error { - // Send Data to Wavefront proxy Server - uri := fmt.Sprintf("%s:%d", w.Host, 
w.Port) - connection, err := net.Dial("tcp", uri) - if err != nil { - return fmt.Errorf("Wavefront: TCP connect fail %s", err.Error()) - } - defer connection.Close() - connection.SetWriteDeadline(time.Now().Add(5 * time.Second)) - for _, m := range metrics { - for _, metricPoint := range buildMetrics(m, w) { - metricLine := formatMetricPoint(metricPoint, w) - _, err := connection.Write([]byte(metricLine)) + for _, point := range w.buildMetrics(m) { + err := w.sender.SendMetric(point.Metric, point.Value, point.Timestamp, point.Source, point.Tags) if err != nil { - return fmt.Errorf("Wavefront: TCP writing error %s", err.Error()) + return fmt.Errorf("Wavefront sending error: %s", err.Error()) } } } - return nil } -func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint { +func (w *Wavefront) buildMetrics(m telegraf.Metric) []*MetricPoint { ret := []*MetricPoint{} for fieldName, value := range m.Fields() { @@ -150,6 +182,8 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint { if w.UseRegex { name = sanitizedRegex.ReplaceAllLiteralString(name, "-") + } else if w.UseStrict { + name = strictSanitizedChars.Replace(name) } else { name = sanitizedChars.Replace(name) } @@ -165,12 +199,12 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint { metricValue, buildError := buildValue(value, metric.Metric, w) if buildError != nil { - log.Printf("D! Output [wavefront] %s\n", buildError.Error()) + w.Log.Debug("Error building tags: %s\n", buildError.Error()) continue } metric.Value = metricValue - source, tags := buildTags(m.Tags(), w) + source, tags := w.buildTags(m.Tags()) metric.Source = source metric.Tags = tags @@ -179,7 +213,7 @@ func buildMetrics(m telegraf.Metric, w *Wavefront) []*MetricPoint { return ret } -func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string) { +func (w *Wavefront) buildTags(mTags map[string]string) (string, map[string]string) { // Remove all empty tags. for k, v := range mTags { @@ -188,30 +222,63 @@ func buildTags(mTags map[string]string, w *Wavefront) (string, map[string]string } } + // find source, use source_override property if needed var source string - sourceTagFound := false - - for _, s := range w.SourceOverride { - for k, v := range mTags { - if k == s { - source = v - mTags["telegraf_host"] = mTags["host"] - sourceTagFound = true - delete(mTags, k) + if s, ok := mTags["source"]; ok { + source = s + delete(mTags, "source") + } else { + sourceTagFound := false + for _, s := range w.SourceOverride { + for k, v := range mTags { + if k == s { + source = v + mTags["telegraf_host"] = mTags["host"] + sourceTagFound = true + delete(mTags, k) + break + } + } + if sourceTagFound { break } } - if sourceTagFound { - break + + if !sourceTagFound { + source = mTags["host"] } } + source = tagValueReplacer.Replace(source) - if !sourceTagFound { - source = mTags["host"] - } + // remove default host tag delete(mTags, "host") - return tagValueReplacer.Replace(source), mTags + // sanitize tag keys and values + tags := make(map[string]string) + for k, v := range mTags { + var key string + if w.UseRegex { + key = sanitizedRegex.ReplaceAllLiteralString(k, "-") + } else if w.UseStrict { + key = strictSanitizedChars.Replace(k) + } else { + key = sanitizedChars.Replace(k) + } + val := tagValueReplacer.Replace(v) + if w.TruncateTags { + if len(key) > maxTagLength { + w.Log.Warnf("Tag key length > 254. 
Skipping tag: %s", key) + continue + } + if len(key)+len(val) > maxTagLength { + w.Log.Debugf("Key+value length > 254: %s", key) + val = val[:maxTagLength-len(key)] + } + } + tags[key] = val + } + + return source, tags } func buildValue(v interface{}, name string, w *Wavefront) (float64, error) { @@ -245,38 +312,9 @@ func buildValue(v interface{}, name string, w *Wavefront) (float64, error) { default: return 0, fmt.Errorf("unexpected type: %T, with value: %v, for: %s", v, v, name) } - return 0, fmt.Errorf("unexpected type: %T, with value: %v, for: %s", v, v, name) } -func formatMetricPoint(metricPoint *MetricPoint, w *Wavefront) string { - buffer := bytes.NewBufferString("") - buffer.WriteString(metricPoint.Metric) - buffer.WriteString(" ") - buffer.WriteString(strconv.FormatFloat(metricPoint.Value, 'f', 6, 64)) - buffer.WriteString(" ") - buffer.WriteString(strconv.FormatInt(metricPoint.Timestamp, 10)) - buffer.WriteString(" source=\"") - buffer.WriteString(metricPoint.Source) - buffer.WriteString("\"") - - for k, v := range metricPoint.Tags { - buffer.WriteString(" ") - if w.UseRegex { - buffer.WriteString(sanitizedRegex.ReplaceAllLiteralString(k, "-")) - } else { - buffer.WriteString(sanitizedChars.Replace(k)) - } - buffer.WriteString("=\"") - buffer.WriteString(tagValueReplacer.Replace(v)) - buffer.WriteString("\"") - } - - buffer.WriteString("\n") - - return buffer.String() -} - func (w *Wavefront) SampleConfig() string { return sampleConfig } @@ -286,15 +324,18 @@ func (w *Wavefront) Description() string { } func (w *Wavefront) Close() error { + w.sender.Close() return nil } func init() { outputs.Add("wavefront", func() telegraf.Output { return &Wavefront{ + Token: "DUMMY_TOKEN", MetricSeparator: ".", ConvertPaths: true, ConvertBool: true, + TruncateTags: false, } }) } diff --git a/plugins/outputs/wavefront/wavefront_test.go b/plugins/outputs/wavefront/wavefront_test.go index f1722e668..40707e6d6 100644 --- a/plugins/outputs/wavefront/wavefront_test.go +++ b/plugins/outputs/wavefront/wavefront_test.go @@ -4,6 +4,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" "reflect" "strings" "testing" @@ -21,6 +22,7 @@ func defaultWavefront() *Wavefront { ConvertPaths: true, ConvertBool: true, UseRegex: false, + Log: testutil.Logger{}, } } @@ -50,6 +52,13 @@ func TestBuildMetrics(t *testing.T) { {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, }, }, + { + testutil.TestMetric(float64(1), "testing_just/another,metric:float", "metric2"), + []MetricPoint{ + {Metric: w.Prefix + "testing.just-another-metric-float", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, + {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, + }, + }, { testMetric1, []MetricPoint{{Metric: w.Prefix + "test.simple.metric", Value: 123, Timestamp: timestamp, Source: "testHost", Tags: map[string]string{"tag1": "value1"}}}, @@ -57,7 +66,47 @@ func TestBuildMetrics(t *testing.T) { } for _, mt := range metricTests { - ml := buildMetrics(mt.metric, w) + ml := w.buildMetrics(mt.metric) + for i, line := range ml { + if mt.metricPoints[i].Metric != line.Metric || mt.metricPoints[i].Value != line.Value { + t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricPoints[i].Metric, mt.metricPoints[i].Value, line.Metric, line.Value) + } + } + } + +} + +func 
TestBuildMetricsStrict(t *testing.T) { + w := defaultWavefront() + w.Prefix = "testthis." + w.UseStrict = true + + pathReplacer = strings.NewReplacer("_", w.MetricSeparator) + + var timestamp int64 = 1257894000 + + var metricTests = []struct { + metric telegraf.Metric + metricPoints []MetricPoint + }{ + { + testutil.TestMetric(float64(1), "testing_just*a%metric:float", "metric2"), + []MetricPoint{ + {Metric: w.Prefix + "testing.just-a-metric-float", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, + {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag1": "value1"}}, + }, + }, + { + testutil.TestMetric(float64(1), "testing_just/another,metric:float", "metric2"), + []MetricPoint{ + {Metric: w.Prefix + "testing.just/another,metric-float", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag/1": "value1", "tag,2": "value2"}}, + {Metric: w.Prefix + "testing.metric2", Value: 1, Timestamp: timestamp, Tags: map[string]string{"tag/1": "value1", "tag,2": "value2"}}, + }, + }, + } + + for _, mt := range metricTests { + ml := w.buildMetrics(mt.metric) for i, line := range ml { if mt.metricPoints[i].Metric != line.Metric || mt.metricPoints[i].Value != line.Value { t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricPoints[i].Metric, mt.metricPoints[i].Value, line.Metric, line.Value) @@ -96,7 +145,7 @@ func TestBuildMetricsWithSimpleFields(t *testing.T) { } for _, mt := range metricTests { - ml := buildMetrics(mt.metric, w) + ml := w.buildMetrics(mt.metric) for i, line := range ml { if mt.metricLines[i].Metric != line.Metric || mt.metricLines[i].Value != line.Value { t.Errorf("\nexpected\t%+v %+v\nreceived\t%+v %+v\n", mt.metricLines[i].Metric, mt.metricLines[i].Value, line.Metric, line.Value) @@ -140,10 +189,15 @@ func TestBuildTags(t *testing.T) { "aaa", map[string]string{"dc": "bbb"}, }, + { + map[string]string{"host": "aaa", "dc": "a*$a\\abbb\"som/et|hing else", "bad#k%e/y that*sho\\uld work": "value1"}, + "aaa", + map[string]string{"dc": "a-$a\\abbb\"som/et|hing else", "bad-k-e-y-that-sho-uld-work": "value1"}, + }, } for _, tt := range tagtests { - source, tags := buildTags(tt.ptIn, w) + source, tags := w.buildTags(tt.ptIn) if source != tt.outSource { t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outSource, source) } @@ -189,13 +243,13 @@ func TestBuildTagsWithSource(t *testing.T) { }, { map[string]string{"something": "abc", "host": "r*@l\"Ho/st"}, - "r-@l\\\"Ho/st", + "r-@l\"Ho/st", map[string]string{"something": "abc"}, }, } for _, tt := range tagtests { - source, tags := buildTags(tt.ptIn, w) + source, tags := w.buildTags(tt.ptIn) if source != tt.outSource { t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outSource, source) } @@ -264,25 +318,40 @@ func TestBuildValueString(t *testing.T) { } -func TestFormatMetricPoint(t *testing.T) { +func TestTagLimits(t *testing.T) { w := defaultWavefront() + w.TruncateTags = true - testpoint := &MetricPoint{ - Metric: "test.metric.something", - Value: 123.456, - Timestamp: 1257894000, - Source: "testSource", - Tags: map[string]string{"sp*c!@l\"-ch/rs": "sp*c!@l/ val\"ue"}, - } + // Should fail (all tags skipped) + template := make(map[string]string) + template[strings.Repeat("x", 255)] = "whatever" + _, tags := w.buildTags(template) + require.Empty(t, tags, "All tags should have been skipped") - expected := "test.metric.something 123.456000 1257894000 source=\"testSource\" sp-c--l--ch-rs=\"sp-c!@l/ val\\\"ue\"\n" + // Should truncate value + template = 
make(map[string]string) + longKey := strings.Repeat("x", 253) + template[longKey] = "whatever" + _, tags = w.buildTags(template) + require.Contains(t, tags, longKey, "Should contain truncated long key") + require.Equal(t, "w", tags[longKey]) - received := formatMetricPoint(testpoint, w) + // Should not truncate + template = make(map[string]string) + longKey = strings.Repeat("x", 251) + template[longKey] = "Hi!" + _, tags = w.buildTags(template) + require.Contains(t, tags, longKey, "Should contain non truncated long key") + require.Equal(t, "Hi!", tags[longKey]) - if expected != received { - t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", expected, received) - - } + // Turn off truncating and make sure it leaves the tags intact + w.TruncateTags = false + template = make(map[string]string) + longKey = strings.Repeat("x", 255) + template[longKey] = longKey + _, tags = w.buildTags(template) + require.Contains(t, tags, longKey, "Should contain non truncated long key") + require.Equal(t, longKey, tags[longKey]) } // Benchmarks to test performance of string replacement via Regex and Replacer diff --git a/plugins/parsers/EXAMPLE_README.md b/plugins/parsers/EXAMPLE_README.md new file mode 100644 index 000000000..b3c1bc2e2 --- /dev/null +++ b/plugins/parsers/EXAMPLE_README.md @@ -0,0 +1,46 @@ +# Example + +This description explains at a high level what the parser does and provides +links to where additional information about the format can be found. + +### Configuration + +This section contains the sample configuration for the parser. Since a +parser is not a standalone plugin, use the `file` or +`exec` input as the base config. + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "example" + + ## Describe variables using the standard SampleConfig style. + ## https://github.com/influxdata/telegraf/wiki/SampleConfig + example_option = "example_value" +``` + +#### example_option + +If an option requires a more expansive explanation than can be included inline +in the sample configuration, it may be described here. + +### Metrics + +The optional Metrics section contains details about how the parser converts +input data into Telegraf metrics. + +### Examples + +The optional Examples section can show an example conversion from the input +format using InfluxDB Line Protocol as the reference format. + +For line-delimited text formats, a diff may be appropriate: +```diff +- cpu|host=localhost|source=example.org|value=42 ++ cpu,host=localhost,source=example.org value=42 +``` diff --git a/plugins/parsers/collectd/README.md b/plugins/parsers/collectd/README.md new file mode 100644 index 000000000..cc7daa4f6 --- /dev/null +++ b/plugins/parsers/collectd/README.md @@ -0,0 +1,57 @@ +# Collectd + +The collectd format parses the collectd binary network protocol. Tags are +created for host, instance, type, and type instance. All collectd values are +added as float64 fields. + +For more information about the binary network protocol see +[here](https://collectd.org/wiki/index.php/Binary_protocol). + +You can control the cryptographic settings with parser options. Create an +authentication file and set `collectd_auth_file` to the path of the file, then +set the desired security level in `collectd_security_level`.
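For orientation, these options map directly onto the parser constructor added in this change. The following is a rough, illustrative Go sketch only: the auth file path and security level are placeholder values, and a real caller would feed actual collectd binary-protocol packets into `Parse`.

```go
package main

import (
	"log"

	"github.com/influxdata/telegraf/plugins/parsers/collectd"
)

func main() {
	// Arguments mirror collectd_auth_file, collectd_security_level,
	// collectd_typesdb, and collectd_parse_multivalue respectively.
	parser, err := collectd.NewCollectdParser(
		"/etc/collectd/auth_file",
		"encrypt",
		[]string{"/usr/share/collectd/types.db"},
		"split",
	)
	if err != nil {
		log.Fatal(err)
	}

	// In practice the byte slice would be a binary-protocol packet read from
	// the network (see the socket_listener configuration below); an empty
	// packet is used here only to keep the sketch self-contained.
	metrics, err := parser.Parse([]byte{})
	if err != nil {
		log.Println("parse error:", err)
	}
	log.Printf("parsed %d metrics", len(metrics))
}
```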
+ +Additional information including client setup can be found +[here](https://collectd.org/wiki/index.php/Networking_introduction#Cryptographic_setup). + +You can also change the path to the TypesDB or add additional TypesDB files using +`collectd_typesdb`. + +### Configuration + +```toml +[[inputs.socket_listener]] + service_address = "udp://:25826" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "collectd" + + ## Authentication file for cryptographic security levels + collectd_auth_file = "/etc/collectd/auth_file" + ## One of none (default), sign, or encrypt + collectd_security_level = "encrypt" + ## Path to TypesDB specifications + collectd_typesdb = ["/usr/share/collectd/types.db"] + + ## Multi-value plugins can be handled in two ways. + ## "split" will parse and store the multi-value plugin data into separate measurements + ## "join" will parse and store the multi-value plugin as a single multi-value measurement. + ## "split" is the default behavior for backward compatibility with previous versions of influxdb. + collectd_parse_multivalue = "split" +``` + +### Example Output + +``` +memory,type=memory,type_instance=buffered value=2520051712 1560455990829955922 +memory,type=memory,type_instance=used value=3710791680 1560455990829955922 +memory,type=memory,type_instance=buffered value=2520047616 1560455980830417318 +memory,type=memory,type_instance=cached value=9472626688 1560455980830417318 +memory,type=memory,type_instance=slab_recl value=2088894464 1560455980830417318 +memory,type=memory,type_instance=slab_unrecl value=146984960 1560455980830417318 +memory,type=memory,type_instance=free value=2978258944 1560455980830417318 +memory,type=memory,type_instance=used value=3707047936 1560455980830417318 +``` diff --git a/plugins/parsers/collectd/parser.go b/plugins/parsers/collectd/parser.go index 20525610c..6b7fbd756 100644 --- a/plugins/parsers/collectd/parser.go +++ b/plugins/parsers/collectd/parser.go @@ -21,7 +21,10 @@ type CollectdParser struct { // DefaultTags will be added to every parsed metric DefaultTags map[string]string - popts network.ParseOpts + //whether or not to split multi value metric into multiple metrics + //default value is split + ParseMultiValue string + popts network.ParseOpts } func (p *CollectdParser) SetParseOpts(popts *network.ParseOpts) { @@ -32,6 +35,7 @@ func NewCollectdParser( authFile string, securityLevel string, typesDB []string, + split string, ) (*CollectdParser, error) { popts := network.ParseOpts{} @@ -64,7 +68,8 @@ func NewCollectdParser( } } - parser := CollectdParser{popts: popts} + parser := CollectdParser{popts: popts, + ParseMultiValue: split} return &parser, nil } @@ -76,7 +81,7 @@ func (p *CollectdParser) Parse(buf []byte) ([]telegraf.Metric, error) { metrics := []telegraf.Metric{} for _, valueList := range valueLists { - metrics = append(metrics, UnmarshalValueList(valueList)...) + metrics = append(metrics, UnmarshalValueList(valueList, p.ParseMultiValue)...) } if len(p.DefaultTags) > 0 { @@ -111,47 +116,91 @@ func (p *CollectdParser) SetDefaultTags(tags map[string]string) { } // UnmarshalValueList translates a ValueList into a Telegraf metric.
-func UnmarshalValueList(vl *api.ValueList) []telegraf.Metric { +func UnmarshalValueList(vl *api.ValueList, multiValue string) []telegraf.Metric { timestamp := vl.Time.UTC() var metrics []telegraf.Metric - for i := range vl.Values { - var name string - name = fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) + + //set multiValue to default "split" if nothing is specified + if multiValue == "" { + multiValue = "split" + } + switch multiValue { + case "split": + for i := range vl.Values { + var name string + name = fmt.Sprintf("%s_%s", vl.Identifier.Plugin, vl.DSName(i)) + tags := make(map[string]string) + fields := make(map[string]interface{}) + + // Convert interface back to actual type, then to float64 + switch value := vl.Values[i].(type) { + case api.Gauge: + fields["value"] = float64(value) + case api.Derive: + fields["value"] = float64(value) + case api.Counter: + fields["value"] = float64(value) + } + + if vl.Identifier.Host != "" { + tags["host"] = vl.Identifier.Host + } + if vl.Identifier.PluginInstance != "" { + tags["instance"] = vl.Identifier.PluginInstance + } + if vl.Identifier.Type != "" { + tags["type"] = vl.Identifier.Type + } + if vl.Identifier.TypeInstance != "" { + tags["type_instance"] = vl.Identifier.TypeInstance + } + + // Drop invalid points + m, err := metric.New(name, tags, fields, timestamp) + if err != nil { + log.Printf("E! Dropping metric %v: %v", name, err) + continue + } + + metrics = append(metrics, m) + } + case "join": + name := vl.Identifier.Plugin tags := make(map[string]string) fields := make(map[string]interface{}) + for i := range vl.Values { + switch value := vl.Values[i].(type) { + case api.Gauge: + fields[vl.DSName(i)] = float64(value) + case api.Derive: + fields[vl.DSName(i)] = float64(value) + case api.Counter: + fields[vl.DSName(i)] = float64(value) + } - // Convert interface back to actual type, then to float64 - switch value := vl.Values[i].(type) { - case api.Gauge: - fields["value"] = float64(value) - case api.Derive: - fields["value"] = float64(value) - case api.Counter: - fields["value"] = float64(value) + if vl.Identifier.Host != "" { + tags["host"] = vl.Identifier.Host + } + if vl.Identifier.PluginInstance != "" { + tags["instance"] = vl.Identifier.PluginInstance + } + if vl.Identifier.Type != "" { + tags["type"] = vl.Identifier.Type + } + if vl.Identifier.TypeInstance != "" { + tags["type_instance"] = vl.Identifier.TypeInstance + } } - if vl.Identifier.Host != "" { - tags["host"] = vl.Identifier.Host - } - if vl.Identifier.PluginInstance != "" { - tags["instance"] = vl.Identifier.PluginInstance - } - if vl.Identifier.Type != "" { - tags["type"] = vl.Identifier.Type - } - if vl.Identifier.TypeInstance != "" { - tags["type_instance"] = vl.Identifier.TypeInstance - } - - // Drop invalid points m, err := metric.New(name, tags, fields, timestamp) if err != nil { log.Printf("E! 
Dropping metric %v: %v", name, err) - continue } metrics = append(metrics, m) + default: + log.Printf("parse-multi-value config can only be 'split' or 'join'") } return metrics } diff --git a/plugins/parsers/collectd/parser_test.go b/plugins/parsers/collectd/parser_test.go index 3aad04013..42a4d4c7a 100644 --- a/plugins/parsers/collectd/parser_test.go +++ b/plugins/parsers/collectd/parser_test.go @@ -6,6 +6,7 @@ import ( "collectd.org/api" "collectd.org/network" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/influxdata/telegraf" @@ -32,7 +33,7 @@ type testCase struct { var singleMetric = testCase{ []api.ValueList{ - api.ValueList{ + { Identifier: api.Identifier{ Host: "xyzzy", Plugin: "cpu", @@ -47,7 +48,7 @@ var singleMetric = testCase{ }, }, []metricData{ - metricData{ + { "cpu_value", map[string]string{ "type_instance": "user", @@ -64,7 +65,7 @@ var singleMetric = testCase{ var multiMetric = testCase{ []api.ValueList{ - api.ValueList{ + { Identifier: api.Identifier{ Host: "xyzzy", Plugin: "cpu", @@ -76,11 +77,11 @@ var multiMetric = testCase{ api.Derive(42), api.Gauge(42), }, - DSNames: []string(nil), + DSNames: []string{"t1", "t2"}, }, }, []metricData{ - metricData{ + { "cpu_0", map[string]string{ "type_instance": "user", @@ -92,7 +93,7 @@ var multiMetric = testCase{ "value": float64(42), }, }, - metricData{ + { "cpu_1", map[string]string{ "type_instance": "user", @@ -108,7 +109,7 @@ var multiMetric = testCase{ } func TestNewCollectdParser(t *testing.T) { - parser, err := NewCollectdParser("", "", []string{}) + parser, err := NewCollectdParser("", "", []string{}, "join") require.Nil(t, err) require.Equal(t, parser.popts.SecurityLevel, network.None) require.NotNil(t, parser.popts.PasswordLookup) @@ -133,6 +134,19 @@ func TestParse(t *testing.T) { } } +func TestParseMultiValueSplit(t *testing.T) { + buf, err := writeValueList(multiMetric.vl) + require.Nil(t, err) + bytes, err := buf.Bytes() + require.Nil(t, err) + + parser := &CollectdParser{ParseMultiValue: "split"} + metrics, err := parser.Parse(bytes) + require.Nil(t, err) + + assert.Equal(t, 2, len(metrics)) +} + func TestParse_DefaultTags(t *testing.T) { buf, err := writeValueList(singleMetric.vl) require.Nil(t, err) @@ -266,7 +280,7 @@ func TestParseLine(t *testing.T) { bytes, err := buf.Bytes() require.Nil(t, err) - parser, err := NewCollectdParser("", "", []string{}) + parser, err := NewCollectdParser("", "", []string{}, "split") require.Nil(t, err) metric, err := parser.ParseLine(string(bytes)) require.Nil(t, err) diff --git a/plugins/parsers/csv/README.md b/plugins/parsers/csv/README.md new file mode 100644 index 000000000..2189c8ce7 --- /dev/null +++ b/plugins/parsers/csv/README.md @@ -0,0 +1,118 @@ +# CSV + +The `csv` parser creates metrics from a document containing comma separated +values. + +### Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "csv" + + ## Indicates how many rows to treat as a header. By default, the parser assumes + ## there is no header and will parse the first row as data. If set to anything more + ## than 1, column names will be concatenated with the name listed in the next header row. + ## If `csv_column_names` is specified, the column names in header will be overridden. 
+ csv_header_row_count = 0 + + ## For assigning custom names to columns + ## If this is specified, all columns should have a name + ## Unnamed columns will be ignored by the parser. + ## If `csv_header_row_count` is set to 0, this config must be used + csv_column_names = [] + + ## For assigning explicit data types to columns. + ## Supported types: "int", "float", "bool", "string". + ## Specify types in order by column (e.g. `["string", "int", "float"]`) + ## If this is not specified, values are converted automatically to int, float, bool, or string based on their contents. + csv_column_types = [] + + ## Indicates the number of rows to skip before looking for header information. + csv_skip_rows = 0 + + ## Indicates the number of columns to skip before looking for data to parse. + ## These columns will be skipped in the header as well. + csv_skip_columns = 0 + + ## The separator between csv fields + ## By default, the parser assumes a comma (",") + csv_delimiter = "," + + ## The character reserved for marking a row as a comment row + ## Commented rows are skipped and not parsed + csv_comment = "" + + ## If set to true, the parser will remove leading whitespace from fields + ## By default, this is false + csv_trim_space = false + + ## Columns listed here will be added as tags. Any other columns + ## will be added as fields. + csv_tag_columns = [] + + ## The column to extract the name of the metric from. Will not be + ## included as field in metric. + csv_measurement_column = "" + + ## The column to extract time information for the metric + ## `csv_timestamp_format` must be specified if this is used. + ## Will not be included as field in metric. + csv_timestamp_column = "" + + ## The format of time data extracted from `csv_timestamp_column` + ## this must be specified if `csv_timestamp_column` is specified + csv_timestamp_format = "" +``` + +#### csv_timestamp_column, csv_timestamp_format + +By default the current time will be used for all created metrics. To set the +time from the parsed CSV data, use the `csv_timestamp_column` and +`csv_timestamp_format` options together to set the time to a value in the parsed +document. + +The `csv_timestamp_column` option specifies the key containing the time value and +`csv_timestamp_format` must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, +or a format string using the Go "reference time", which is defined to be the +**specific time**: `Mon Jan 2 15:04:05 MST 2006`. + +Consult the Go [time](https://golang.org/pkg/time/#Parse) package for details and additional examples +on how to set the time format. + +### Metrics + +One metric is created for each row with the columns added as fields. The type +of the field is automatically determined based on the contents of the value. + +In addition to the options above, you can use [metric filtering][] to skip over +columns and rows.
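These options correspond one-to-one with fields on the Go `Parser` struct introduced by this change. As a rough sketch of programmatic use (the column layout and the sample record below are invented for illustration):

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/influxdata/telegraf/plugins/parsers/csv"
)

func main() {
	// Mirrors csv_header_row_count, csv_tag_columns, csv_timestamp_column,
	// and csv_timestamp_format from the configuration above.
	p := csv.Parser{
		MetricName:      "cpu",
		HeaderRowCount:  1,
		TagColumns:      []string{"cpu"},
		TimestampColumn: "time",
		TimestampFormat: "2006-01-02T15:04:05Z07:00",
		TimeFunc:        time.Now,
	}

	data := "cpu,time_user,time_idle,time\ncpu0,42,58,2018-09-13T13:03:28Z\n"
	metrics, err := p.Parse([]byte(data))
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range metrics {
		// "cpu" becomes a tag, time_user/time_idle become integer fields,
		// and the "time" column sets the metric timestamp.
		fmt.Println(m.Name(), m.Tags(), m.Fields(), m.Time())
	}
}
```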
+ +### Examples + +Config: +``` +[[inputs.file]] + files = ["example"] + data_format = "csv" + csv_header_row_count = 1 + csv_timestamp_column = "time" + csv_timestamp_format = "2006-01-02T15:04:05Z07:00" +``` + +Input: +``` +measurement,cpu,time_user,time_system,time_idle,time +cpu,cpu0,42,42,42,2018-09-13T13:03:28Z +``` + +Output: +``` +cpu cpu=cpu0,time_user=42,time_system=42,time_idle=42 1536869008000000000 +``` + +[metric filtering]: /docs/CONFIGURATION.md#metric-filtering diff --git a/plugins/parsers/csv/parser.go b/plugins/parsers/csv/parser.go new file mode 100644 index 000000000..7f8076917 --- /dev/null +++ b/plugins/parsers/csv/parser.go @@ -0,0 +1,259 @@ +package csv + +import ( + "bytes" + "encoding/csv" + "fmt" + "strconv" + "strings" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" +) + +type TimeFunc func() time.Time + +type Parser struct { + MetricName string + HeaderRowCount int + SkipRows int + SkipColumns int + Delimiter string + Comment string + TrimSpace bool + ColumnNames []string + ColumnTypes []string + TagColumns []string + MeasurementColumn string + TimestampColumn string + TimestampFormat string + DefaultTags map[string]string + TimeFunc func() time.Time +} + +func (p *Parser) SetTimeFunc(fn TimeFunc) { + p.TimeFunc = fn +} + +func (p *Parser) compile(r *bytes.Reader) (*csv.Reader, error) { + csvReader := csv.NewReader(r) + // ensures that the reader reads records of different lengths without an error + csvReader.FieldsPerRecord = -1 + if p.Delimiter != "" { + csvReader.Comma = []rune(p.Delimiter)[0] + } + if p.Comment != "" { + csvReader.Comment = []rune(p.Comment)[0] + } + csvReader.TrimLeadingSpace = p.TrimSpace + return csvReader, nil +} + +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + r := bytes.NewReader(buf) + csvReader, err := p.compile(r) + if err != nil { + return nil, err + } + // skip first rows + for i := 0; i < p.SkipRows; i++ { + csvReader.Read() + } + // if there is a header and nothing in DataColumns + // set DataColumns to names extracted from the header + headerNames := make([]string, 0) + if len(p.ColumnNames) == 0 { + for i := 0; i < p.HeaderRowCount; i++ { + header, err := csvReader.Read() + if err != nil { + return nil, err + } + //concatenate header names + for i := range header { + name := header[i] + if p.TrimSpace { + name = strings.Trim(name, " ") + } + if len(headerNames) <= i { + headerNames = append(headerNames, name) + } else { + headerNames[i] = headerNames[i] + name + } + } + } + p.ColumnNames = headerNames[p.SkipColumns:] + } else { + // if columns are named, just skip header rows + for i := 0; i < p.HeaderRowCount; i++ { + csvReader.Read() + } + } + + table, err := csvReader.ReadAll() + if err != nil { + return nil, err + } + + metrics := make([]telegraf.Metric, 0) + for _, record := range table { + m, err := p.parseRecord(record) + if err != nil { + return metrics, err + } + metrics = append(metrics, m) + } + return metrics, nil +} + +// ParseLine does not use any information in header and assumes DataColumns is set +// it will also not skip any rows +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { + r := bytes.NewReader([]byte(line)) + csvReader, err := p.compile(r) + if err != nil { + return nil, err + } + + // if there is nothing in DataColumns, ParseLine will fail + if len(p.ColumnNames) == 0 { + return nil, fmt.Errorf("[parsers.csv] data columns must be specified") + } + + record, err := 
csvReader.Read() + if err != nil { + return nil, err + } + m, err := p.parseRecord(record) + if err != nil { + return nil, err + } + return m, nil +} + +func (p *Parser) parseRecord(record []string) (telegraf.Metric, error) { + recordFields := make(map[string]interface{}) + tags := make(map[string]string) + + // skip columns in record + record = record[p.SkipColumns:] +outer: + for i, fieldName := range p.ColumnNames { + if i < len(record) { + value := record[i] + if p.TrimSpace { + value = strings.Trim(value, " ") + } + + for _, tagName := range p.TagColumns { + if tagName == fieldName { + tags[tagName] = value + continue outer + } + } + + // Try explicit conversion only when column types is defined. + if len(p.ColumnTypes) > 0 { + // Throw error if current column count exceeds defined types. + if i >= len(p.ColumnTypes) { + return nil, fmt.Errorf("column type: column count exceeded") + } + + var val interface{} + var err error + + switch p.ColumnTypes[i] { + case "int": + val, err = strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, fmt.Errorf("column type: parse int error %s", err) + } + case "float": + val, err = strconv.ParseFloat(value, 64) + if err != nil { + return nil, fmt.Errorf("column type: parse float error %s", err) + } + case "bool": + val, err = strconv.ParseBool(value) + if err != nil { + return nil, fmt.Errorf("column type: parse bool error %s", err) + } + default: + val = value + } + + recordFields[fieldName] = val + continue + } + + // attempt type conversions + if iValue, err := strconv.ParseInt(value, 10, 64); err == nil { + recordFields[fieldName] = iValue + } else if fValue, err := strconv.ParseFloat(value, 64); err == nil { + recordFields[fieldName] = fValue + } else if bValue, err := strconv.ParseBool(value); err == nil { + recordFields[fieldName] = bValue + } else { + recordFields[fieldName] = value + } + } + } + + // add default tags + for k, v := range p.DefaultTags { + tags[k] = v + } + + // will default to plugin name + measurementName := p.MetricName + if recordFields[p.MeasurementColumn] != nil && recordFields[p.MeasurementColumn] != "" { + measurementName = fmt.Sprintf("%v", recordFields[p.MeasurementColumn]) + } + + metricTime, err := parseTimestamp(p.TimeFunc, recordFields, p.TimestampColumn, p.TimestampFormat) + if err != nil { + return nil, err + } + + // Exclude `TimestampColumn` and `MeasurementColumn` + delete(recordFields, p.TimestampColumn) + delete(recordFields, p.MeasurementColumn) + + m, err := metric.New(measurementName, tags, recordFields, metricTime) + if err != nil { + return nil, err + } + return m, nil +} + +// ParseTimestamp return a timestamp, if there is no timestamp on the csv it +// will be the current timestamp, else it will try to parse the time according +// to the format. 
+func parseTimestamp(timeFunc func() time.Time, recordFields map[string]interface{}, + timestampColumn, timestampFormat string, +) (time.Time, error) { + if timestampColumn != "" { + if recordFields[timestampColumn] == nil { + return time.Time{}, fmt.Errorf("timestamp column: %v could not be found", timestampColumn) + } + + switch timestampFormat { + case "": + return time.Time{}, fmt.Errorf("timestamp format must be specified") + default: + metricTime, err := internal.ParseTimestamp(timestampFormat, recordFields[timestampColumn], "UTC") + if err != nil { + return time.Time{}, err + } + return metricTime, err + } + } + + return timeFunc(), nil +} + +// SetDefaultTags set the DefaultTags +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} diff --git a/plugins/parsers/csv/parser_test.go b/plugins/parsers/csv/parser_test.go new file mode 100644 index 000000000..c0ef5f1cb --- /dev/null +++ b/plugins/parsers/csv/parser_test.go @@ -0,0 +1,433 @@ +package csv + +import ( + "fmt" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var DefaultTime = func() time.Time { + return time.Unix(3600, 0) +} + +func TestBasicCSV(t *testing.T) { + p := Parser{ + ColumnNames: []string{"first", "second", "third"}, + TagColumns: []string{"third"}, + TimeFunc: DefaultTime, + } + + _, err := p.ParseLine("1.4,true,hi") + require.NoError(t, err) +} + +func TestHeaderConcatenationCSV(t *testing.T) { + p := Parser{ + HeaderRowCount: 2, + MeasurementColumn: "3", + TimeFunc: DefaultTime, + } + testCSV := `first,second +1,2,3 +3.4,70,test_name` + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, "test_name", metrics[0].Name()) +} + +func TestHeaderOverride(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimeFunc: DefaultTime, + } + testCSV := `line1,line2,line3 +3.4,70,test_name` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, "test_name", metrics[0].Name()) +} + +func TestTimestamp(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "02/01/06 03:04:05 PM", + TimeFunc: DefaultTime, + } + testCSV := `line1,line2,line3 +23/05/09 04:05:06 PM,70,test_name +07/11/09 04:05:06 PM,80,test_name2` + metrics, err := p.Parse([]byte(testCSV)) + + require.NoError(t, err) + require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706000000000)) + require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000)) +} + +func TestTimestampError(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimeFunc: DefaultTime, + } + testCSV := `line1,line2,line3 +23/05/09 04:05:06 PM,70,test_name +07/11/09 04:05:06 PM,80,test_name2` + _, err := p.Parse([]byte(testCSV)) + require.Equal(t, fmt.Errorf("timestamp format must be specified"), err) +} + +func TestTimestampUnixFormat(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + } + testCSV := `line1,line2,line3 +1243094706,70,test_name 
+1257609906,80,test_name2` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706000000000)) + require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906000000000)) +} + +func TestTimestampUnixMSFormat(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimestampColumn: "first", + TimestampFormat: "unix_ms", + TimeFunc: DefaultTime, + } + testCSV := `line1,line2,line3 +1243094706123,70,test_name +1257609906123,80,test_name2` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, metrics[0].Time().UnixNano(), int64(1243094706123000000)) + require.Equal(t, metrics[1].Time().UnixNano(), int64(1257609906123000000)) +} + +func TestQuotedCharacter(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimeFunc: DefaultTime, + } + + testCSV := `line1,line2,line3 +"3,4",70,test_name` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, "3,4", metrics[0].Fields()["first"]) +} + +func TestDelimiter(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + Delimiter: "%", + ColumnNames: []string{"first", "second", "third"}, + MeasurementColumn: "third", + TimeFunc: DefaultTime, + } + + testCSV := `line1%line2%line3 +3,4%70%test_name` + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, "3,4", metrics[0].Fields()["first"]) +} + +func TestValueConversion(t *testing.T) { + p := Parser{ + HeaderRowCount: 0, + Delimiter: ",", + ColumnNames: []string{"first", "second", "third", "fourth"}, + MetricName: "test_value", + TimeFunc: DefaultTime, + } + testCSV := `3.3,4,true,hello` + + expectedTags := make(map[string]string) + expectedFields := map[string]interface{}{ + "first": 3.3, + "second": 4, + "third": true, + "fourth": "hello", + } + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + + expectedMetric, err1 := metric.New("test_value", expectedTags, expectedFields, time.Unix(0, 0)) + returnedMetric, err2 := metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) + require.NoError(t, err1) + require.NoError(t, err2) + + //deep equal fields + require.Equal(t, expectedMetric.Fields(), returnedMetric.Fields()) + + // Test explicit type conversion. 
+ p.ColumnTypes = []string{"float", "int", "bool", "string"} + + metrics, err = p.Parse([]byte(testCSV)) + require.NoError(t, err) + + returnedMetric, err2 = metric.New(metrics[0].Name(), metrics[0].Tags(), metrics[0].Fields(), time.Unix(0, 0)) + require.NoError(t, err2) + + //deep equal fields + require.Equal(t, expectedMetric.Fields(), returnedMetric.Fields()) +} + +func TestSkipComment(t *testing.T) { + p := Parser{ + HeaderRowCount: 0, + Comment: "#", + ColumnNames: []string{"first", "second", "third", "fourth"}, + MetricName: "test_value", + TimeFunc: DefaultTime, + } + testCSV := `#3.3,4,true,hello +4,9.9,true,name_this` + + expectedFields := map[string]interface{}{ + "first": int64(4), + "second": 9.9, + "third": true, + "fourth": "name_this", + } + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, expectedFields, metrics[0].Fields()) +} + +func TestTrimSpace(t *testing.T) { + p := Parser{ + HeaderRowCount: 0, + TrimSpace: true, + ColumnNames: []string{"first", "second", "third", "fourth"}, + MetricName: "test_value", + TimeFunc: DefaultTime, + } + testCSV := ` 3.3, 4, true,hello` + + expectedFields := map[string]interface{}{ + "first": 3.3, + "second": int64(4), + "third": true, + "fourth": "hello", + } + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, expectedFields, metrics[0].Fields()) +} + +func TestTrimSpaceDelimitedBySpace(t *testing.T) { + p := Parser{ + Delimiter: " ", + HeaderRowCount: 1, + TrimSpace: true, + TimeFunc: DefaultTime, + } + testCSV := ` first second third fourth +abcdefgh 0 2 false + abcdef 3.3 4 true + f 0 2 false` + + expectedFields := map[string]interface{}{ + "first": "abcdef", + "second": 3.3, + "third": int64(4), + "fourth": true, + } + + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, expectedFields, metrics[1].Fields()) +} + +func TestSkipRows(t *testing.T) { + p := Parser{ + HeaderRowCount: 1, + SkipRows: 1, + TagColumns: []string{"line1"}, + MeasurementColumn: "line3", + TimeFunc: DefaultTime, + } + testCSV := `garbage nonsense +line1,line2,line3 +hello,80,test_name2` + + expectedFields := map[string]interface{}{ + "line2": int64(80), + } + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, "test_name2", metrics[0].Name()) + require.Equal(t, expectedFields, metrics[0].Fields()) +} + +func TestSkipColumns(t *testing.T) { + p := Parser{ + SkipColumns: 1, + ColumnNames: []string{"line1", "line2"}, + TimeFunc: DefaultTime, + } + testCSV := `hello,80,test_name` + + expectedFields := map[string]interface{}{ + "line1": int64(80), + "line2": "test_name", + } + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, expectedFields, metrics[0].Fields()) +} + +func TestSkipColumnsWithHeader(t *testing.T) { + p := Parser{ + SkipColumns: 1, + HeaderRowCount: 2, + TimeFunc: DefaultTime, + } + testCSV := `col,col,col + 1,2,3 + trash,80,test_name` + + // we should expect an error if we try to get col1 + metrics, err := p.Parse([]byte(testCSV)) + require.NoError(t, err) + require.Equal(t, map[string]interface{}{"col2": int64(80), "col3": "test_name"}, metrics[0].Fields()) +} + +func TestParseStream(t *testing.T) { + p := Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimeFunc: DefaultTime, + } + + csvHeader := "a,b,c" + csvBody := "1,2,3" + + metrics, err := p.Parse([]byte(csvHeader)) + require.NoError(t, err) + require.Len(t, metrics, 0) + metric, err := p.ParseLine(csvBody) + 
testutil.RequireMetricEqual(t, + testutil.MustMetric( + "csv", + map[string]string{}, + map[string]interface{}{ + "a": int64(1), + "b": int64(2), + "c": int64(3), + }, + DefaultTime(), + ), metric) +} + +func TestTimestampUnixFloatPrecision(t *testing.T) { + p := Parser{ + MetricName: "csv", + ColumnNames: []string{"time", "value"}, + TimestampColumn: "time", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + } + data := `1551129661.95456123352050781250,42` + + expected := []telegraf.Metric{ + testutil.MustMetric( + "csv", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(1551129661, 954561233), + ), + } + + metrics, err := p.Parse([]byte(data)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, metrics) +} + +func TestSkipMeasurementColumn(t *testing.T) { + p := Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimestampColumn: "timestamp", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + TrimSpace: true, + } + data := `id,value,timestamp + 1,5,1551129661.954561233` + + expected := []telegraf.Metric{ + testutil.MustMetric( + "csv", + map[string]string{}, + map[string]interface{}{ + "id": 1, + "value": 5, + }, + time.Unix(1551129661, 954561233), + ), + } + + metrics, err := p.Parse([]byte(data)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, metrics) +} + +func TestSkipTimestampColumn(t *testing.T) { + p := Parser{ + MetricName: "csv", + HeaderRowCount: 1, + TimestampColumn: "timestamp", + TimestampFormat: "unix", + TimeFunc: DefaultTime, + TrimSpace: true, + } + data := `id,value,timestamp + 1,5,1551129661.954561233` + + expected := []telegraf.Metric{ + testutil.MustMetric( + "csv", + map[string]string{}, + map[string]interface{}{ + "id": 1, + "value": 5, + }, + time.Unix(1551129661, 954561233), + ), + } + + metrics, err := p.Parse([]byte(data)) + require.NoError(t, err) + testutil.RequireMetricsEqual(t, expected, metrics) +} diff --git a/plugins/parsers/dropwizard/README.md b/plugins/parsers/dropwizard/README.md new file mode 100644 index 000000000..436518a67 --- /dev/null +++ b/plugins/parsers/dropwizard/README.md @@ -0,0 +1,171 @@ +# Dropwizard + +The `dropwizard` data format can parse the [JSON Dropwizard][dropwizard] representation of a single dropwizard metric registry. By default, tags are parsed from metric names as if they were actual influxdb line protocol keys (`measurement<,tag_set>`) which can be overridden by defining a custom [template pattern][templates]. All field value types are supported, `string`, `number` and `boolean`. + +[templates]: /docs/TEMPLATE_PATTERN.md +[dropwizard]: http://metrics.dropwizard.io/3.1.0/manual/json/ + +### Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "dropwizard" + + ## Used by the templating engine to join matched values when cardinality is > 1 + separator = "_" + + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. There can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag(s) + ## 3. 
filter + template with field key + ## 4. default template + ## By providing an empty template array, templating is disabled and measurements are parsed as influxdb line protocol keys (measurement<,tag_set>) + templates = [] + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the metric registry within the JSON document + # dropwizard_metric_registry_path = "metrics" + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the default time of the measurements within the JSON document + # dropwizard_time_path = "time" + # dropwizard_time_format = "2006-01-02T15:04:05Z07:00" + + ## You may use an appropriate [gjson path](https://github.com/tidwall/gjson#path-syntax) + ## to locate the tags map within the JSON document + # dropwizard_tags_path = "tags" + + ## You may even use tag paths per tag + # [inputs.exec.dropwizard_tag_paths] + # tag1 = "tags.tag1" + # tag2 = "tags.tag2" +``` + + +### Examples + +A typical JSON of a dropwizard metric registry: + +```json +{ + "version": "3.0.0", + "counters" : { + "measurement,tag1=green" : { + "count" : 1 + } + }, + "meters" : { + "measurement" : { + "count" : 1, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "units" : "events/second" + } + }, + "gauges" : { + "measurement" : { + "value" : 1 + } + }, + "histograms" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + "p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0 + } + }, + "timers" : { + "measurement" : { + "count" : 1, + "max" : 1.0, + "mean" : 1.0, + "min" : 1.0, + "p50" : 1.0, + "p75" : 1.0, + "p95" : 1.0, + "p98" : 1.0, + "p99" : 1.0, + "p999" : 1.0, + "stddev" : 1.0, + "m15_rate" : 1.0, + "m1_rate" : 1.0, + "m5_rate" : 1.0, + "mean_rate" : 1.0, + "duration_units" : "seconds", + "rate_units" : "calls/second" + } + } +} +``` + +Would get translated into 4 different measurements: + +``` +measurement,metric_type=counter,tag1=green count=1 +measurement,metric_type=meter count=1,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 +measurement,metric_type=gauge value=1 +measurement,metric_type=histogram count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0 +measurement,metric_type=timer count=1,max=1.0,mean=1.0,min=1.0,p50=1.0,p75=1.0,p95=1.0,p98=1.0,p99=1.0,p999=1.0,stddev=1.0,m15_rate=1.0,m1_rate=1.0,m5_rate=1.0,mean_rate=1.0 +``` + +You may also parse a dropwizard registry from any JSON document which contains a dropwizard registry in some inner field. +Eg. to parse the following JSON document: + +```json +{ + "time" : "2017-02-22T14:33:03.662+02:00", + "tags" : { + "tag1" : "green", + "tag2" : "yellow" + }, + "metrics" : { + "counters" : { + "measurement" : { + "count" : 1 + } + }, + "meters" : {}, + "gauges" : {}, + "histograms" : {}, + "timers" : {} + } +} +``` +and translate it into: + +``` +measurement,metric_type=counter,tag1=green,tag2=yellow count=1 1487766783662000000 +``` + +you simply need to use the following additional configuration properties: + +```toml +dropwizard_metric_registry_path = "metrics" +dropwizard_time_path = "time" +dropwizard_time_format = "2006-01-02T15:04:05Z07:00" +dropwizard_tags_path = "tags" +## tag paths per tag are supported too, eg. 
+#[inputs.yourinput.dropwizard_tag_paths] +# tag1 = "tags.tag1" +# tag2 = "tags.tag2" +``` diff --git a/plugins/parsers/dropwizard/parser.go b/plugins/parsers/dropwizard/parser.go index 95ce3bffd..d8dcc9204 100644 --- a/plugins/parsers/dropwizard/parser.go +++ b/plugins/parsers/dropwizard/parser.go @@ -17,6 +17,8 @@ import ( var fieldEscaper = strings.NewReplacer("\\", "\\\\", "\"", "\\\"") var keyEscaper = strings.NewReplacer(" ", "\\ ", ",", "\\,", "=", "\\=") +type TimeFunc func() time.Time + // Parser parses json inputs containing dropwizard metrics, // either top-level or embedded inside a json field. // This parser is using gjson for retrieving paths within the json file. @@ -48,7 +50,7 @@ type parser struct { separator string templateEngine *templating.Engine - timeFunc metric.TimeFunc + timeFunc TimeFunc // seriesParser parses line protocol measurement + tags seriesParser *influx.Parser @@ -267,6 +269,6 @@ func (p *parser) readDWMetrics(metricType string, dwms interface{}, metrics []te return metrics } -func (p *parser) SetTimeFunc(f metric.TimeFunc) { +func (p *parser) SetTimeFunc(f TimeFunc) { p.timeFunc = f } diff --git a/plugins/parsers/dropwizard/parser_test.go b/plugins/parsers/dropwizard/parser_test.go index 8ddcf7714..df75c7f25 100644 --- a/plugins/parsers/dropwizard/parser_test.go +++ b/plugins/parsers/dropwizard/parser_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -var TimeFunc = func() time.Time { +var testTimeFunc = func() time.Time { return time.Unix(0, 0) } @@ -106,9 +106,9 @@ func TestParseValidEmbeddedCounterJSON(t *testing.T) { "count": float64(1), }, metrics[0].Fields()) assert.Equal(t, map[string]string{ - "metric_type": "counter", - "tag1": "green", - "tag2": "yellow", + "metric_type": "counter", + "tag1": "green", + "tag2": "yellow", "tag3 space,comma=equals": "red ,=", }, metrics[0].Tags()) assert.True(t, metricTime.Equal(metrics[0].Time()), fmt.Sprintf("%s should be equal to %s", metrics[0].Time(), metricTime)) @@ -528,7 +528,7 @@ func TestDropWizard(t *testing.T) { map[string]interface{}{ "value": 42.0, }, - TimeFunc(), + testTimeFunc(), ), ), }, @@ -547,7 +547,7 @@ func TestDropWizard(t *testing.T) { map[string]interface{}{ "value": 42.0, }, - TimeFunc(), + testTimeFunc(), ), ), }, @@ -573,7 +573,7 @@ func TestDropWizard(t *testing.T) { map[string]interface{}{ "value": 42.0, }, - TimeFunc(), + testTimeFunc(), ), ), }, @@ -584,7 +584,7 @@ func TestDropWizard(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { parser := NewParser() - parser.SetTimeFunc(TimeFunc) + parser.SetTimeFunc(testTimeFunc) metrics, err := parser.Parse(tt.input) tt.errFunc(t, err) diff --git a/plugins/parsers/form_urlencoded/README.md b/plugins/parsers/form_urlencoded/README.md new file mode 100644 index 000000000..e3700f44e --- /dev/null +++ b/plugins/parsers/form_urlencoded/README.md @@ -0,0 +1,57 @@ +# Form Urlencoded + + +The `form-urlencoded` data format parses `application/x-www-form-urlencoded` +data, such as commonly used in the [query string][]. + +A common use case is to pair it with [http_listener_v2][] input plugin to parse +request body or query params. + +### Configuration + +```toml +[[inputs.http_listener_v2]] + ## Address and port to host HTTP listener on + service_address = ":8080" + + ## Part of the request to consume. Available options are "body" and + ## "query". + data_source = "body" + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "form_urlencoded" + + ## Array of key names which should be collected as tags. + ## By default, keys with string value are ignored if not marked as tags. + form_urlencoded_tag_keys = ["tag1"] +``` + +### Examples + +#### Basic parsing + +Config: +```toml +[[inputs.http_listener_v2]] + name_override = "mymetric" + service_address = ":8080" + data_source = "query" + data_format = "form_urlencoded" + form_urlencoded_tag_keys = ["tag1"] +``` + +Request: +```bash +curl -i -XGET 'http://localhost:8080/telegraf?tag1=foo&field1=0.42&field2=42' +``` + +Output: +``` +mymetric,tag1=foo field1=0.42,field2=42 +``` + +[query string]: https://en.wikipedia.org/wiki/Query_string +[http_listener_v2]: /plugins/inputs/http_listener_v2 diff --git a/plugins/parsers/form_urlencoded/parser.go b/plugins/parsers/form_urlencoded/parser.go new file mode 100644 index 000000000..f38d87a80 --- /dev/null +++ b/plugins/parsers/form_urlencoded/parser.go @@ -0,0 +1,125 @@ +package form_urlencoded + +import ( + "bytes" + "fmt" + "net/url" + "strconv" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +var ( + // ErrNoMetric is returned when no metric is found in input line + ErrNoMetric = fmt.Errorf("no metric in line") +) + +// Parser decodes "application/x-www-form-urlencoded" data into metrics +type Parser struct { + MetricName string + DefaultTags map[string]string + TagKeys []string + AllowedKeys []string +} + +// Parse converts a slice of bytes in "application/x-www-form-urlencoded" format into metrics +func (p Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + buf = bytes.TrimSpace(buf) + if len(buf) == 0 { + return make([]telegraf.Metric, 0), nil + } + + values, err := url.ParseQuery(string(buf)) + if err != nil { + return nil, err + } + + if len(p.AllowedKeys) > 0 { + values = p.filterAllowedKeys(values) + } + + tags := p.extractTags(values) + fields := p.parseFields(values) + + for key, value := range p.DefaultTags { + tags[key] = value + } + + metric, err := metric.New(p.MetricName, tags, fields, time.Now().UTC()) + if err != nil { + return nil, err + } + + return []telegraf.Metric{metric}, nil +} + +// ParseLine delegates a single line of text to the Parse function +func (p Parser) ParseLine(line string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(line)) + if err != nil { + return nil, err + } + + if len(metrics) < 1 { + return nil, ErrNoMetric + } + + return metrics[0], nil +} + +// SetDefaultTags sets the default tags for every metric +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +func (p Parser) filterAllowedKeys(original url.Values) url.Values { + result := make(url.Values) + + for _, key := range p.AllowedKeys { + value, exists := original[key] + if !exists { + continue + } + + result[key] = value + } + + return result +} + +func (p Parser) extractTags(values url.Values) map[string]string { + tags := make(map[string]string) + for _, key := range p.TagKeys { + value, exists := values[key] + + if !exists || len(key) == 0 { + continue + } + + tags[key] = value[0] + delete(values, key) + } + + return tags +} + +func (p Parser) parseFields(values url.Values) map[string]interface{} { + fields := make(map[string]interface{}) + + for key, value := range values { + if len(key) == 0 || len(value) == 0 { + continue + 
} + + field, err := strconv.ParseFloat(value[0], 64) + if err != nil { + continue + } + + fields[key] = field + } + + return fields +} diff --git a/plugins/parsers/form_urlencoded/parser_test.go b/plugins/parsers/form_urlencoded/parser_test.go new file mode 100644 index 000000000..931d5a4ca --- /dev/null +++ b/plugins/parsers/form_urlencoded/parser_test.go @@ -0,0 +1,172 @@ +package form_urlencoded + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + validFormData = "tag1=foo&tag2=bar&tag3=baz&field1=42&field2=69" + encodedFormData = "tag1=%24%24%24&field1=1e%2B3" + notEscapedProperlyFormData = "invalid=%Y5" + blankKeyFormData = "=42&field2=69" + emptyFormData = "" +) + +func TestParseValidFormData(t *testing.T) { + parser := Parser{ + MetricName: "form_urlencoded_test", + } + + metrics, err := parser.Parse([]byte(validFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": float64(69), + }, metrics[0].Fields()) +} + +func TestParseLineValidFormData(t *testing.T) { + parser := Parser{ + MetricName: "form_urlencoded_test", + } + + metric, err := parser.ParseLine(validFormData) + require.NoError(t, err) + require.Equal(t, "form_urlencoded_test", metric.Name()) + require.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": float64(69), + }, metric.Fields()) +} + +func TestParseValidFormDataWithTags(t *testing.T) { + parser := Parser{ + MetricName: "form_urlencoded_test", + TagKeys: []string{"tag1", "tag2"}, + } + + metrics, err := parser.Parse([]byte(validFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) + require.Equal(t, map[string]string{ + "tag1": "foo", + "tag2": "bar", + }, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": float64(69), + }, metrics[0].Fields()) +} + +func TestParseValidFormDataDefaultTags(t *testing.T) { + parser := Parser{ + MetricName: "form_urlencoded_test", + TagKeys: []string{"tag1", "tag2"}, + DefaultTags: map[string]string{"tag4": "default"}, + } + + metrics, err := parser.Parse([]byte(validFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) + require.Equal(t, map[string]string{ + "tag1": "foo", + "tag2": "bar", + "tag4": "default", + }, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": float64(69), + }, metrics[0].Fields()) +} + +func TestParseValidFormDataDefaultTagsOverride(t *testing.T) { + parser := Parser{ + MetricName: "form_urlencoded_test", + TagKeys: []string{"tag1", "tag2"}, + DefaultTags: map[string]string{"tag1": "default"}, + } + + metrics, err := parser.Parse([]byte(validFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) + require.Equal(t, map[string]string{ + "tag1": "default", + "tag2": "bar", + }, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(42), + "field2": float64(69), + }, metrics[0].Fields()) +} + +func TestParseEncodedFormData(t *testing.T) { + parser := Parser{ + MetricName: "form_urlencoded_test", + TagKeys: []string{"tag1"}, + } + + metrics, err := 
parser.Parse([]byte(encodedFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "form_urlencoded_test", metrics[0].Name()) + require.Equal(t, map[string]string{ + "tag1": "$$$", + }, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field1": float64(1000), + }, metrics[0].Fields()) +} + +func TestParseInvalidFormDataError(t *testing.T) { + parser := Parser{ + MetricName: "form_urlencoded_test", + } + + metrics, err := parser.Parse([]byte(notEscapedProperlyFormData)) + require.Error(t, err) + require.Len(t, metrics, 0) +} + +func TestParseInvalidFormDataEmptyKey(t *testing.T) { + parser := Parser{ + MetricName: "form_urlencoded_test", + } + + // Empty key for field + metrics, err := parser.Parse([]byte(blankKeyFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field2": float64(69), + }, metrics[0].Fields()) + + // Empty key for tag + parser.TagKeys = []string{""} + metrics, err = parser.Parse([]byte(blankKeyFormData)) + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]interface{}{ + "field2": float64(69), + }, metrics[0].Fields()) +} + +func TestParseInvalidFormDataEmptyString(t *testing.T) { + parser := Parser{ + MetricName: "form_urlencoded_test", + } + + metrics, err := parser.Parse([]byte(emptyFormData)) + require.NoError(t, err) + require.Len(t, metrics, 0) +} diff --git a/plugins/parsers/graphite/README.md b/plugins/parsers/graphite/README.md new file mode 100644 index 000000000..b0b1127aa --- /dev/null +++ b/plugins/parsers/graphite/README.md @@ -0,0 +1,48 @@ +# Graphite + +The Graphite data format translates graphite *dot* buckets directly into +telegraf measurement names, with a single value field, and without any tags. +By default, the separator is left as `.`, but this can be changed using the +`separator` argument. For more advanced options, Telegraf supports specifying +[templates](#templates) to translate graphite buckets into Telegraf metrics. + +### Configuration + +```toml +[[inputs.exec]] + ## Commands array + commands = ["/tmp/test.sh", "/usr/bin/mycollector --foo=bar"] + + ## measurement name suffix (for separating different commands) + name_suffix = "_mycollector" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "graphite" + + ## This string will be used to join the matched values. + separator = "_" + + ## Each template line requires a template pattern. It can have an optional + ## filter before the template and separated by spaces. It can also have optional extra + ## tags following the template. Multiple tags should be separated by commas and no spaces + ## similar to the line protocol format. There can be only one default template. + ## Templates support below format: + ## 1. filter + template + ## 2. filter + template + extra tag(s) + ## 3. filter + template with field key + ## 4. default template + templates = [ + "*.app env.service.resource.measurement", + "stats.* .host.measurement* region=eu-east,agent=sensu", + "stats2.* .host.measurement.field", + "measurement*" + ] +``` + +#### templates + +Consult the [Template Patterns](/docs/TEMPLATE_PATTERN.md) documentation for +details. 
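As a minimal sketch of how this parser is driven from Go, mirroring the catch-all template used in the parser's own tests (the sample line below is invented):

```go
package main

import (
	"fmt"
	"log"

	"github.com/influxdata/telegraf/plugins/parsers/graphite"
)

func main() {
	// Default separator, a single catch-all template, no default tags.
	p, err := graphite.NewGraphiteParser("", []string{"measurement*"}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// With the catch-all template the whole dot bucket becomes the
	// measurement name and the value is stored in the "value" field.
	m, err := p.ParseLine("servers.localhost.cpu_load 42 1435077219")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.Name(), m.Fields(), m.Time().Unix())
}
```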
diff --git a/plugins/parsers/graphite/config.go b/plugins/parsers/graphite/config.go index 7a5c759e7..915077c06 100644 --- a/plugins/parsers/graphite/config.go +++ b/plugins/parsers/graphite/config.go @@ -7,7 +7,7 @@ import ( const ( // DefaultSeparator is the default join character to use when joining multiple - // measurment parts in a template. + // measurement parts in a template. DefaultSeparator = "." ) diff --git a/plugins/parsers/graphite/errors.go b/plugins/parsers/graphite/errors.go deleted file mode 100644 index 2cd2f5583..000000000 --- a/plugins/parsers/graphite/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package graphite - -import "fmt" - -// An UnsupposedValueError is returned when a parsed value is not -// supposed. -type UnsupposedValueError struct { - Field string - Value float64 -} - -func (err *UnsupposedValueError) Error() string { - return fmt.Sprintf(`field "%s" value: "%v" is unsupported`, err.Field, err.Value) -} diff --git a/plugins/parsers/graphite/parser.go b/plugins/parsers/graphite/parser.go index fc32bd83d..f50217711 100644 --- a/plugins/parsers/graphite/parser.go +++ b/plugins/parsers/graphite/parser.go @@ -1,18 +1,16 @@ package graphite import ( - "bufio" "bytes" + "errors" "fmt" - "io" "math" "strconv" "strings" "time" - "github.com/influxdata/telegraf/internal/templating" - "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal/templating" "github.com/influxdata/telegraf/metric" ) @@ -63,42 +61,36 @@ func NewGraphiteParser( func (p *GraphiteParser) Parse(buf []byte) ([]telegraf.Metric, error) { // parse even if the buffer begins with a newline - buf = bytes.TrimPrefix(buf, []byte("\n")) - // add newline to end if not exists: - if len(buf) > 0 && !bytes.HasSuffix(buf, []byte("\n")) { - buf = append(buf, []byte("\n")...) + if len(buf) != 0 && buf[0] == '\n' { + buf = buf[1:] } - metrics := make([]telegraf.Metric, 0) + var metrics []telegraf.Metric + var errs []string - var errStr string - buffer := bytes.NewBuffer(buf) - reader := bufio.NewReader(buffer) for { - // Read up to the next newline. 
- buf, err := reader.ReadBytes('\n') - if err == io.EOF { + n := bytes.IndexByte(buf, '\n') + var line []byte + if n >= 0 { + line = bytes.TrimSpace(buf[:n:n]) + } else { + line = bytes.TrimSpace(buf) // last line + } + if len(line) != 0 { + metric, err := p.ParseLine(string(line)) + if err == nil { + metrics = append(metrics, metric) + } else { + errs = append(errs, err.Error()) + } + } + if n < 0 { break } - if err != nil && err != io.EOF { - return metrics, err - } - - // Trim the buffer, even though there should be no padding - line := strings.TrimSpace(string(buf)) - if line == "" { - continue - } - metric, err := p.ParseLine(line) - if err == nil { - metrics = append(metrics, metric) - } else { - errStr += err.Error() + "\n" - } + buf = buf[n+1:] } - - if errStr != "" { - return metrics, fmt.Errorf(strings.TrimSpace(errStr)) + if len(errs) != 0 { + return metrics, errors.New(strings.Join(errs, "\n")) } return metrics, nil } @@ -128,10 +120,6 @@ func (p *GraphiteParser) ParseLine(line string) (telegraf.Metric, error) { return nil, fmt.Errorf(`field "%s" value: %s`, fields[0], err) } - if math.IsNaN(v) || math.IsInf(v, 0) { - return nil, &UnsupposedValueError{Field: fields[0], Value: v} - } - fieldValues := map[string]interface{}{} if field != "" { fieldValues[field] = v diff --git a/plugins/parsers/graphite/parser_test.go b/plugins/parsers/graphite/parser_test.go index 9a6b462f7..9254574b6 100644 --- a/plugins/parsers/graphite/parser_test.go +++ b/plugins/parsers/graphite/parser_test.go @@ -1,14 +1,14 @@ package graphite import ( - "reflect" + "math" "strconv" "testing" "time" "github.com/influxdata/telegraf/internal/templating" "github.com/influxdata/telegraf/metric" - + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -241,7 +241,7 @@ func TestParseLine(t *testing.T) { len(test.tags), len(metric.Tags())) } f := metric.Fields()["value"].(float64) - if metric.Fields()["value"] != f { + if f != test.value { t.Fatalf("floatValue value mismatch. 
expected %v, got %v", test.value, f) } @@ -355,14 +355,40 @@ func TestParse(t *testing.T) { func TestParseNaN(t *testing.T) { p, err := NewGraphiteParser("", []string{"measurement*"}, nil) - assert.NoError(t, err) + require.NoError(t, err) - _, err = p.ParseLine("servers.localhost.cpu_load NaN 1435077219") - assert.Error(t, err) + m, err := p.ParseLine("servers.localhost.cpu_load NaN 1435077219") + require.NoError(t, err) - if _, ok := err.(*UnsupposedValueError); !ok { - t.Fatalf("expected *ErrUnsupportedValue, got %v", reflect.TypeOf(err)) - } + expected := testutil.MustMetric( + "servers.localhost.cpu_load", + map[string]string{}, + map[string]interface{}{ + "value": math.NaN(), + }, + time.Unix(1435077219, 0), + ) + + testutil.RequireMetricEqual(t, expected, m) +} + +func TestParseInf(t *testing.T) { + p, err := NewGraphiteParser("", []string{"measurement*"}, nil) + require.NoError(t, err) + + m, err := p.ParseLine("servers.localhost.cpu_load +Inf 1435077219") + require.NoError(t, err) + + expected := testutil.MustMetric( + "servers.localhost.cpu_load", + map[string]string{}, + map[string]interface{}{ + "value": math.Inf(1), + }, + time.Unix(1435077219, 0), + ) + + testutil.RequireMetricEqual(t, expected, m) } func TestFilterMatchDefault(t *testing.T) { diff --git a/plugins/parsers/grok/README.md b/plugins/parsers/grok/README.md new file mode 100644 index 000000000..80936a41d --- /dev/null +++ b/plugins/parsers/grok/README.md @@ -0,0 +1,257 @@ +# Grok + +The grok data format parses line delimited data using a regular expression like +language. + +The best way to get acquainted with grok patterns is to read the logstash docs, +which are available here: + https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html + +The grok parser uses a slightly modified version of logstash "grok" +patterns, with the format: + +``` +%{[:][:]} +``` + +The `capture_syntax` defines the grok pattern that's used to parse the input +line and the `semantic_name` is used to name the field or tag. The extension +`modifier` controls the data type that the parsed item is converted to or +other special handling. + +By default all named captures are converted into string fields. +If a pattern does not have a semantic name it will not be captured. +Timestamp modifiers can be used to convert captures to the timestamp of the +parsed metric. If no timestamp is parsed the metric will be created using the +current time. + +You must capture at least one field per line. 
+ +- Available modifiers: + - string (default if nothing is specified) + - int + - float + - duration (ie, 5.23ms gets converted to int nanoseconds) + - tag (converts the field into a tag) + - drop (drops the field completely) + - measurement (use the matched text as the measurement name) +- Timestamp modifiers: + - ts (This will auto-learn the timestamp format) + - ts-ansic ("Mon Jan _2 15:04:05 2006") + - ts-unix ("Mon Jan _2 15:04:05 MST 2006") + - ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") + - ts-rfc822 ("02 Jan 06 15:04 MST") + - ts-rfc822z ("02 Jan 06 15:04 -0700") + - ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") + - ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") + - ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") + - ts-rfc3339 ("2006-01-02T15:04:05Z07:00") + - ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") + - ts-httpd ("02/Jan/2006:15:04:05 -0700") + - ts-epoch (seconds since unix epoch, may contain decimal) + - ts-epochnano (nanoseconds since unix epoch) + - ts-epochmilli (milliseconds since unix epoch) + - ts-syslog ("Jan 02 15:04:05", parsed time is set to the current year) + - ts-"CUSTOM" + +CUSTOM time layouts must be within quotes and be the representation of the +"reference time", which is `Mon Jan 2 15:04:05 -0700 MST 2006`. +To match a comma decimal point you can use a period. For example `%{TIMESTAMP:timestamp:ts-"2006-01-02 15:04:05.000"}` can be used to match `"2018-01-02 15:04:05,000"` +To match a comma decimal point you can use a period in the pattern string. +See https://golang.org/pkg/time/#Parse for more details. + +Telegraf has many of its own [built-in patterns][] as well as support for most +of the Logstash builtin patterns using [these Go compatible patterns][grok-patterns]. + +**Note:** Golang regular expressions do not support lookahead or lookbehind. +Logstash patterns that use these features may not be supported, or may use a Go +friendly pattern that is not fully compatible with the Logstash pattern. + +[built-in patterns]: /plugins/parsers/grok/influx_patterns.go +[grok-patterns]: https://github.com/vjeantet/grok/blob/master/patterns/grok-patterns + +If you need help building patterns to match your logs, +you will find the https://grokdebug.herokuapp.com application quite useful! + +### Configuration +```toml +[[inputs.file]] + ## Files to parse each interval. + ## These accept standard unix glob matching rules, but with the addition of + ## ** as a "super asterisk". ie: + ## /var/log/**.log -> recursively find all .log files in /var/log + ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log + ## /var/log/apache.log -> only tail the apache log file + files = ["/var/log/apache/access.log"] + + ## The dataformat to be read from files + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "grok" + + ## This is a list of patterns to check the given log file(s) for. + ## Note that adding patterns here increases processing time. The most + ## efficient configuration is to have one pattern. + ## Other common built-in patterns are: + ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) + ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) + grok_patterns = ["%{COMBINED_LOG_FORMAT}"] + + ## Full path(s) to custom pattern files. + grok_custom_pattern_files = [] + + ## Custom patterns can also be defined here. Put one pattern per line. 
+ grok_custom_patterns = ''' + ''' + + ## Timezone allows you to provide an override for timestamps that + ## don't already include an offset + ## e.g. 04/06/2016 12:41:45 data one two 5.43µs + ## + ## Default: "" which renders UTC + ## Options are as follows: + ## 1. Local -- interpret based on machine localtime + ## 2. "Canada/Eastern" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + ## 3. UTC -- or blank/unspecified, will return timestamp in UTC + grok_timezone = "Canada/Eastern" + + ## When set to "disable" timestamp will not incremented if there is a + ## duplicate. + # grok_unique_timestamp = "auto" +``` + +#### Timestamp Examples + +This example input and config parses a file using a custom timestamp conversion: + +``` +2017-02-21 13:10:34 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} value=%{NUMBER:value:int}'] +``` + +This example input and config parses a file using a timestamp in unix time: + +``` +1466004605 value=42 +1466004605.123456789 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ['%{NUMBER:timestamp:ts-epoch} value=%{NUMBER:value:int}'] +``` + +This example parses a file using a built-in conversion and a custom pattern: + +``` +Wed Apr 12 13:10:34 PST 2017 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ["%{TS_UNIX:timestamp:ts-unix} value=%{NUMBER:value:int}"] + grok_custom_patterns = ''' + TS_UNIX %{DAY} %{MONTH} %{MONTHDAY} %{HOUR}:%{MINUTE}:%{SECOND} %{TZ} %{YEAR} + ''' +``` + +This example input and config parses a file using a custom timestamp conversion that doesn't match any specific standard: + +``` +21/02/2017 13:10:34 value=42 +``` + +```toml +[[inputs.file]] + grok_patterns = ['%{MY_TIMESTAMP:timestamp:ts-"02/01/2006 15:04:05"} value=%{NUMBER:value:int}'] + + grok_custom_patterns = ''' + MY_TIMESTAMP (?:\d{2}.\d{2}.\d{4} \d{2}:\d{2}:\d{2}) + ''' +``` + +For cases where the timestamp itself is without offset, the `timezone` config var is available +to denote an offset. By default (with `timezone` either omit, blank or set to `"UTC"`), the times +are processed as if in the UTC timezone. If specified as `timezone = "Local"`, the timestamp +will be processed based on the current machine timezone configuration. Lastly, if using a +timezone from the list of Unix [timezones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones), +grok will offset the timestamp accordingly. + +#### TOML Escaping + +When saving patterns to the configuration file, keep in mind the different TOML +[string](https://github.com/toml-lang/toml#string) types and the escaping +rules for each. These escaping rules must be applied in addition to the +escaping required by the grok syntax. Using the Multi-line line literal +syntax with `'''` may be useful. + +The following config examples will parse this input file: + +``` +|42|\uD83D\uDC2F|'telegraf'| +``` + +Since `|` is a special character in the grok language, we must escape it to +get a literal `|`. With a basic TOML string, special characters such as +backslash must be escaped, requiring us to escape the backslash a second time. + +```toml +[[inputs.file]] + grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] + grok_custom_patterns = "UNICODE_ESCAPE (?:\\\\u[0-9A-F]{4})+" +``` + +We cannot use a literal TOML string for the pattern, because we cannot match a +`'` within it. However, it works well for the custom pattern. 
+```toml +[[inputs.file]] + grok_patterns = ["\\|%{NUMBER:value:int}\\|%{UNICODE_ESCAPE:escape}\\|'%{WORD:name}'\\|"] + grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' +``` + +A multi-line literal string allows us to encode the pattern: +```toml +[[inputs.file]] + grok_patterns = [''' + \|%{NUMBER:value:int}\|%{UNICODE_ESCAPE:escape}\|'%{WORD:name}'\| + '''] + grok_custom_patterns = 'UNICODE_ESCAPE (?:\\u[0-9A-F]{4})+' +``` + +#### Tips for creating patterns + +Writing complex patterns can be difficult, here is some advice for writing a +new pattern or testing a pattern developed [online](https://grokdebug.herokuapp.com). + +Create a file output that writes to stdout, and disable other outputs while +testing. This will allow you to see the captured metrics. Keep in mind that +the file output will only print once per `flush_interval`. + +```toml +[[outputs.file]] + files = ["stdout"] +``` + +- Start with a file containing only a single line of your input. +- Remove all but the first token or piece of the line. +- Add the section of your pattern to match this piece to your configuration file. +- Verify that the metric is parsed successfully by running Telegraf. +- If successful, add the next token, update the pattern and retest. +- Continue one token at a time until the entire line is successfully parsed. + +#### Performance + +Performance depends heavily on the regular expressions that you use, but there +are a few techniques that can help: + +- Avoid using patterns such as `%{DATA}` that will always match. +- If possible, add `^` and `$` anchors to your pattern: + ``` + [[inputs.file]] + grok_patterns = ["^%{COMBINED_LOG_FORMAT}$"] + ``` diff --git a/plugins/inputs/logparser/grok/influx_patterns.go b/plugins/parsers/grok/influx_patterns.go similarity index 51% rename from plugins/inputs/logparser/grok/influx_patterns.go rename to plugins/parsers/grok/influx_patterns.go index 6dc990622..282c28111 100644 --- a/plugins/inputs/logparser/grok/influx_patterns.go +++ b/plugins/parsers/grok/influx_patterns.go @@ -1,45 +1,6 @@ package grok -// DEFAULT_PATTERNS SHOULD BE KEPT IN-SYNC WITH patterns/influx-patterns const DEFAULT_PATTERNS = ` -# Captures are a slightly modified version of logstash "grok" patterns, with -# the format %{[:][:]} -# By default all named captures are converted into string fields. -# Modifiers can be used to convert captures to other types or tags. -# Timestamp modifiers can be used to convert captures to the timestamp of the -# parsed metric. 
- -# View logstash grok pattern docs here: -# https://www.elastic.co/guide/en/logstash/current/plugins-filters-grok.html -# All default logstash patterns are supported, these can be viewed here: -# https://github.com/logstash-plugins/logstash-patterns-core/blob/master/patterns/grok-patterns - -# Available modifiers: -# string (default if nothing is specified) -# int -# float -# duration (ie, 5.23ms gets converted to int nanoseconds) -# tag (converts the field into a tag) -# drop (drops the field completely) -# Timestamp modifiers: -# ts-ansic ("Mon Jan _2 15:04:05 2006") -# ts-unix ("Mon Jan _2 15:04:05 MST 2006") -# ts-ruby ("Mon Jan 02 15:04:05 -0700 2006") -# ts-rfc822 ("02 Jan 06 15:04 MST") -# ts-rfc822z ("02 Jan 06 15:04 -0700") -# ts-rfc850 ("Monday, 02-Jan-06 15:04:05 MST") -# ts-rfc1123 ("Mon, 02 Jan 2006 15:04:05 MST") -# ts-rfc1123z ("Mon, 02 Jan 2006 15:04:05 -0700") -# ts-rfc3339 ("2006-01-02T15:04:05Z07:00") -# ts-rfc3339nano ("2006-01-02T15:04:05.999999999Z07:00") -# ts-httpd ("02/Jan/2006:15:04:05 -0700") -# ts-epoch (seconds since unix epoch) -# ts-epochnano (nanoseconds since unix epoch) -# ts-"CUSTOM" -# CUSTOM time layouts must be within quotes and be the representation of the -# "reference time", which is Mon Jan 2 15:04:05 -0700 MST 2006 -# See https://golang.org/pkg/time/#Parse for more details. - # Example log file pattern, example log looks like this: # [04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs # Breakdown of the DURATION pattern below: @@ -69,7 +30,7 @@ COMMON_LOG_FORMAT %{CLIENT:client_ip} %{NOTSPACE:ident} %{NOTSPACE:auth} \[%{HTT # Combined log format is the same as the common log format but with the addition # of two quoted strings at the end for "referrer" and "agent" # See Examples at http://httpd.apache.org/docs/current/mod/mod_log_config.html -COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} %{QS:referrer} %{QS:agent} +COMBINED_LOG_FORMAT %{COMMON_LOG_FORMAT} "%{DATA:referrer}" "%{DATA:agent}" # HTTPD log formats HTTPD20_ERRORLOG \[%{HTTPDERROR_DATE:timestamp}\] \[%{LOGLEVEL:loglevel:tag}\] (?:\[client %{IPORHOST:clientip}\] ){0,1}%{GREEDYDATA:errormsg} diff --git a/plugins/inputs/logparser/grok/grok.go b/plugins/parsers/grok/parser.go similarity index 88% rename from plugins/inputs/logparser/grok/grok.go rename to plugins/parsers/grok/parser.go index 4e6efc2c7..810190b9d 100644 --- a/plugins/inputs/logparser/grok/grok.go +++ b/plugins/parsers/grok/parser.go @@ -2,6 +2,7 @@ package grok import ( "bufio" + "bytes" "fmt" "log" "os" @@ -10,10 +11,9 @@ import ( "strings" "time" - "github.com/vjeantet/grok" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/vjeantet/grok" ) var timeLayouts = map[string]string{ @@ -28,15 +28,17 @@ var timeLayouts = map[string]string{ "ts-rfc3339": "2006-01-02T15:04:05Z07:00", "ts-rfc3339nano": "2006-01-02T15:04:05.999999999Z07:00", "ts-httpd": "02/Jan/2006:15:04:05 -0700", - // These three are not exactly "layouts", but they are special cases that + // These four are not exactly "layouts", but they are special cases that // will get handled in the ParseLine function. - "ts-epoch": "EPOCH", - "ts-epochnano": "EPOCH_NANO", - "ts-syslog": "SYSLOG_TIMESTAMP", - "ts": "GENERIC_TIMESTAMP", // try parsing all known timestamp layouts. + "ts-epoch": "EPOCH", + "ts-epochnano": "EPOCH_NANO", + "ts-epochmilli": "EPOCH_MILLI", + "ts-syslog": "SYSLOG_TIMESTAMP", + "ts": "GENERIC_TIMESTAMP", // try parsing all known timestamp layouts. 
} const ( + MEASUREMENT = "measurement" INT = "int" TAG = "tag" FLOAT = "float" @@ -44,6 +46,7 @@ const ( DURATION = "duration" DROP = "drop" EPOCH = "EPOCH" + EPOCH_MILLI = "EPOCH_MILLI" EPOCH_NANO = "EPOCH_NANO" SYSLOG_TIMESTAMP = "SYSLOG_TIMESTAMP" GENERIC_TIMESTAMP = "GENERIC_TIMESTAMP" @@ -68,10 +71,11 @@ type Parser struct { // specified by the user in Patterns. // They will look like: // GROK_INTERNAL_PATTERN_0, GROK_INTERNAL_PATTERN_1, etc. - namedPatterns []string + NamedPatterns []string CustomPatterns string CustomPatternFiles []string Measurement string + DefaultTags map[string]string // Timezone is an optional component to help render log dates to // your chosen zone. @@ -83,6 +87,9 @@ type Parser struct { Timezone string loc *time.Location + // UniqueTimestamp when set to "disable", timestamp will not incremented if there is a duplicate. + UniqueTimestamp string + // typeMap is a map of patterns -> capture name -> modifier, // ie, { // "%{TESTLOG}": @@ -131,9 +138,13 @@ func (p *Parser) Compile() error { return err } + if p.UniqueTimestamp == "" { + p.UniqueTimestamp = "auto" + } + // Give Patterns fake names so that they can be treated as named // "custom patterns" - p.namedPatterns = make([]string, 0, len(p.Patterns)) + p.NamedPatterns = make([]string, 0, len(p.Patterns)) for i, pattern := range p.Patterns { pattern = strings.TrimSpace(pattern) if pattern == "" { @@ -141,10 +152,10 @@ func (p *Parser) Compile() error { } name := fmt.Sprintf("GROK_INTERNAL_PATTERN_%d", i) p.CustomPatterns += "\n" + name + " " + pattern + "\n" - p.namedPatterns = append(p.namedPatterns, "%{"+name+"}") + p.NamedPatterns = append(p.NamedPatterns, "%{"+name+"}") } - if len(p.namedPatterns) == 0 { + if len(p.NamedPatterns) == 0 { return fmt.Errorf("pattern required") } @@ -167,10 +178,6 @@ func (p *Parser) Compile() error { p.addCustomPatterns(scanner) } - if p.Measurement == "" { - p.Measurement = "logparser_grok" - } - p.loc, err = time.LoadLocation(p.Timezone) if err != nil { log.Printf("W! improper timezone supplied (%s), setting loc to UTC", p.Timezone) @@ -191,7 +198,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { var values map[string]string // the matching pattern string var patternName string - for _, pattern := range p.namedPatterns { + for _, pattern := range p.NamedPatterns { if values, err = p.g.Parse(pattern, line); err != nil { return nil, err } @@ -208,12 +215,17 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { fields := make(map[string]interface{}) tags := make(map[string]string) + + //add default tags + for k, v := range p.DefaultTags { + tags[k] = v + } + timestamp := time.Now() for k, v := range values { if k == "" || v == "" { continue } - // t is the modifier of the field var t string // check if pattern has some modifiers @@ -235,8 +247,10 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } switch t { + case MEASUREMENT: + p.Measurement = v case INT: - iv, err := strconv.ParseInt(v, 10, 64) + iv, err := strconv.ParseInt(v, 0, 64) if err != nil { log.Printf("E! 
Error parsing %s to int: %s", v, err) } else { @@ -259,7 +273,7 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case TAG: tags[k] = v case STRING: - fields[k] = strings.Trim(v, `"`) + fields[k] = v case EPOCH: parts := strings.SplitN(v, ".", 2) if len(parts) == 0 { @@ -285,6 +299,13 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { ts = ts.Add(time.Duration(nanosec) * time.Nanosecond) } timestamp = ts + case EPOCH_MILLI: + ms, err := strconv.ParseInt(v, 10, 64) + if err != nil { + log.Printf("E! Error parsing %s to int: %s", v, err) + } else { + timestamp = time.Unix(0, ms*int64(time.Millisecond)) + } case EPOCH_NANO: iv, err := strconv.ParseInt(v, 10, 64) if err != nil { @@ -335,11 +356,12 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { case DROP: // goodbye! default: - // Replace commas with dot character v = strings.Replace(v, ",", ".", -1) - ts, err := time.ParseInLocation(t, v, p.loc) if err == nil { + if ts.Year() == 0 { + ts = ts.AddDate(timestamp.Year(), 0, 0) + } timestamp = ts } else { log.Printf("E! Error parsing %s to time layout [%s]: %s", v, t, err) @@ -347,13 +369,38 @@ func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { } } - if len(fields) == 0 { - return nil, fmt.Errorf("logparser_grok: must have one or more fields") + if p.UniqueTimestamp != "auto" { + return metric.New(p.Measurement, tags, fields, timestamp) } return metric.New(p.Measurement, tags, fields, p.tsModder.tsMod(timestamp)) } +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + + metrics := make([]telegraf.Metric, 0) + + scanner := bufio.NewScanner(bytes.NewReader(buf)) + for scanner.Scan() { + line := scanner.Text() + m, err := p.ParseLine(line) + if err != nil { + return nil, err + } + + if m == nil { + continue + } + metrics = append(metrics, m) + } + + return metrics, nil +} + +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + func (p *Parser) addCustomPatterns(scanner *bufio.Scanner) { for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) @@ -456,6 +503,9 @@ type tsModder struct { // most significant time unit of ts. // ie, if the input is at ms precision, it will increment it 1µs. 
func (t *tsModder) tsMod(ts time.Time) time.Time { + if ts.IsZero() { + return ts + } defer func() { t.last = ts }() // don't mod the time if we don't need to if t.last.IsZero() || ts.IsZero() { @@ -469,7 +519,6 @@ func (t *tsModder) tsMod(ts time.Time) time.Time { t.rollover = 0 return ts } - if ts.Equal(t.last) { t.dupe = ts } diff --git a/plugins/inputs/logparser/grok/grok_test.go b/plugins/parsers/grok/parser_test.go similarity index 80% rename from plugins/inputs/logparser/grok/grok_test.go rename to plugins/parsers/grok/parser_test.go index 075c00ca4..ec5e47388 100644 --- a/plugins/inputs/logparser/grok/grok_test.go +++ b/plugins/parsers/grok/parser_test.go @@ -1,82 +1,23 @@ package grok import ( + "log" "testing" "time" - "github.com/influxdata/telegraf" - + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -var benchM telegraf.Metric - -func Benchmark_ParseLine_CommonLogFormat(b *testing.B) { - p := &Parser{ - Patterns: []string{"%{COMMON_LOG_FORMAT}"}, +func TestGrokParse(t *testing.T) { + parser := Parser{ + Measurement: "t_met", + Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } - _ = p.Compile() - - var m telegraf.Metric - for n := 0; n < b.N; n++ { - m, _ = p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) - } - benchM = m -} - -func Benchmark_ParseLine_CombinedLogFormat(b *testing.B) { - p := &Parser{ - Patterns: []string{"%{COMBINED_LOG_FORMAT}"}, - } - _ = p.Compile() - - var m telegraf.Metric - for n := 0; n < b.N; n++ { - m, _ = p.ParseLine(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "-" "Mozilla"`) - } - benchM = m -} - -func Benchmark_ParseLine_CustomPattern(b *testing.B) { - p := &Parser{ - Patterns: []string{"%{TEST_LOG_A}", "%{TEST_LOG_B}"}, - CustomPatterns: ` - DURATION %{NUMBER}[nuµm]?s - RESPONSE_CODE %{NUMBER:response_code:tag} - RESPONSE_TIME %{DURATION:response_time:duration} - TEST_LOG_A %{NUMBER:myfloat:float} %{RESPONSE_CODE} %{IPORHOST:clientip} %{RESPONSE_TIME} - `, - } - _ = p.Compile() - - var m telegraf.Metric - for n := 0; n < b.N; n++ { - m, _ = p.ParseLine(`[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101`) - } - benchM = m -} - -// Test a very simple parse pattern. -func TestSimpleParse(t *testing.T) { - p := &Parser{ - Patterns: []string{"%{TESTLOG}"}, - CustomPatterns: ` - TESTLOG %{NUMBER:num:int} %{WORD:client} - `, - } - assert.NoError(t, p.Compile()) - - m, err := p.ParseLine(`142 bot`) + parser.Compile() + _, err := parser.Parse([]byte(`127.0.0.1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`)) assert.NoError(t, err) - require.NotNil(t, m) - - assert.Equal(t, - map[string]interface{}{ - "num": int64(142), - "client": "bot", - }, - m.Fields()) } // Verify that patterns with a regex lookahead fail at compile time. 
@@ -96,8 +37,7 @@ func TestParsePatternsWithLookahead(t *testing.T) { func TestMeasurementName(t *testing.T) { p := &Parser{ - Measurement: "my_web_log", - Patterns: []string{"%{COMMON_LOG_FORMAT}"}, + Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } assert.NoError(t, p.Compile()) @@ -116,13 +56,11 @@ func TestMeasurementName(t *testing.T) { }, m.Fields()) assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) - assert.Equal(t, "my_web_log", m.Name()) } func TestCLF_IPv6(t *testing.T) { p := &Parser{ - Measurement: "my_web_log", - Patterns: []string{"%{COMMON_LOG_FORMAT}"}, + Patterns: []string{"%{COMMON_LOG_FORMAT}"}, } assert.NoError(t, p.Compile()) @@ -140,7 +78,6 @@ func TestCLF_IPv6(t *testing.T) { }, m.Fields()) assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) - assert.Equal(t, "my_web_log", m.Name()) m, err = p.ParseLine(`::1 user-identifier frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326`) require.NotNil(t, m) @@ -156,7 +93,6 @@ func TestCLF_IPv6(t *testing.T) { }, m.Fields()) assert.Equal(t, map[string]string{"verb": "GET", "resp_code": "200"}, m.Tags()) - assert.Equal(t, "my_web_log", m.Name()) } func TestCustomInfluxdbHttpd(t *testing.T) { @@ -341,6 +277,28 @@ func TestParsePatternsWithoutCustom(t *testing.T) { assert.Equal(t, time.Unix(0, 1466004605359052000), metricA.Time()) } +func TestParseEpochMilli(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{MYAPP}"}, + CustomPatterns: ` + MYAPP %{POSINT:ts:ts-epochmilli} response_time=%{POSINT:response_time:int} mymetric=%{NUMBER:metric:float} + `, + } + assert.NoError(t, p.Compile()) + + metricA, err := p.ParseLine(`1568540909963 response_time=20821 mymetric=10890.645`) + require.NotNil(t, metricA) + assert.NoError(t, err) + assert.Equal(t, + map[string]interface{}{ + "response_time": int64(20821), + "metric": float64(10890.645), + }, + metricA.Fields()) + assert.Equal(t, map[string]string{}, metricA.Tags()) + assert.Equal(t, time.Unix(0, 1568540909963000000), metricA.Time()) +} + func TestParseEpochNano(t *testing.T) { p := &Parser{ Patterns: []string{"%{MYAPP}"}, @@ -552,7 +510,7 @@ func TestCompileFileAndParse(t *testing.T) { time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(), metricA.Time().Nanosecond()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -564,7 +522,7 @@ func TestCompileFileAndParse(t *testing.T) { metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) assert.Equal(t, - time.Date(2016, time.June, 4, 12, 41, 45, 0, time.FixedZone("foo", 60*60)).Nanosecond(), + time.Date(2016, time.June, 4, 12, 41, 46, 0, time.FixedZone("foo", 60*60)).Nanosecond(), metricB.Time().Nanosecond()) } @@ -636,61 +594,106 @@ func TestCompileErrors(t *testing.T) { assert.Error(t, p.Compile()) } -func TestParseErrors(t *testing.T) { - // Parse fails because the pattern doesn't exist +func TestParseErrors_MissingPattern(t *testing.T) { p := &Parser{ - Patterns: []string{"%{TEST_LOG_B}"}, + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_B}"}, CustomPatterns: ` TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:int} %{} `, } - assert.Error(t, p.Compile()) + require.Error(t, p.Compile()) _, err := p.ParseLine(`[04/Jun/2016:12:41:45 +0100] notnumber 200 192.168.1.1 5.432µs 101`) - assert.Error(t, err) + 
require.Error(t, err) +} - // Parse fails because myword is not an int - p = &Parser{ - Patterns: []string{"%{TEST_LOG_A}"}, +func TestParseErrors_WrongIntegerType(t *testing.T) { + p := &Parser{ + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_A}"}, CustomPatterns: ` - TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:int} + TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:int} + `, + } + require.NoError(t, p.Compile()) + m, err := p.ParseLine(`0 notnumber`) + require.NoError(t, err) + testutil.RequireMetricEqual(t, + m, + testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0))) +} + +func TestParseErrors_WrongFloatType(t *testing.T) { + p := &Parser{ + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_A}"}, + CustomPatterns: ` + TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:float} + `, + } + require.NoError(t, p.Compile()) + m, err := p.ParseLine(`0 notnumber`) + require.NoError(t, err) + testutil.RequireMetricEqual(t, + m, + testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0))) +} + +func TestParseErrors_WrongDurationType(t *testing.T) { + p := &Parser{ + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_A}"}, + CustomPatterns: ` + TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:duration} + `, + } + require.NoError(t, p.Compile()) + m, err := p.ParseLine(`0 notnumber`) + require.NoError(t, err) + testutil.RequireMetricEqual(t, + m, + testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0))) +} + +func TestParseErrors_WrongTimeLayout(t *testing.T) { + p := &Parser{ + Measurement: "grok", + Patterns: []string{"%{TEST_LOG_A}"}, + CustomPatterns: ` + TEST_LOG_A %{NUMBER:ts:ts-epoch} %{WORD:myword:duration} + `, + } + require.NoError(t, p.Compile()) + m, err := p.ParseLine(`0 notnumber`) + require.NoError(t, err) + testutil.RequireMetricEqual(t, + m, + testutil.MustMetric("grok", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0))) +} + +func TestParseInteger_Base16(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TEST_LOG_C}"}, + CustomPatterns: ` + DURATION %{NUMBER}[nuµm]?s + BASE10OR16NUM (?:%{BASE10NUM}|%{BASE16NUM}) + TEST_LOG_C %{NUMBER:myfloat} %{BASE10OR16NUM:response_code:int} %{IPORHOST:clientip} %{DURATION:rt} `, } assert.NoError(t, p.Compile()) - _, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`) - assert.Error(t, err) - // Parse fails because myword is not a float - p = &Parser{ - Patterns: []string{"%{TEST_LOG_A}"}, - CustomPatterns: ` - TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:float} - `, - } - assert.NoError(t, p.Compile()) - _, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`) - assert.Error(t, err) - - // Parse fails because myword is not a duration - p = &Parser{ - Patterns: []string{"%{TEST_LOG_A}"}, - CustomPatterns: ` - TEST_LOG_A %{HTTPDATE:ts:ts-httpd} %{WORD:myword:duration} - `, - } - assert.NoError(t, p.Compile()) - _, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`) - assert.Error(t, err) - - // Parse fails because the time layout is wrong. 
- p = &Parser{ - Patterns: []string{"%{TEST_LOG_A}"}, - CustomPatterns: ` - TEST_LOG_A %{HTTPDATE:ts:ts-unix} %{WORD:myword:duration} - `, - } - assert.NoError(t, p.Compile()) - _, err = p.ParseLine(`04/Jun/2016:12:41:45 +0100 notnumber`) - assert.Error(t, err) + metricA, err := p.ParseLine(`1.25 0xc8 192.168.1.1 5.432µs`) + require.NotNil(t, metricA) + assert.NoError(t, err) + assert.Equal(t, + map[string]interface{}{ + "clientip": "192.168.1.1", + "response_code": int64(200), + "myfloat": "1.25", + "rt": "5.432µs", + }, + metricA.Fields()) + assert.Equal(t, map[string]string{}, metricA.Tags()) } func TestTsModder(t *testing.T) { @@ -801,7 +804,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -812,7 +815,7 @@ func TestTimezoneEmptyCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano()) + assert.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano()) } func TestTimezoneMalformedCompileFileAndParse(t *testing.T) { @@ -837,7 +840,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -848,7 +851,7 @@ func TestTimezoneMalformedCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465044105000000000), metricB.Time().UnixNano()) + assert.Equal(t, int64(1465044106000000000), metricB.Time().UnixNano()) } func TestTimezoneEuropeCompileFileAndParse(t *testing.T) { @@ -873,7 +876,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -884,7 +887,7 @@ func TestTimezoneEuropeCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465036905000000000), metricB.Time().UnixNano()) + assert.Equal(t, int64(1465036906000000000), metricB.Time().UnixNano()) } func TestTimezoneAmericasCompileFileAndParse(t *testing.T) { @@ -909,7 +912,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, 
metricB) assert.NoError(t, err) assert.Equal(t, @@ -920,7 +923,7 @@ func TestTimezoneAmericasCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, int64(1465058505000000000), metricB.Time().UnixNano()) + assert.Equal(t, int64(1465058506000000000), metricB.Time().UnixNano()) } func TestTimezoneLocalCompileFileAndParse(t *testing.T) { @@ -945,7 +948,7 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) { assert.Equal(t, map[string]string{"response_code": "200"}, metricA.Tags()) assert.Equal(t, int64(1465040505000000000), metricA.Time().UnixNano()) - metricB, err := p.ParseLine(`[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier`) + metricB, err := p.ParseLine(`[04/06/2016--12:41:46] 1.25 mystring dropme nomodifier`) require.NotNil(t, metricB) assert.NoError(t, err) assert.Equal(t, @@ -956,7 +959,7 @@ func TestTimezoneLocalCompileFileAndParse(t *testing.T) { }, metricB.Fields()) assert.Equal(t, map[string]string{}, metricB.Tags()) - assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 45, 0, time.Local).UnixNano(), metricB.Time().UnixNano()) + assert.Equal(t, time.Date(2016, time.June, 4, 12, 41, 46, 0, time.Local).UnixNano(), metricB.Time().UnixNano()) } func TestNewlineInPatterns(t *testing.T) { @@ -972,6 +975,7 @@ func TestNewlineInPatterns(t *testing.T) { } func TestSyslogTimestamp(t *testing.T) { + currentYear := time.Now().Year() tests := []struct { name string line string @@ -980,17 +984,17 @@ func TestSyslogTimestamp(t *testing.T) { { name: "two digit day of month", line: "Sep 25 09:01:55 value=42", - expected: time.Date(2018, time.September, 25, 9, 1, 55, 0, time.UTC), + expected: time.Date(currentYear, time.September, 25, 9, 1, 55, 0, time.UTC), }, { name: "one digit day of month single space", line: "Sep 2 09:01:55 value=42", - expected: time.Date(2018, time.September, 2, 9, 1, 55, 0, time.UTC), + expected: time.Date(currentYear, time.September, 2, 9, 1, 55, 0, time.UTC), }, { name: "one digit day of month double space", line: "Sep 2 09:01:55 value=42", - expected: time.Date(2018, time.September, 2, 9, 1, 55, 0, time.UTC), + expected: time.Date(currentYear, time.September, 2, 9, 1, 55, 0, time.UTC), }, } for _, tt := range tests { @@ -1022,6 +1026,92 @@ func TestReplaceTimestampComma(t *testing.T) { require.Equal(t, 2018, m.Time().Year()) require.Equal(t, 13, m.Time().Hour()) require.Equal(t, 34, m.Time().Second()) - //Convert Nanosecond to milisecond for compare + // Convert nanosecond to millisecond for compare require.Equal(t, 555, m.Time().Nanosecond()/1000000) } + +func TestDynamicMeasurementModifier(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TEST}"}, + CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:float} %{WORD:test:measurement}", + } + + require.NoError(t, p.Compile()) + m, err := p.ParseLine("4 5 hello") + require.NoError(t, err) + require.Equal(t, m.Name(), "hello") +} + +func TestStaticMeasurementModifier(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{WORD:hi:measurement} %{NUMBER:num:string}"}, + } + + require.NoError(t, p.Compile()) + m, err := p.ParseLine("test_name 42") + log.Printf("%v", m) + require.NoError(t, err) + require.Equal(t, "test_name", m.Name()) +} + +// tests that the top level measurement name is used +func TestTwoMeasurementModifier(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TEST:test_name:measurement}"}, + CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:measurement} %{WORD:var3:measurement}", + } + + 
require.NoError(t, p.Compile()) + m, err := p.ParseLine("4 5 hello") + require.NoError(t, err) + require.Equal(t, m.Name(), "4 5 hello") +} + +func TestMeasurementModifierNoName(t *testing.T) { + p := &Parser{ + Patterns: []string{"%{TEST}"}, + CustomPatterns: "TEST %{NUMBER:var1:tag} %{NUMBER:var2:float} %{WORD:hi:measurement}", + } + + require.NoError(t, p.Compile()) + m, err := p.ParseLine("4 5 hello") + require.NoError(t, err) + require.Equal(t, m.Name(), "hello") +} + +func TestEmptyYearInTimestamp(t *testing.T) { + p := &Parser{ + Patterns: []string{`%{APPLE_SYSLOG_TIME_SHORT:timestamp:ts-"Jan 2 15:04:05"} %{HOSTNAME} %{APP_NAME:app_name}\[%{NUMBER:pid:int}\]%{GREEDYDATA:message}`}, + CustomPatterns: ` + APPLE_SYSLOG_TIME_SHORT %{MONTH} +%{MONTHDAY} %{TIME} + APP_NAME [a-zA-Z0-9\.]+ + `, + } + require.NoError(t, p.Compile()) + p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: info> Scale factor of main display = 2.0") + m, err := p.ParseLine("Nov 6 13:57:03 generic iTunes[6504]: objc[6504]: Object descriptor was null.") + require.NoError(t, err) + require.NotNil(t, m) + require.Equal(t, time.Now().Year(), m.Time().Year()) +} + +func TestTrimRegression(t *testing.T) { + // https://github.com/influxdata/telegraf/issues/4998 + p := &Parser{ + Patterns: []string{`%{GREEDYDATA:message:string}`}, + } + require.NoError(t, p.Compile()) + + actual, err := p.ParseLine(`level=info msg="ok"`) + require.NoError(t, err) + + expected := testutil.MustMetric( + "", + map[string]string{}, + map[string]interface{}{ + "message": `level=info msg="ok"`, + }, + actual.Time(), + ) + require.Equal(t, expected, actual) +} diff --git a/plugins/inputs/logparser/grok/testdata/test-patterns b/plugins/parsers/grok/testdata/test-patterns similarity index 100% rename from plugins/inputs/logparser/grok/testdata/test-patterns rename to plugins/parsers/grok/testdata/test-patterns diff --git a/plugins/parsers/grok/testdata/test_a.log b/plugins/parsers/grok/testdata/test_a.log new file mode 100644 index 000000000..a44d72fdf --- /dev/null +++ b/plugins/parsers/grok/testdata/test_a.log @@ -0,0 +1 @@ +[04/Jun/2016:12:41:45 +0100] 1.25 200 192.168.1.1 5.432µs 101 diff --git a/plugins/parsers/grok/testdata/test_b.log b/plugins/parsers/grok/testdata/test_b.log new file mode 100644 index 000000000..49e2983e8 --- /dev/null +++ b/plugins/parsers/grok/testdata/test_b.log @@ -0,0 +1 @@ +[04/06/2016--12:41:45] 1.25 mystring dropme nomodifier diff --git a/plugins/parsers/influx/README.md b/plugins/parsers/influx/README.md new file mode 100644 index 000000000..51c0106e6 --- /dev/null +++ b/plugins/parsers/influx/README.md @@ -0,0 +1,20 @@ +# InfluxDB Line Protocol + +There are no additional configuration options for InfluxDB [line protocol][]. The +metrics are parsed directly into Telegraf metrics. + +[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line/ + +### Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + diff --git a/plugins/parsers/influx/handler.go b/plugins/parsers/influx/handler.go index af7445a53..ae08d5a7c 100644 --- a/plugins/parsers/influx/handler.go +++ b/plugins/parsers/influx/handler.go @@ -2,105 +2,131 @@ package influx import ( "bytes" + "errors" + "strconv" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" - "github.com/prometheus/common/log" ) +// MetricHandler implements the Handler interface and produces telegraf.Metric. type MetricHandler struct { - builder *metric.Builder - metrics []telegraf.Metric - precision time.Duration + err error + timePrecision time.Duration + timeFunc TimeFunc + metric telegraf.Metric } func NewMetricHandler() *MetricHandler { return &MetricHandler{ - builder: metric.NewBuilder(), - precision: time.Nanosecond, + timePrecision: time.Nanosecond, + timeFunc: time.Now, } } -func (h *MetricHandler) SetTimeFunc(f metric.TimeFunc) { - h.builder.TimeFunc = f +func (h *MetricHandler) SetTimePrecision(p time.Duration) { + h.timePrecision = p + // When the timestamp is omitted from the metric, the timestamp + // comes from the server clock, truncated to the nearest unit of + // measurement provided in precision. + // + // When a timestamp is provided in the metric, precision is + // overloaded to hold the unit of measurement of the timestamp. } -func (h *MetricHandler) SetTimePrecision(precision time.Duration) { - h.builder.TimePrecision = precision - h.precision = precision +func (h *MetricHandler) SetTimeFunc(f TimeFunc) { + h.timeFunc = f } func (h *MetricHandler) Metric() (telegraf.Metric, error) { - return h.builder.Metric() + if h.metric.Time().IsZero() { + h.metric.SetTime(h.timeFunc().Truncate(h.timePrecision)) + } + return h.metric, nil } -func (h *MetricHandler) SetMeasurement(name []byte) { - h.builder.SetName(nameUnescape(name)) +func (h *MetricHandler) SetMeasurement(name []byte) error { + var err error + h.metric, err = metric.New(nameUnescape(name), + nil, nil, time.Time{}) + return err } -func (h *MetricHandler) AddTag(key []byte, value []byte) { +func (h *MetricHandler) AddTag(key []byte, value []byte) error { tk := unescape(key) tv := unescape(value) - h.builder.AddTag(tk, tv) + h.metric.AddTag(tk, tv) + return nil } -func (h *MetricHandler) AddInt(key []byte, value []byte) { +func (h *MetricHandler) AddInt(key []byte, value []byte) error { fk := unescape(key) fv, err := parseIntBytes(bytes.TrimSuffix(value, []byte("i")), 10, 64) if err != nil { - log.Errorf("E! Received unparseable int value: %q: %v", value, err) - return + if numerr, ok := err.(*strconv.NumError); ok { + return numerr.Err + } + return err } - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) + return nil } -func (h *MetricHandler) AddUint(key []byte, value []byte) { +func (h *MetricHandler) AddUint(key []byte, value []byte) error { fk := unescape(key) fv, err := parseUintBytes(bytes.TrimSuffix(value, []byte("u")), 10, 64) if err != nil { - log.Errorf("E! 
Received unparseable uint value: %q: %v", value, err) - return + if numerr, ok := err.(*strconv.NumError); ok { + return numerr.Err + } + return err } - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) + return nil } -func (h *MetricHandler) AddFloat(key []byte, value []byte) { +func (h *MetricHandler) AddFloat(key []byte, value []byte) error { fk := unescape(key) fv, err := parseFloatBytes(value, 64) if err != nil { - log.Errorf("E! Received unparseable float value: %q: %v", value, err) - return + if numerr, ok := err.(*strconv.NumError); ok { + return numerr.Err + } + return err } - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) + return nil } -func (h *MetricHandler) AddString(key []byte, value []byte) { +func (h *MetricHandler) AddString(key []byte, value []byte) error { fk := unescape(key) fv := stringFieldUnescape(value) - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) + return nil } -func (h *MetricHandler) AddBool(key []byte, value []byte) { +func (h *MetricHandler) AddBool(key []byte, value []byte) error { fk := unescape(key) fv, err := parseBoolBytes(value) if err != nil { - log.Errorf("E! Received unparseable boolean value: %q: %v", value, err) - return + return errors.New("unparseable bool") } - h.builder.AddField(fk, fv) + h.metric.AddField(fk, fv) + return nil } -func (h *MetricHandler) SetTimestamp(tm []byte) { +func (h *MetricHandler) SetTimestamp(tm []byte) error { v, err := parseIntBytes(tm, 10, 64) if err != nil { - log.Errorf("E! Received unparseable timestamp: %q: %v", tm, err) - return + if numerr, ok := err.(*strconv.NumError); ok { + return numerr.Err + } + return err } - ns := v * int64(h.precision) - h.builder.SetTime(time.Unix(0, ns)) -} -func (h *MetricHandler) Reset() { - h.builder.Reset() + //time precision is overloaded to mean time unit here + ns := v * int64(h.timePrecision) + h.metric.SetTime(time.Unix(0, ns)) + return nil } diff --git a/plugins/parsers/influx/machine.go b/plugins/parsers/influx/machine.go index fef7e2d38..332b73592 100644 --- a/plugins/parsers/influx/machine.go +++ b/plugins/parsers/influx/machine.go @@ -4,53 +4,67 @@ package influx import ( "errors" + "io" ) +type readErr struct { + Err error +} + +func (e *readErr) Error() string { + return e.Err.Error() +} + var ( ErrNameParse = errors.New("expected measurement name") ErrFieldParse = errors.New("expected field") ErrTagParse = errors.New("expected tag") ErrTimestampParse = errors.New("expected timestamp") ErrParse = errors.New("parse error") + EOF = errors.New("EOF") ) -//line plugins/parsers/influx/machine.go.rl:226 +//line plugins/parsers/influx/machine.go.rl:318 -//line plugins/parsers/influx/machine.go:23 -const LineProtocol_start int = 1 -const LineProtocol_first_final int = 206 +//line plugins/parsers/influx/machine.go:33 +const LineProtocol_start int = 269 +const LineProtocol_first_final int = 269 const LineProtocol_error int = 0 -const LineProtocol_en_main int = 1 -const LineProtocol_en_discard_line int = 195 -const LineProtocol_en_align int = 196 -const LineProtocol_en_series int = 199 +const LineProtocol_en_main int = 269 +const LineProtocol_en_discard_line int = 257 +const LineProtocol_en_align int = 739 +const LineProtocol_en_series int = 260 -//line plugins/parsers/influx/machine.go.rl:229 +//line plugins/parsers/influx/machine.go.rl:321 type Handler interface { - SetMeasurement(name []byte) - AddTag(key []byte, value []byte) - AddInt(key []byte, value []byte) - AddUint(key []byte, value []byte) - AddFloat(key []byte, value []byte) - AddString(key 
[]byte, value []byte) - AddBool(key []byte, value []byte) - SetTimestamp(tm []byte) + SetMeasurement(name []byte) error + AddTag(key []byte, value []byte) error + AddInt(key []byte, value []byte) error + AddUint(key []byte, value []byte) error + AddFloat(key []byte, value []byte) error + AddString(key []byte, value []byte) error + AddBool(key []byte, value []byte) error + SetTimestamp(tm []byte) error } type machine struct { - data []byte - cs int - p, pe, eof int - pb int - handler Handler - initState int - err error + data []byte + cs int + p, pe, eof int + pb int + lineno int + sol int + handler Handler + initState int + key []byte + beginMetric bool + finishMetric bool } func NewMachine(handler Handler) *machine { @@ -60,22 +74,24 @@ func NewMachine(handler Handler) *machine { } -//line plugins/parsers/influx/machine.go.rl:258 +//line plugins/parsers/influx/machine.go.rl:354 -//line plugins/parsers/influx/machine.go.rl:259 +//line plugins/parsers/influx/machine.go.rl:355 -//line plugins/parsers/influx/machine.go.rl:260 +//line plugins/parsers/influx/machine.go.rl:356 -//line plugins/parsers/influx/machine.go.rl:261 +//line plugins/parsers/influx/machine.go.rl:357 -//line plugins/parsers/influx/machine.go.rl:262 +//line plugins/parsers/influx/machine.go.rl:358 -//line plugins/parsers/influx/machine.go:74 +//line plugins/parsers/influx/machine.go.rl:359 + +//line plugins/parsers/influx/machine.go:90 { - m.cs = LineProtocol_start + ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:263 +//line plugins/parsers/influx/machine.go.rl:360 return m } @@ -87,22 +103,22 @@ func NewSeriesMachine(handler Handler) *machine { } -//line plugins/parsers/influx/machine.go.rl:274 +//line plugins/parsers/influx/machine.go.rl:371 -//line plugins/parsers/influx/machine.go.rl:275 +//line plugins/parsers/influx/machine.go.rl:372 -//line plugins/parsers/influx/machine.go.rl:276 +//line plugins/parsers/influx/machine.go.rl:373 -//line plugins/parsers/influx/machine.go.rl:277 +//line plugins/parsers/influx/machine.go.rl:374 -//line plugins/parsers/influx/machine.go.rl:278 +//line plugins/parsers/influx/machine.go.rl:375 -//line plugins/parsers/influx/machine.go:101 +//line plugins/parsers/influx/machine.go:117 { - m.cs = LineProtocol_start + ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:279 +//line plugins/parsers/influx/machine.go.rl:376 return m } @@ -111,34 +127,44 @@ func (m *machine) SetData(data []byte) { m.data = data m.p = 0 m.pb = 0 + m.lineno = 1 + m.sol = 0 m.pe = len(data) m.eof = len(data) - m.err = nil + m.key = nil + m.beginMetric = false + m.finishMetric = false -//line plugins/parsers/influx/machine.go:120 +//line plugins/parsers/influx/machine.go:140 { - m.cs = LineProtocol_start + ( m.cs) = LineProtocol_start } -//line plugins/parsers/influx/machine.go.rl:292 +//line plugins/parsers/influx/machine.go.rl:393 m.cs = m.initState } -// ParseLine parses a line of input and returns true if more data can be -// parsed. -func (m *machine) ParseLine() bool { - if m.data == nil || m.p >= m.pe { - m.err = nil - return false +// Next parses the next metric line and returns nil if it was successfully +// processed. If the line contains a syntax error an error is returned, +// otherwise if the end of file is reached before finding a metric line then +// EOF is returned. 
+func (m *machine) Next() error { + if m.p == m.pe && m.pe == m.eof { + return EOF } - m.err = nil - var key []byte - var yield bool + m.key = nil + m.beginMetric = false + m.finishMetric = false + return m.exec() +} + +func (m *machine) exec() error { + var err error -//line plugins/parsers/influx/machine.go:142 +//line plugins/parsers/influx/machine.go:168 { if ( m.p) == ( m.pe) { goto _test_eof @@ -146,71 +172,33 @@ func (m *machine) ParseLine() bool { goto _resume _again: - switch m.cs { + switch ( m.cs) { + case 269: + goto st269 case 1: goto st1 case 2: goto st2 case 3: goto st3 - case 4: - goto st4 case 0: goto st0 + case 4: + goto st4 case 5: goto st5 case 6: goto st6 + case 270: + goto st270 + case 271: + goto st271 + case 272: + goto st272 case 7: goto st7 - case 206: - goto st206 - case 207: - goto st207 - case 208: - goto st208 case 8: goto st8 - case 209: - goto st209 - case 210: - goto st210 - case 211: - goto st211 - case 212: - goto st212 - case 213: - goto st213 - case 214: - goto st214 - case 215: - goto st215 - case 216: - goto st216 - case 217: - goto st217 - case 218: - goto st218 - case 219: - goto st219 - case 220: - goto st220 - case 221: - goto st221 - case 222: - goto st222 - case 223: - goto st223 - case 224: - goto st224 - case 225: - goto st225 - case 226: - goto st226 - case 227: - goto st227 - case 228: - goto st228 case 9: goto st9 case 10: @@ -221,54 +209,26 @@ _again: goto st12 case 13: goto st13 - case 229: - goto st229 case 14: goto st14 case 15: goto st15 - case 230: - goto st230 - case 231: - goto st231 - case 232: - goto st232 - case 233: - goto st233 - case 234: - goto st234 - case 235: - goto st235 - case 236: - goto st236 - case 237: - goto st237 - case 238: - goto st238 case 16: goto st16 case 17: goto st17 case 18: goto st18 - case 239: - goto st239 case 19: goto st19 case 20: goto st20 case 21: goto st21 - case 240: - goto st240 case 22: goto st22 case 23: goto st23 - case 241: - goto st241 - case 242: - goto st242 case 24: goto st24 case 25: @@ -285,104 +245,22 @@ _again: goto st30 case 31: goto st31 - case 32: - goto st32 - case 33: - goto st33 - case 34: - goto st34 - case 35: - goto st35 - case 36: - goto st36 - case 37: - goto st37 - case 38: - goto st38 - case 39: - goto st39 - case 40: - goto st40 - case 41: - goto st41 - case 42: - goto st42 - case 243: - goto st243 - case 244: - goto st244 - case 43: - goto st43 - case 245: - goto st245 - case 246: - goto st246 - case 247: - goto st247 - case 248: - goto st248 - case 249: - goto st249 - case 250: - goto st250 - case 251: - goto st251 - case 252: - goto st252 - case 253: - goto st253 - case 254: - goto st254 - case 255: - goto st255 - case 256: - goto st256 - case 257: - goto st257 - case 258: - goto st258 - case 259: - goto st259 - case 260: - goto st260 - case 261: - goto st261 - case 262: - goto st262 - case 263: - goto st263 - case 264: - goto st264 - case 44: - goto st44 - case 265: - goto st265 - case 266: - goto st266 - case 45: - goto st45 - case 267: - goto st267 - case 268: - goto st268 - case 269: - goto st269 - case 270: - goto st270 - case 271: - goto st271 - case 272: - goto st272 case 273: goto st273 case 274: goto st274 + case 32: + goto st32 + case 33: + goto st33 case 275: goto st275 case 276: goto st276 case 277: goto st277 + case 34: + goto st34 case 278: goto st278 case 279: @@ -401,32 +279,12 @@ _again: goto st285 case 286: goto st286 - case 46: - goto st46 - case 47: - goto st47 - case 48: - goto st48 case 287: goto st287 - case 49: - goto st49 - case 50: - goto 
st50 - case 51: - goto st51 - case 52: - goto st52 - case 53: - goto st53 case 288: goto st288 - case 54: - goto st54 case 289: goto st289 - case 55: - goto st55 case 290: goto st290 case 291: @@ -439,48 +297,38 @@ _again: goto st294 case 295: goto st295 + case 35: + goto st35 + case 36: + goto st36 case 296: goto st296 case 297: goto st297 case 298: goto st298 - case 56: - goto st56 - case 57: - goto st57 - case 58: - goto st58 + case 37: + goto st37 + case 38: + goto st38 + case 39: + goto st39 + case 40: + goto st40 + case 41: + goto st41 case 299: goto st299 - case 59: - goto st59 - case 60: - goto st60 - case 61: - goto st61 case 300: goto st300 - case 62: - goto st62 - case 63: - goto st63 case 301: goto st301 case 302: goto st302 - case 64: - goto st64 - case 65: - goto st65 - case 66: - goto st66 + case 42: + goto st42 case 303: goto st303 - case 67: - goto st67 - case 68: - goto st68 case 304: goto st304 case 305: @@ -499,42 +347,14 @@ _again: goto st311 case 312: goto st312 - case 69: - goto st69 - case 70: - goto st70 - case 71: - goto st71 case 313: goto st313 - case 72: - goto st72 - case 73: - goto st73 - case 74: - goto st74 case 314: goto st314 - case 75: - goto st75 - case 76: - goto st76 case 315: goto st315 case 316: goto st316 - case 77: - goto st77 - case 78: - goto st78 - case 79: - goto st79 - case 80: - goto st80 - case 81: - goto st81 - case 82: - goto st82 case 317: goto st317 case 318: @@ -543,8 +363,6 @@ _again: goto st319 case 320: goto st320 - case 83: - goto st83 case 321: goto st321 case 322: @@ -553,18 +371,50 @@ _again: goto st323 case 324: goto st324 - case 84: - goto st84 + case 43: + goto st43 + case 44: + goto st44 + case 45: + goto st45 + case 46: + goto st46 + case 47: + goto st47 + case 48: + goto st48 + case 49: + goto st49 + case 50: + goto st50 + case 51: + goto st51 + case 52: + goto st52 case 325: goto st325 case 326: goto st326 case 327: goto st327 + case 53: + goto st53 + case 54: + goto st54 + case 55: + goto st55 + case 56: + goto st56 + case 57: + goto st57 + case 58: + goto st58 case 328: goto st328 case 329: goto st329 + case 59: + goto st59 case 330: goto st330 case 331: @@ -591,38 +441,10 @@ _again: goto st341 case 342: goto st342 - case 85: - goto st85 - case 86: - goto st86 - case 87: - goto st87 - case 88: - goto st88 - case 89: - goto st89 - case 90: - goto st90 - case 91: - goto st91 - case 92: - goto st92 - case 93: - goto st93 - case 94: - goto st94 - case 95: - goto st95 - case 96: - goto st96 - case 97: - goto st97 case 343: goto st343 case 344: goto st344 - case 98: - goto st98 case 345: goto st345 case 346: @@ -633,12 +455,16 @@ _again: goto st348 case 349: goto st349 + case 60: + goto st60 case 350: goto st350 case 351: goto st351 case 352: goto st352 + case 61: + goto st61 case 353: goto st353 case 354: @@ -663,16 +489,10 @@ _again: goto st363 case 364: goto st364 - case 99: - goto st99 - case 100: - goto st100 case 365: goto st365 case 366: goto st366 - case 101: - goto st101 case 367: goto st367 case 368: @@ -685,20 +505,48 @@ _again: goto st371 case 372: goto st372 + case 62: + goto st62 + case 63: + goto st63 + case 64: + goto st64 + case 65: + goto st65 + case 66: + goto st66 case 373: goto st373 + case 67: + goto st67 + case 68: + goto st68 + case 69: + goto st69 + case 70: + goto st70 + case 71: + goto st71 case 374: goto st374 case 375: goto st375 case 376: goto st376 + case 72: + goto st72 + case 73: + goto st73 + case 74: + goto st74 case 377: goto st377 case 378: goto st378 case 379: goto st379 + case 75: + goto 
st75 case 380: goto st380 case 381: @@ -713,28 +561,12 @@ _again: goto st385 case 386: goto st386 - case 102: - goto st102 case 387: goto st387 case 388: goto st388 - case 103: - goto st103 - case 104: - goto st104 - case 105: - goto st105 - case 106: - goto st106 - case 107: - goto st107 case 389: goto st389 - case 108: - goto st108 - case 109: - goto st109 case 390: goto st390 case 391: @@ -753,72 +585,80 @@ _again: goto st397 case 398: goto st398 - case 110: - goto st110 - case 111: - goto st111 - case 112: - goto st112 case 399: goto st399 - case 113: - goto st113 - case 114: - goto st114 - case 115: - goto st115 + case 76: + goto st76 + case 77: + goto st77 + case 78: + goto st78 + case 79: + goto st79 + case 80: + goto st80 + case 81: + goto st81 + case 82: + goto st82 + case 83: + goto st83 + case 84: + goto st84 + case 85: + goto st85 + case 86: + goto st86 + case 87: + goto st87 + case 88: + goto st88 + case 89: + goto st89 case 400: goto st400 - case 116: - goto st116 - case 117: - goto st117 case 401: goto st401 case 402: goto st402 - case 118: - goto st118 - case 119: - goto st119 - case 120: - goto st120 - case 121: - goto st121 - case 122: - goto st122 - case 123: - goto st123 - case 124: - goto st124 - case 125: - goto st125 - case 126: - goto st126 - case 127: - goto st127 - case 128: - goto st128 - case 129: - goto st129 case 403: goto st403 + case 90: + goto st90 + case 91: + goto st91 + case 92: + goto st92 + case 93: + goto st93 case 404: goto st404 case 405: goto st405 - case 130: - goto st130 + case 94: + goto st94 + case 95: + goto st95 case 406: goto st406 + case 96: + goto st96 + case 97: + goto st97 case 407: goto st407 case 408: goto st408 + case 98: + goto st98 case 409: goto st409 case 410: goto st410 + case 99: + goto st99 + case 100: + goto st100 case 411: goto st411 case 412: @@ -853,24 +693,28 @@ _again: goto st426 case 427: goto st427 - case 131: - goto st131 case 428: goto st428 + case 101: + goto st101 case 429: goto st429 case 430: goto st430 case 431: goto st431 - case 132: - goto st132 + case 102: + goto st102 + case 103: + goto st103 case 432: goto st432 case 433: goto st433 case 434: goto st434 + case 104: + goto st104 case 435: goto st435 case 436: @@ -905,20 +749,14 @@ _again: goto st450 case 451: goto st451 - case 133: - goto st133 - case 134: - goto st134 - case 135: - goto st135 case 452: goto st452 case 453: goto st453 - case 136: - goto st136 case 454: goto st454 + case 105: + goto st105 case 455: goto st455 case 456: @@ -957,22 +795,32 @@ _again: goto st472 case 473: goto st473 - case 137: - goto st137 case 474: goto st474 case 475: goto st475 case 476: goto st476 - case 138: - goto st138 + case 106: + goto st106 + case 107: + goto st107 + case 108: + goto st108 + case 109: + goto st109 + case 110: + goto st110 case 477: goto st477 + case 111: + goto st111 case 478: goto st478 case 479: goto st479 + case 112: + goto st112 case 480: goto st480 case 481: @@ -991,28 +839,52 @@ _again: goto st487 case 488: goto st488 + case 113: + goto st113 + case 114: + goto st114 + case 115: + goto st115 case 489: goto st489 + case 116: + goto st116 + case 117: + goto st117 + case 118: + goto st118 case 490: goto st490 + case 119: + goto st119 + case 120: + goto st120 case 491: goto st491 case 492: goto st492 + case 121: + goto st121 + case 122: + goto st122 + case 123: + goto st123 + case 124: + goto st124 case 493: goto st493 case 494: goto st494 case 495: goto st495 + case 125: + goto st125 case 496: goto st496 case 497: goto st497 case 498: goto st498 - 
case 139: - goto st139 case 499: goto st499 case 500: @@ -1047,6 +919,10 @@ _again: goto st514 case 515: goto st515 + case 126: + goto st126 + case 127: + goto st127 case 516: goto st516 case 517: @@ -1057,86 +933,90 @@ _again: goto st519 case 520: goto st520 - case 140: - goto st140 - case 141: - goto st141 - case 142: - goto st142 - case 143: - goto st143 - case 144: - goto st144 case 521: goto st521 - case 145: - goto st145 case 522: goto st522 - case 146: - goto st146 case 523: goto st523 case 524: goto st524 + case 128: + goto st128 + case 129: + goto st129 + case 130: + goto st130 case 525: goto st525 + case 131: + goto st131 + case 132: + goto st132 + case 133: + goto st133 case 526: goto st526 + case 134: + goto st134 + case 135: + goto st135 case 527: goto st527 case 528: goto st528 + case 136: + goto st136 + case 137: + goto st137 + case 138: + goto st138 case 529: goto st529 case 530: goto st530 + case 139: + goto st139 case 531: goto st531 - case 147: - goto st147 - case 148: - goto st148 - case 149: - goto st149 + case 140: + goto st140 case 532: goto st532 - case 150: - goto st150 - case 151: - goto st151 - case 152: - goto st152 case 533: goto st533 - case 153: - goto st153 - case 154: - goto st154 case 534: goto st534 case 535: goto st535 - case 155: - goto st155 - case 156: - goto st156 - case 157: - goto st157 case 536: goto st536 case 537: goto st537 case 538: goto st538 - case 158: - goto st158 case 539: goto st539 + case 141: + goto st141 + case 142: + goto st142 + case 143: + goto st143 case 540: goto st540 + case 144: + goto st144 + case 145: + goto st145 + case 146: + goto st146 case 541: goto st541 + case 147: + goto st147 + case 148: + goto st148 case 542: goto st542 case 543: @@ -1171,70 +1051,46 @@ _again: goto st557 case 558: goto st558 - case 159: - goto st159 - case 160: - goto st160 case 559: goto st559 case 560: goto st560 case 561: goto st561 + case 149: + goto st149 + case 150: + goto st150 case 562: goto st562 case 563: goto st563 case 564: goto st564 + case 151: + goto st151 case 565: goto st565 case 566: goto st566 + case 152: + goto st152 case 567: goto st567 - case 161: - goto st161 - case 162: - goto st162 - case 163: - goto st163 case 568: goto st568 - case 164: - goto st164 - case 165: - goto st165 - case 166: - goto st166 case 569: goto st569 - case 167: - goto st167 - case 168: - goto st168 case 570: goto st570 case 571: goto st571 - case 169: - goto st169 - case 170: - goto st170 - case 171: - goto st171 - case 172: - goto st172 case 572: goto st572 - case 173: - goto st173 case 573: goto st573 case 574: goto st574 - case 174: - goto st174 case 575: goto st575 case 576: @@ -1253,40 +1109,22 @@ _again: goto st582 case 583: goto st583 - case 175: - goto st175 - case 176: - goto st176 - case 177: - goto st177 case 584: goto st584 - case 178: - goto st178 - case 179: - goto st179 - case 180: - goto st180 + case 153: + goto st153 + case 154: + goto st154 case 585: goto st585 - case 181: - goto st181 - case 182: - goto st182 + case 155: + goto st155 case 586: goto st586 case 587: goto st587 - case 183: - goto st183 - case 184: - goto st184 case 588: goto st588 - case 185: - goto st185 - case 186: - goto st186 case 589: goto st589 case 590: @@ -1297,143 +1135,565 @@ _again: goto st592 case 593: goto st593 + case 156: + goto st156 + case 157: + goto st157 + case 158: + goto st158 case 594: goto st594 + case 159: + goto st159 + case 160: + goto st160 + case 161: + goto st161 case 595: goto st595 + case 162: + goto st162 + case 163: + goto st163 case 
596: goto st596 - case 187: - goto st187 - case 188: - goto st188 - case 189: - goto st189 case 597: goto st597 - case 190: - goto st190 - case 191: - goto st191 - case 192: - goto st192 + case 164: + goto st164 + case 165: + goto st165 + case 166: + goto st166 + case 167: + goto st167 + case 168: + goto st168 + case 169: + goto st169 case 598: goto st598 - case 193: - goto st193 - case 194: - goto st194 case 599: goto st599 case 600: goto st600 - case 195: - goto st195 case 601: goto st601 - case 196: - goto st196 case 602: goto st602 case 603: goto st603 - case 197: - goto st197 - case 198: - goto st198 - case 199: - goto st199 case 604: goto st604 case 605: goto st605 case 606: goto st606 + case 607: + goto st607 + case 608: + goto st608 + case 609: + goto st609 + case 610: + goto st610 + case 611: + goto st611 + case 612: + goto st612 + case 613: + goto st613 + case 614: + goto st614 + case 615: + goto st615 + case 616: + goto st616 + case 170: + goto st170 + case 171: + goto st171 + case 172: + goto st172 + case 617: + goto st617 + case 618: + goto st618 + case 619: + goto st619 + case 173: + goto st173 + case 620: + goto st620 + case 621: + goto st621 + case 174: + goto st174 + case 622: + goto st622 + case 623: + goto st623 + case 624: + goto st624 + case 625: + goto st625 + case 626: + goto st626 + case 175: + goto st175 + case 176: + goto st176 + case 177: + goto st177 + case 627: + goto st627 + case 178: + goto st178 + case 179: + goto st179 + case 180: + goto st180 + case 628: + goto st628 + case 181: + goto st181 + case 182: + goto st182 + case 629: + goto st629 + case 630: + goto st630 + case 183: + goto st183 + case 631: + goto st631 + case 632: + goto st632 + case 633: + goto st633 + case 184: + goto st184 + case 185: + goto st185 + case 186: + goto st186 + case 634: + goto st634 + case 187: + goto st187 + case 188: + goto st188 + case 189: + goto st189 + case 635: + goto st635 + case 190: + goto st190 + case 191: + goto st191 + case 636: + goto st636 + case 637: + goto st637 + case 192: + goto st192 + case 193: + goto st193 + case 194: + goto st194 + case 638: + goto st638 + case 195: + goto st195 + case 196: + goto st196 + case 639: + goto st639 + case 640: + goto st640 + case 641: + goto st641 + case 642: + goto st642 + case 643: + goto st643 + case 644: + goto st644 + case 645: + goto st645 + case 646: + goto st646 + case 197: + goto st197 + case 198: + goto st198 + case 199: + goto st199 + case 647: + goto st647 case 200: goto st200 case 201: goto st201 case 202: goto st202 - case 607: - goto st607 + case 648: + goto st648 case 203: goto st203 case 204: goto st204 + case 649: + goto st649 + case 650: + goto st650 case 205: goto st205 + case 206: + goto st206 + case 207: + goto st207 + case 651: + goto st651 + case 652: + goto st652 + case 653: + goto st653 + case 654: + goto st654 + case 655: + goto st655 + case 656: + goto st656 + case 657: + goto st657 + case 658: + goto st658 + case 659: + goto st659 + case 660: + goto st660 + case 661: + goto st661 + case 662: + goto st662 + case 663: + goto st663 + case 664: + goto st664 + case 665: + goto st665 + case 666: + goto st666 + case 667: + goto st667 + case 668: + goto st668 + case 669: + goto st669 + case 208: + goto st208 + case 209: + goto st209 + case 210: + goto st210 + case 211: + goto st211 + case 212: + goto st212 + case 670: + goto st670 + case 213: + goto st213 + case 214: + goto st214 + case 671: + goto st671 + case 672: + goto st672 + case 673: + goto st673 + case 674: + goto st674 + case 675: + goto st675 + 
case 676: + goto st676 + case 677: + goto st677 + case 678: + goto st678 + case 679: + goto st679 + case 215: + goto st215 + case 216: + goto st216 + case 217: + goto st217 + case 680: + goto st680 + case 218: + goto st218 + case 219: + goto st219 + case 220: + goto st220 + case 681: + goto st681 + case 221: + goto st221 + case 222: + goto st222 + case 682: + goto st682 + case 683: + goto st683 + case 223: + goto st223 + case 224: + goto st224 + case 225: + goto st225 + case 684: + goto st684 + case 226: + goto st226 + case 227: + goto st227 + case 685: + goto st685 + case 686: + goto st686 + case 687: + goto st687 + case 688: + goto st688 + case 689: + goto st689 + case 690: + goto st690 + case 691: + goto st691 + case 692: + goto st692 + case 228: + goto st228 + case 229: + goto st229 + case 230: + goto st230 + case 693: + goto st693 + case 231: + goto st231 + case 232: + goto st232 + case 694: + goto st694 + case 695: + goto st695 + case 696: + goto st696 + case 697: + goto st697 + case 698: + goto st698 + case 699: + goto st699 + case 700: + goto st700 + case 701: + goto st701 + case 233: + goto st233 + case 234: + goto st234 + case 235: + goto st235 + case 702: + goto st702 + case 236: + goto st236 + case 237: + goto st237 + case 238: + goto st238 + case 703: + goto st703 + case 239: + goto st239 + case 240: + goto st240 + case 704: + goto st704 + case 705: + goto st705 + case 241: + goto st241 + case 242: + goto st242 + case 243: + goto st243 + case 706: + goto st706 + case 707: + goto st707 + case 708: + goto st708 + case 709: + goto st709 + case 710: + goto st710 + case 711: + goto st711 + case 712: + goto st712 + case 713: + goto st713 + case 714: + goto st714 + case 715: + goto st715 + case 716: + goto st716 + case 717: + goto st717 + case 718: + goto st718 + case 719: + goto st719 + case 720: + goto st720 + case 721: + goto st721 + case 722: + goto st722 + case 723: + goto st723 + case 724: + goto st724 + case 244: + goto st244 + case 245: + goto st245 + case 725: + goto st725 + case 246: + goto st246 + case 247: + goto st247 + case 726: + goto st726 + case 727: + goto st727 + case 728: + goto st728 + case 729: + goto st729 + case 730: + goto st730 + case 731: + goto st731 + case 732: + goto st732 + case 733: + goto st733 + case 248: + goto st248 + case 249: + goto st249 + case 250: + goto st250 + case 734: + goto st734 + case 251: + goto st251 + case 252: + goto st252 + case 253: + goto st253 + case 735: + goto st735 + case 254: + goto st254 + case 255: + goto st255 + case 736: + goto st736 + case 737: + goto st737 + case 256: + goto st256 + case 257: + goto st257 + case 738: + goto st738 + case 260: + goto st260 + case 740: + goto st740 + case 741: + goto st741 + case 261: + goto st261 + case 262: + goto st262 + case 263: + goto st263 + case 264: + goto st264 + case 742: + goto st742 + case 265: + goto st265 + case 743: + goto st743 + case 266: + goto st266 + case 267: + goto st267 + case 268: + goto st268 + case 739: + goto st739 + case 258: + goto st258 + case 259: + goto st259 } if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof } _resume: - switch m.cs { + switch ( m.cs) { + case 269: + goto st_case_269 case 1: goto st_case_1 case 2: goto st_case_2 case 3: goto st_case_3 - case 4: - goto st_case_4 case 0: goto st_case_0 + case 4: + goto st_case_4 case 5: goto st_case_5 case 6: goto st_case_6 + case 270: + goto st_case_270 + case 271: + goto st_case_271 + case 272: + goto st_case_272 case 7: goto st_case_7 - case 206: - goto st_case_206 - case 207: - goto st_case_207 - case 
208: - goto st_case_208 case 8: goto st_case_8 - case 209: - goto st_case_209 - case 210: - goto st_case_210 - case 211: - goto st_case_211 - case 212: - goto st_case_212 - case 213: - goto st_case_213 - case 214: - goto st_case_214 - case 215: - goto st_case_215 - case 216: - goto st_case_216 - case 217: - goto st_case_217 - case 218: - goto st_case_218 - case 219: - goto st_case_219 - case 220: - goto st_case_220 - case 221: - goto st_case_221 - case 222: - goto st_case_222 - case 223: - goto st_case_223 - case 224: - goto st_case_224 - case 225: - goto st_case_225 - case 226: - goto st_case_226 - case 227: - goto st_case_227 - case 228: - goto st_case_228 case 9: goto st_case_9 case 10: @@ -1444,54 +1704,26 @@ _resume: goto st_case_12 case 13: goto st_case_13 - case 229: - goto st_case_229 case 14: goto st_case_14 case 15: goto st_case_15 - case 230: - goto st_case_230 - case 231: - goto st_case_231 - case 232: - goto st_case_232 - case 233: - goto st_case_233 - case 234: - goto st_case_234 - case 235: - goto st_case_235 - case 236: - goto st_case_236 - case 237: - goto st_case_237 - case 238: - goto st_case_238 case 16: goto st_case_16 case 17: goto st_case_17 case 18: goto st_case_18 - case 239: - goto st_case_239 case 19: goto st_case_19 case 20: goto st_case_20 case 21: goto st_case_21 - case 240: - goto st_case_240 case 22: goto st_case_22 case 23: goto st_case_23 - case 241: - goto st_case_241 - case 242: - goto st_case_242 case 24: goto st_case_24 case 25: @@ -1508,104 +1740,22 @@ _resume: goto st_case_30 case 31: goto st_case_31 - case 32: - goto st_case_32 - case 33: - goto st_case_33 - case 34: - goto st_case_34 - case 35: - goto st_case_35 - case 36: - goto st_case_36 - case 37: - goto st_case_37 - case 38: - goto st_case_38 - case 39: - goto st_case_39 - case 40: - goto st_case_40 - case 41: - goto st_case_41 - case 42: - goto st_case_42 - case 243: - goto st_case_243 - case 244: - goto st_case_244 - case 43: - goto st_case_43 - case 245: - goto st_case_245 - case 246: - goto st_case_246 - case 247: - goto st_case_247 - case 248: - goto st_case_248 - case 249: - goto st_case_249 - case 250: - goto st_case_250 - case 251: - goto st_case_251 - case 252: - goto st_case_252 - case 253: - goto st_case_253 - case 254: - goto st_case_254 - case 255: - goto st_case_255 - case 256: - goto st_case_256 - case 257: - goto st_case_257 - case 258: - goto st_case_258 - case 259: - goto st_case_259 - case 260: - goto st_case_260 - case 261: - goto st_case_261 - case 262: - goto st_case_262 - case 263: - goto st_case_263 - case 264: - goto st_case_264 - case 44: - goto st_case_44 - case 265: - goto st_case_265 - case 266: - goto st_case_266 - case 45: - goto st_case_45 - case 267: - goto st_case_267 - case 268: - goto st_case_268 - case 269: - goto st_case_269 - case 270: - goto st_case_270 - case 271: - goto st_case_271 - case 272: - goto st_case_272 case 273: goto st_case_273 case 274: goto st_case_274 + case 32: + goto st_case_32 + case 33: + goto st_case_33 case 275: goto st_case_275 case 276: goto st_case_276 case 277: goto st_case_277 + case 34: + goto st_case_34 case 278: goto st_case_278 case 279: @@ -1624,32 +1774,12 @@ _resume: goto st_case_285 case 286: goto st_case_286 - case 46: - goto st_case_46 - case 47: - goto st_case_47 - case 48: - goto st_case_48 case 287: goto st_case_287 - case 49: - goto st_case_49 - case 50: - goto st_case_50 - case 51: - goto st_case_51 - case 52: - goto st_case_52 - case 53: - goto st_case_53 case 288: goto st_case_288 - case 54: - goto st_case_54 
case 289: goto st_case_289 - case 55: - goto st_case_55 case 290: goto st_case_290 case 291: @@ -1662,48 +1792,38 @@ _resume: goto st_case_294 case 295: goto st_case_295 + case 35: + goto st_case_35 + case 36: + goto st_case_36 case 296: goto st_case_296 case 297: goto st_case_297 case 298: goto st_case_298 - case 56: - goto st_case_56 - case 57: - goto st_case_57 - case 58: - goto st_case_58 + case 37: + goto st_case_37 + case 38: + goto st_case_38 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 41: + goto st_case_41 case 299: goto st_case_299 - case 59: - goto st_case_59 - case 60: - goto st_case_60 - case 61: - goto st_case_61 case 300: goto st_case_300 - case 62: - goto st_case_62 - case 63: - goto st_case_63 case 301: goto st_case_301 case 302: goto st_case_302 - case 64: - goto st_case_64 - case 65: - goto st_case_65 - case 66: - goto st_case_66 + case 42: + goto st_case_42 case 303: goto st_case_303 - case 67: - goto st_case_67 - case 68: - goto st_case_68 case 304: goto st_case_304 case 305: @@ -1722,42 +1842,14 @@ _resume: goto st_case_311 case 312: goto st_case_312 - case 69: - goto st_case_69 - case 70: - goto st_case_70 - case 71: - goto st_case_71 case 313: goto st_case_313 - case 72: - goto st_case_72 - case 73: - goto st_case_73 - case 74: - goto st_case_74 case 314: goto st_case_314 - case 75: - goto st_case_75 - case 76: - goto st_case_76 case 315: goto st_case_315 case 316: goto st_case_316 - case 77: - goto st_case_77 - case 78: - goto st_case_78 - case 79: - goto st_case_79 - case 80: - goto st_case_80 - case 81: - goto st_case_81 - case 82: - goto st_case_82 case 317: goto st_case_317 case 318: @@ -1766,8 +1858,6 @@ _resume: goto st_case_319 case 320: goto st_case_320 - case 83: - goto st_case_83 case 321: goto st_case_321 case 322: @@ -1776,18 +1866,50 @@ _resume: goto st_case_323 case 324: goto st_case_324 - case 84: - goto st_case_84 + case 43: + goto st_case_43 + case 44: + goto st_case_44 + case 45: + goto st_case_45 + case 46: + goto st_case_46 + case 47: + goto st_case_47 + case 48: + goto st_case_48 + case 49: + goto st_case_49 + case 50: + goto st_case_50 + case 51: + goto st_case_51 + case 52: + goto st_case_52 case 325: goto st_case_325 case 326: goto st_case_326 case 327: goto st_case_327 + case 53: + goto st_case_53 + case 54: + goto st_case_54 + case 55: + goto st_case_55 + case 56: + goto st_case_56 + case 57: + goto st_case_57 + case 58: + goto st_case_58 case 328: goto st_case_328 case 329: goto st_case_329 + case 59: + goto st_case_59 case 330: goto st_case_330 case 331: @@ -1814,38 +1936,10 @@ _resume: goto st_case_341 case 342: goto st_case_342 - case 85: - goto st_case_85 - case 86: - goto st_case_86 - case 87: - goto st_case_87 - case 88: - goto st_case_88 - case 89: - goto st_case_89 - case 90: - goto st_case_90 - case 91: - goto st_case_91 - case 92: - goto st_case_92 - case 93: - goto st_case_93 - case 94: - goto st_case_94 - case 95: - goto st_case_95 - case 96: - goto st_case_96 - case 97: - goto st_case_97 case 343: goto st_case_343 case 344: goto st_case_344 - case 98: - goto st_case_98 case 345: goto st_case_345 case 346: @@ -1856,12 +1950,16 @@ _resume: goto st_case_348 case 349: goto st_case_349 + case 60: + goto st_case_60 case 350: goto st_case_350 case 351: goto st_case_351 case 352: goto st_case_352 + case 61: + goto st_case_61 case 353: goto st_case_353 case 354: @@ -1886,16 +1984,10 @@ _resume: goto st_case_363 case 364: goto st_case_364 - case 99: - goto st_case_99 - case 100: - goto st_case_100 case 365: goto 
st_case_365 case 366: goto st_case_366 - case 101: - goto st_case_101 case 367: goto st_case_367 case 368: @@ -1908,20 +2000,48 @@ _resume: goto st_case_371 case 372: goto st_case_372 + case 62: + goto st_case_62 + case 63: + goto st_case_63 + case 64: + goto st_case_64 + case 65: + goto st_case_65 + case 66: + goto st_case_66 case 373: goto st_case_373 + case 67: + goto st_case_67 + case 68: + goto st_case_68 + case 69: + goto st_case_69 + case 70: + goto st_case_70 + case 71: + goto st_case_71 case 374: goto st_case_374 case 375: goto st_case_375 case 376: goto st_case_376 + case 72: + goto st_case_72 + case 73: + goto st_case_73 + case 74: + goto st_case_74 case 377: goto st_case_377 case 378: goto st_case_378 case 379: goto st_case_379 + case 75: + goto st_case_75 case 380: goto st_case_380 case 381: @@ -1936,28 +2056,12 @@ _resume: goto st_case_385 case 386: goto st_case_386 - case 102: - goto st_case_102 case 387: goto st_case_387 case 388: goto st_case_388 - case 103: - goto st_case_103 - case 104: - goto st_case_104 - case 105: - goto st_case_105 - case 106: - goto st_case_106 - case 107: - goto st_case_107 case 389: goto st_case_389 - case 108: - goto st_case_108 - case 109: - goto st_case_109 case 390: goto st_case_390 case 391: @@ -1976,72 +2080,80 @@ _resume: goto st_case_397 case 398: goto st_case_398 - case 110: - goto st_case_110 - case 111: - goto st_case_111 - case 112: - goto st_case_112 case 399: goto st_case_399 - case 113: - goto st_case_113 - case 114: - goto st_case_114 - case 115: - goto st_case_115 + case 76: + goto st_case_76 + case 77: + goto st_case_77 + case 78: + goto st_case_78 + case 79: + goto st_case_79 + case 80: + goto st_case_80 + case 81: + goto st_case_81 + case 82: + goto st_case_82 + case 83: + goto st_case_83 + case 84: + goto st_case_84 + case 85: + goto st_case_85 + case 86: + goto st_case_86 + case 87: + goto st_case_87 + case 88: + goto st_case_88 + case 89: + goto st_case_89 case 400: goto st_case_400 - case 116: - goto st_case_116 - case 117: - goto st_case_117 case 401: goto st_case_401 case 402: goto st_case_402 - case 118: - goto st_case_118 - case 119: - goto st_case_119 - case 120: - goto st_case_120 - case 121: - goto st_case_121 - case 122: - goto st_case_122 - case 123: - goto st_case_123 - case 124: - goto st_case_124 - case 125: - goto st_case_125 - case 126: - goto st_case_126 - case 127: - goto st_case_127 - case 128: - goto st_case_128 - case 129: - goto st_case_129 case 403: goto st_case_403 + case 90: + goto st_case_90 + case 91: + goto st_case_91 + case 92: + goto st_case_92 + case 93: + goto st_case_93 case 404: goto st_case_404 case 405: goto st_case_405 - case 130: - goto st_case_130 + case 94: + goto st_case_94 + case 95: + goto st_case_95 case 406: goto st_case_406 + case 96: + goto st_case_96 + case 97: + goto st_case_97 case 407: goto st_case_407 case 408: goto st_case_408 + case 98: + goto st_case_98 case 409: goto st_case_409 case 410: goto st_case_410 + case 99: + goto st_case_99 + case 100: + goto st_case_100 case 411: goto st_case_411 case 412: @@ -2076,24 +2188,28 @@ _resume: goto st_case_426 case 427: goto st_case_427 - case 131: - goto st_case_131 case 428: goto st_case_428 + case 101: + goto st_case_101 case 429: goto st_case_429 case 430: goto st_case_430 case 431: goto st_case_431 - case 132: - goto st_case_132 + case 102: + goto st_case_102 + case 103: + goto st_case_103 case 432: goto st_case_432 case 433: goto st_case_433 case 434: goto st_case_434 + case 104: + goto st_case_104 case 435: goto st_case_435 
case 436: @@ -2128,20 +2244,14 @@ _resume: goto st_case_450 case 451: goto st_case_451 - case 133: - goto st_case_133 - case 134: - goto st_case_134 - case 135: - goto st_case_135 case 452: goto st_case_452 case 453: goto st_case_453 - case 136: - goto st_case_136 case 454: goto st_case_454 + case 105: + goto st_case_105 case 455: goto st_case_455 case 456: @@ -2180,22 +2290,32 @@ _resume: goto st_case_472 case 473: goto st_case_473 - case 137: - goto st_case_137 case 474: goto st_case_474 case 475: goto st_case_475 case 476: goto st_case_476 - case 138: - goto st_case_138 + case 106: + goto st_case_106 + case 107: + goto st_case_107 + case 108: + goto st_case_108 + case 109: + goto st_case_109 + case 110: + goto st_case_110 case 477: goto st_case_477 + case 111: + goto st_case_111 case 478: goto st_case_478 case 479: goto st_case_479 + case 112: + goto st_case_112 case 480: goto st_case_480 case 481: @@ -2214,28 +2334,52 @@ _resume: goto st_case_487 case 488: goto st_case_488 + case 113: + goto st_case_113 + case 114: + goto st_case_114 + case 115: + goto st_case_115 case 489: goto st_case_489 + case 116: + goto st_case_116 + case 117: + goto st_case_117 + case 118: + goto st_case_118 case 490: goto st_case_490 + case 119: + goto st_case_119 + case 120: + goto st_case_120 case 491: goto st_case_491 case 492: goto st_case_492 + case 121: + goto st_case_121 + case 122: + goto st_case_122 + case 123: + goto st_case_123 + case 124: + goto st_case_124 case 493: goto st_case_493 case 494: goto st_case_494 case 495: goto st_case_495 + case 125: + goto st_case_125 case 496: goto st_case_496 case 497: goto st_case_497 case 498: goto st_case_498 - case 139: - goto st_case_139 case 499: goto st_case_499 case 500: @@ -2270,6 +2414,10 @@ _resume: goto st_case_514 case 515: goto st_case_515 + case 126: + goto st_case_126 + case 127: + goto st_case_127 case 516: goto st_case_516 case 517: @@ -2280,86 +2428,90 @@ _resume: goto st_case_519 case 520: goto st_case_520 - case 140: - goto st_case_140 - case 141: - goto st_case_141 - case 142: - goto st_case_142 - case 143: - goto st_case_143 - case 144: - goto st_case_144 case 521: goto st_case_521 - case 145: - goto st_case_145 case 522: goto st_case_522 - case 146: - goto st_case_146 case 523: goto st_case_523 case 524: goto st_case_524 + case 128: + goto st_case_128 + case 129: + goto st_case_129 + case 130: + goto st_case_130 case 525: goto st_case_525 + case 131: + goto st_case_131 + case 132: + goto st_case_132 + case 133: + goto st_case_133 case 526: goto st_case_526 + case 134: + goto st_case_134 + case 135: + goto st_case_135 case 527: goto st_case_527 case 528: goto st_case_528 + case 136: + goto st_case_136 + case 137: + goto st_case_137 + case 138: + goto st_case_138 case 529: goto st_case_529 case 530: goto st_case_530 + case 139: + goto st_case_139 case 531: goto st_case_531 - case 147: - goto st_case_147 - case 148: - goto st_case_148 - case 149: - goto st_case_149 + case 140: + goto st_case_140 case 532: goto st_case_532 - case 150: - goto st_case_150 - case 151: - goto st_case_151 - case 152: - goto st_case_152 case 533: goto st_case_533 - case 153: - goto st_case_153 - case 154: - goto st_case_154 case 534: goto st_case_534 case 535: goto st_case_535 - case 155: - goto st_case_155 - case 156: - goto st_case_156 - case 157: - goto st_case_157 case 536: goto st_case_536 case 537: goto st_case_537 case 538: goto st_case_538 - case 158: - goto st_case_158 case 539: goto st_case_539 + case 141: + goto st_case_141 + case 142: + goto st_case_142 + 
case 143: + goto st_case_143 case 540: goto st_case_540 + case 144: + goto st_case_144 + case 145: + goto st_case_145 + case 146: + goto st_case_146 case 541: goto st_case_541 + case 147: + goto st_case_147 + case 148: + goto st_case_148 case 542: goto st_case_542 case 543: @@ -2394,70 +2546,46 @@ _resume: goto st_case_557 case 558: goto st_case_558 - case 159: - goto st_case_159 - case 160: - goto st_case_160 case 559: goto st_case_559 case 560: goto st_case_560 case 561: goto st_case_561 + case 149: + goto st_case_149 + case 150: + goto st_case_150 case 562: goto st_case_562 case 563: goto st_case_563 case 564: goto st_case_564 + case 151: + goto st_case_151 case 565: goto st_case_565 case 566: goto st_case_566 + case 152: + goto st_case_152 case 567: goto st_case_567 - case 161: - goto st_case_161 - case 162: - goto st_case_162 - case 163: - goto st_case_163 case 568: goto st_case_568 - case 164: - goto st_case_164 - case 165: - goto st_case_165 - case 166: - goto st_case_166 case 569: goto st_case_569 - case 167: - goto st_case_167 - case 168: - goto st_case_168 case 570: goto st_case_570 case 571: goto st_case_571 - case 169: - goto st_case_169 - case 170: - goto st_case_170 - case 171: - goto st_case_171 - case 172: - goto st_case_172 case 572: goto st_case_572 - case 173: - goto st_case_173 case 573: goto st_case_573 case 574: goto st_case_574 - case 174: - goto st_case_174 case 575: goto st_case_575 case 576: @@ -2476,40 +2604,22 @@ _resume: goto st_case_582 case 583: goto st_case_583 - case 175: - goto st_case_175 - case 176: - goto st_case_176 - case 177: - goto st_case_177 case 584: goto st_case_584 - case 178: - goto st_case_178 - case 179: - goto st_case_179 - case 180: - goto st_case_180 + case 153: + goto st_case_153 + case 154: + goto st_case_154 case 585: goto st_case_585 - case 181: - goto st_case_181 - case 182: - goto st_case_182 + case 155: + goto st_case_155 case 586: goto st_case_586 case 587: goto st_case_587 - case 183: - goto st_case_183 - case 184: - goto st_case_184 case 588: goto st_case_588 - case 185: - goto st_case_185 - case 186: - goto st_case_186 case 589: goto st_case_589 case 590: @@ -2520,138 +2630,655 @@ _resume: goto st_case_592 case 593: goto st_case_593 + case 156: + goto st_case_156 + case 157: + goto st_case_157 + case 158: + goto st_case_158 case 594: goto st_case_594 + case 159: + goto st_case_159 + case 160: + goto st_case_160 + case 161: + goto st_case_161 case 595: goto st_case_595 + case 162: + goto st_case_162 + case 163: + goto st_case_163 case 596: goto st_case_596 - case 187: - goto st_case_187 - case 188: - goto st_case_188 - case 189: - goto st_case_189 case 597: goto st_case_597 - case 190: - goto st_case_190 - case 191: - goto st_case_191 - case 192: - goto st_case_192 + case 164: + goto st_case_164 + case 165: + goto st_case_165 + case 166: + goto st_case_166 + case 167: + goto st_case_167 + case 168: + goto st_case_168 + case 169: + goto st_case_169 case 598: goto st_case_598 - case 193: - goto st_case_193 - case 194: - goto st_case_194 case 599: goto st_case_599 case 600: goto st_case_600 - case 195: - goto st_case_195 case 601: goto st_case_601 - case 196: - goto st_case_196 case 602: goto st_case_602 case 603: goto st_case_603 - case 197: - goto st_case_197 - case 198: - goto st_case_198 - case 199: - goto st_case_199 case 604: goto st_case_604 case 605: goto st_case_605 case 606: goto st_case_606 + case 607: + goto st_case_607 + case 608: + goto st_case_608 + case 609: + goto st_case_609 + case 610: + goto st_case_610 + case 
611: + goto st_case_611 + case 612: + goto st_case_612 + case 613: + goto st_case_613 + case 614: + goto st_case_614 + case 615: + goto st_case_615 + case 616: + goto st_case_616 + case 170: + goto st_case_170 + case 171: + goto st_case_171 + case 172: + goto st_case_172 + case 617: + goto st_case_617 + case 618: + goto st_case_618 + case 619: + goto st_case_619 + case 173: + goto st_case_173 + case 620: + goto st_case_620 + case 621: + goto st_case_621 + case 174: + goto st_case_174 + case 622: + goto st_case_622 + case 623: + goto st_case_623 + case 624: + goto st_case_624 + case 625: + goto st_case_625 + case 626: + goto st_case_626 + case 175: + goto st_case_175 + case 176: + goto st_case_176 + case 177: + goto st_case_177 + case 627: + goto st_case_627 + case 178: + goto st_case_178 + case 179: + goto st_case_179 + case 180: + goto st_case_180 + case 628: + goto st_case_628 + case 181: + goto st_case_181 + case 182: + goto st_case_182 + case 629: + goto st_case_629 + case 630: + goto st_case_630 + case 183: + goto st_case_183 + case 631: + goto st_case_631 + case 632: + goto st_case_632 + case 633: + goto st_case_633 + case 184: + goto st_case_184 + case 185: + goto st_case_185 + case 186: + goto st_case_186 + case 634: + goto st_case_634 + case 187: + goto st_case_187 + case 188: + goto st_case_188 + case 189: + goto st_case_189 + case 635: + goto st_case_635 + case 190: + goto st_case_190 + case 191: + goto st_case_191 + case 636: + goto st_case_636 + case 637: + goto st_case_637 + case 192: + goto st_case_192 + case 193: + goto st_case_193 + case 194: + goto st_case_194 + case 638: + goto st_case_638 + case 195: + goto st_case_195 + case 196: + goto st_case_196 + case 639: + goto st_case_639 + case 640: + goto st_case_640 + case 641: + goto st_case_641 + case 642: + goto st_case_642 + case 643: + goto st_case_643 + case 644: + goto st_case_644 + case 645: + goto st_case_645 + case 646: + goto st_case_646 + case 197: + goto st_case_197 + case 198: + goto st_case_198 + case 199: + goto st_case_199 + case 647: + goto st_case_647 case 200: goto st_case_200 case 201: goto st_case_201 case 202: goto st_case_202 - case 607: - goto st_case_607 + case 648: + goto st_case_648 case 203: goto st_case_203 case 204: goto st_case_204 + case 649: + goto st_case_649 + case 650: + goto st_case_650 case 205: goto st_case_205 + case 206: + goto st_case_206 + case 207: + goto st_case_207 + case 651: + goto st_case_651 + case 652: + goto st_case_652 + case 653: + goto st_case_653 + case 654: + goto st_case_654 + case 655: + goto st_case_655 + case 656: + goto st_case_656 + case 657: + goto st_case_657 + case 658: + goto st_case_658 + case 659: + goto st_case_659 + case 660: + goto st_case_660 + case 661: + goto st_case_661 + case 662: + goto st_case_662 + case 663: + goto st_case_663 + case 664: + goto st_case_664 + case 665: + goto st_case_665 + case 666: + goto st_case_666 + case 667: + goto st_case_667 + case 668: + goto st_case_668 + case 669: + goto st_case_669 + case 208: + goto st_case_208 + case 209: + goto st_case_209 + case 210: + goto st_case_210 + case 211: + goto st_case_211 + case 212: + goto st_case_212 + case 670: + goto st_case_670 + case 213: + goto st_case_213 + case 214: + goto st_case_214 + case 671: + goto st_case_671 + case 672: + goto st_case_672 + case 673: + goto st_case_673 + case 674: + goto st_case_674 + case 675: + goto st_case_675 + case 676: + goto st_case_676 + case 677: + goto st_case_677 + case 678: + goto st_case_678 + case 679: + goto st_case_679 + case 215: + goto 
st_case_215 + case 216: + goto st_case_216 + case 217: + goto st_case_217 + case 680: + goto st_case_680 + case 218: + goto st_case_218 + case 219: + goto st_case_219 + case 220: + goto st_case_220 + case 681: + goto st_case_681 + case 221: + goto st_case_221 + case 222: + goto st_case_222 + case 682: + goto st_case_682 + case 683: + goto st_case_683 + case 223: + goto st_case_223 + case 224: + goto st_case_224 + case 225: + goto st_case_225 + case 684: + goto st_case_684 + case 226: + goto st_case_226 + case 227: + goto st_case_227 + case 685: + goto st_case_685 + case 686: + goto st_case_686 + case 687: + goto st_case_687 + case 688: + goto st_case_688 + case 689: + goto st_case_689 + case 690: + goto st_case_690 + case 691: + goto st_case_691 + case 692: + goto st_case_692 + case 228: + goto st_case_228 + case 229: + goto st_case_229 + case 230: + goto st_case_230 + case 693: + goto st_case_693 + case 231: + goto st_case_231 + case 232: + goto st_case_232 + case 694: + goto st_case_694 + case 695: + goto st_case_695 + case 696: + goto st_case_696 + case 697: + goto st_case_697 + case 698: + goto st_case_698 + case 699: + goto st_case_699 + case 700: + goto st_case_700 + case 701: + goto st_case_701 + case 233: + goto st_case_233 + case 234: + goto st_case_234 + case 235: + goto st_case_235 + case 702: + goto st_case_702 + case 236: + goto st_case_236 + case 237: + goto st_case_237 + case 238: + goto st_case_238 + case 703: + goto st_case_703 + case 239: + goto st_case_239 + case 240: + goto st_case_240 + case 704: + goto st_case_704 + case 705: + goto st_case_705 + case 241: + goto st_case_241 + case 242: + goto st_case_242 + case 243: + goto st_case_243 + case 706: + goto st_case_706 + case 707: + goto st_case_707 + case 708: + goto st_case_708 + case 709: + goto st_case_709 + case 710: + goto st_case_710 + case 711: + goto st_case_711 + case 712: + goto st_case_712 + case 713: + goto st_case_713 + case 714: + goto st_case_714 + case 715: + goto st_case_715 + case 716: + goto st_case_716 + case 717: + goto st_case_717 + case 718: + goto st_case_718 + case 719: + goto st_case_719 + case 720: + goto st_case_720 + case 721: + goto st_case_721 + case 722: + goto st_case_722 + case 723: + goto st_case_723 + case 724: + goto st_case_724 + case 244: + goto st_case_244 + case 245: + goto st_case_245 + case 725: + goto st_case_725 + case 246: + goto st_case_246 + case 247: + goto st_case_247 + case 726: + goto st_case_726 + case 727: + goto st_case_727 + case 728: + goto st_case_728 + case 729: + goto st_case_729 + case 730: + goto st_case_730 + case 731: + goto st_case_731 + case 732: + goto st_case_732 + case 733: + goto st_case_733 + case 248: + goto st_case_248 + case 249: + goto st_case_249 + case 250: + goto st_case_250 + case 734: + goto st_case_734 + case 251: + goto st_case_251 + case 252: + goto st_case_252 + case 253: + goto st_case_253 + case 735: + goto st_case_735 + case 254: + goto st_case_254 + case 255: + goto st_case_255 + case 736: + goto st_case_736 + case 737: + goto st_case_737 + case 256: + goto st_case_256 + case 257: + goto st_case_257 + case 738: + goto st_case_738 + case 260: + goto st_case_260 + case 740: + goto st_case_740 + case 741: + goto st_case_741 + case 261: + goto st_case_261 + case 262: + goto st_case_262 + case 263: + goto st_case_263 + case 264: + goto st_case_264 + case 742: + goto st_case_742 + case 265: + goto st_case_265 + case 743: + goto st_case_743 + case 266: + goto st_case_266 + case 267: + goto st_case_267 + case 268: + goto st_case_268 + case 
739: + goto st_case_739 + case 258: + goto st_case_258 + case 259: + goto st_case_259 } goto st_out + st269: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof269 + } + st_case_269: + switch ( m.data)[( m.p)] { + case 10: + goto tr33 + case 11: + goto tr457 + case 13: + goto tr33 + case 32: + goto tr456 + case 35: + goto tr33 + case 44: + goto tr33 + case 92: + goto tr458 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr456 + } + goto tr455 +tr31: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st1 +tr455: +//line plugins/parsers/influx/machine.go.rl:82 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st1 st1: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof1 } st_case_1: +//line plugins/parsers/influx/machine.go:3208 switch ( m.data)[( m.p)] { + case 10: + goto tr2 + case 11: + goto tr3 + case 13: + goto tr2 case 32: goto tr1 - case 35: - goto tr1 case 44: - goto tr1 + goto tr4 case 92: - goto tr2 + goto st94 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr1 - } - case ( m.data)[( m.p)] >= 9: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { goto tr1 } - goto tr0 -tr0: -//line plugins/parsers/influx/machine.go.rl:18 + goto st1 +tr1: + ( m.cs) = 2 +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st2 + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr58: + ( m.cs) = 2 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st2: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof2 } st_case_2: -//line plugins/parsers/influx/machine.go:2627 +//line plugins/parsers/influx/machine.go:3258 switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr8 case 11: - goto tr6 + goto tr9 case 13: - goto tr5 + goto tr8 case 32: - goto tr4 + goto st2 case 44: - goto tr7 + goto tr8 + case 61: + goto tr8 case 92: - goto st133 + goto tr10 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 + goto st2 } - goto st2 -tr4: -//line plugins/parsers/influx/machine.go.rl:72 + goto tr6 +tr6: +//line plugins/parsers/influx/machine.go.rl:28 - m.handler.SetMeasurement(m.text()) - - goto st3 -tr60: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) + m.pb = m.p goto st3 st3: @@ -2659,31 +3286,236 @@ tr60: goto _test_eof3 } st_case_3: -//line plugins/parsers/influx/machine.go:2663 +//line plugins/parsers/influx/machine.go:3290 switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr11 - case 13: - goto tr5 case 32: - goto st3 + goto tr8 case 44: - goto tr5 + goto tr8 case 61: - goto tr5 - case 92: goto tr12 + case 92: + goto st34 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st3 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 } - goto tr9 -tr9: -//line plugins/parsers/influx/machine.go.rl:18 + goto st3 +tr2: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:46 - m.pb = m.p + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr8: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr33: + ( m.cs) = 
0 +//line plugins/parsers/influx/machine.go.rl:32 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr37: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:32 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr41: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:32 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr45: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr103: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:53 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr130: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:53 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr196: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:53 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr421: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:32 + + err = ErrNameParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr424: + ( m.cs) = 0 +//line plugins/parsers/influx/machine.go.rl:53 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + + goto _again +tr1053: +//line plugins/parsers/influx/machine.go.rl:73 + + ( m.p)-- + + {goto st269 } + + goto st0 +//line plugins/parsers/influx/machine.go:3511 +st_case_0: + st0: + ( m.cs) = 0 + goto _out +tr12: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() goto st4 st4: @@ -2691,458 +3523,390 @@ tr9: goto _test_eof4 } st_case_4: -//line plugins/parsers/influx/machine.go:2695 +//line plugins/parsers/influx/machine.go:3527 switch ( m.data)[( m.p)] { - case 32: - goto tr5 - case 44: - goto tr5 - case 61: - goto tr14 - case 92: - goto st10 + case 34: + goto st5 + case 45: + goto tr15 + case 46: + goto tr16 + case 48: + goto tr17 + case 70: + goto tr19 + case 84: + goto tr20 + case 102: + goto tr21 + case 116: + goto tr22 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 + if 49 <= ( m.data)[( m.p)] 
&& ( m.data)[( m.p)] <= 57 { + goto tr18 } - goto st4 -tr1: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr5: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr31: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:49 - - m.err = ErrTimestampParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr52: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:42 - - m.err = ErrTagParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr61: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:42 - - m.err = ErrTagParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr101: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:49 - - m.err = ErrTimestampParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr207: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:42 - - m.err = ErrTagParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:49 - - m.err = ErrTimestampParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -tr216: - m.cs = 0 -//line plugins/parsers/influx/machine.go.rl:42 - - m.err = ErrTagParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:49 - - m.err = ErrTimestampParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse - ( m.p)-- - - m.cs = 195; - {( m.p)++; goto _out } - - goto _again -//line plugins/parsers/influx/machine.go:2899 -st_case_0: - st0: - m.cs = 0 - goto _out -tr14: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st5 + goto tr8 st5: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof5 } st_case_5: -//line plugins/parsers/influx/machine.go:2915 switch ( m.data)[( m.p)] { - case 34: - goto st6 - case 45: - goto tr17 - case 46: - goto tr18 - case 48: - goto tr19 - case 70: - goto tr21 - case 84: - goto tr22 - case 102: - goto tr23 - case 116: + case 10: goto tr24 + case 34: + goto tr25 + case 92: + goto tr26 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr20 - } - goto tr5 + goto tr23 +tr23: +//line 
plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st6 +tr24: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st6 +tr28: +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st6 st6: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof6 } st_case_6: +//line plugins/parsers/influx/machine.go:3595 switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr28 case 34: - goto tr26 + goto tr29 case 92: - goto tr27 + goto st73 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr25 + goto st6 tr25: -//line plugins/parsers/influx/machine.go.rl:18 + ( m.cs) = 270 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr29: + ( m.cs) = 270 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st270: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof270 + } + st_case_270: +//line plugins/parsers/influx/machine.go:3640 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto st35 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st271 + } + goto tr103 +tr921: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1041: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1044: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1047: + ( m.cs) = 271 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st271: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof271 + } + st_case_271: +//line plugins/parsers/influx/machine.go:3712 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 13: + goto st32 + case 32: + goto st271 + case 45: + goto tr462 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr463 + } + case ( m.data)[( m.p)] >= 9: + goto st271 + } + goto tr424 +tr101: +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st272 +tr468: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr730: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:130 + + err = 
m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr942: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr948: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr954: + ( m.cs) = 272 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again + st272: +//line plugins/parsers/influx/machine.go.rl:172 + + m.finishMetric = true + ( m.cs) = 739; + {( m.p)++; goto _out } + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof272 + } + st_case_272: +//line plugins/parsers/influx/machine.go:3846 + switch ( m.data)[( m.p)] { + case 10: + goto tr33 + case 11: + goto tr34 + case 13: + goto tr33 + case 32: + goto st7 + case 35: + goto tr33 + case 44: + goto tr33 + case 92: + goto tr35 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st7 + } + goto tr31 +tr456: +//line plugins/parsers/influx/machine.go.rl:82 + + m.beginMetric = true + goto st7 st7: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof7 } st_case_7: -//line plugins/parsers/influx/machine.go:2966 +//line plugins/parsers/influx/machine.go:3878 switch ( m.data)[( m.p)] { case 10: - goto tr5 - case 34: - goto tr29 + goto tr33 + case 11: + goto tr34 + case 13: + goto tr33 + case 32: + goto st7 + case 35: + goto tr33 + case 44: + goto tr33 case 92: - goto st11 + goto tr35 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st7 } - goto st7 -tr26: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr31 +tr34: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p -//line plugins/parsers/influx/machine.go.rl:104 + goto st8 +tr457: +//line plugins/parsers/influx/machine.go.rl:82 - m.handler.AddString(key, m.text()) + m.beginMetric = true - goto st206 -tr29: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st206 - st206: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof206 - } - st_case_206: -//line plugins/parsers/influx/machine.go:3000 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto st9 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st207 - } - goto tr101 -tr382: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st207 -tr388: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st207 -tr392: -//line plugins/parsers/influx/machine.go.rl:92 - - 
m.handler.AddUint(key, m.text()) - - goto st207 -tr396: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st207 - st207: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof207 - } - st_case_207: -//line plugins/parsers/influx/machine.go:3044 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 13: - goto tr357 - case 32: - goto st207 - case 45: - goto tr359 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr360 - } - case ( m.data)[( m.p)] >= 9: - goto st207 - } - goto tr31 -tr357: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr362: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr383: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr389: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr393: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr397: - m.cs = 208 -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again - st208: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof208 - } - st_case_208: -//line plugins/parsers/influx/machine.go:3143 - goto tr1 -tr359: -//line plugins/parsers/influx/machine.go.rl:18 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -3152,506 +3916,70 @@ tr359: goto _test_eof8 } st_case_8: -//line plugins/parsers/influx/machine.go:3156 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st209 - } - goto tr31 -tr360: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st209 - st209: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof209 - } - st_case_209: -//line plugins/parsers/influx/machine.go:3172 +//line plugins/parsers/influx/machine.go:3920 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr37 + case 11: + goto tr38 case 13: - goto tr362 + goto tr37 case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st211 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 -tr361: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st210 - st210: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof210 - } - st_case_210: -//line plugins/parsers/influx/machine.go:3201 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 13: - goto tr357 - case 32: - goto st210 + goto tr36 + case 35: + goto st1 + case 44: + goto tr4 + case 92: + goto tr35 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st210 - } - goto tr1 - st211: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof211 - } - st_case_211: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - 
case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st212 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr36 } goto tr31 - st212: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof212 - } - st_case_212: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st213 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st213: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof213 - } - st_case_213: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st214 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st214: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof214 - } - st_case_214: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st215 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st215: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof215 - } - st_case_215: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st216 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st216: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof216 - } - st_case_216: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st217 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st217: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof217 - } - st_case_217: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st218 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st218: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof218 - } - st_case_218: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st219 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st219: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof219 - } - st_case_219: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st220 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st220: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof220 - } - st_case_220: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st221 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st221: - if ( m.p)++; ( m.p) == 
( m.pe) { - goto _test_eof221 - } - st_case_221: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st222 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st222: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof222 - } - st_case_222: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st223 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st223: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof223 - } - st_case_223: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st224 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st224: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof224 - } - st_case_224: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st225 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st225: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof225 - } - st_case_225: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st226 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st226: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof226 - } - st_case_226: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st227 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st227: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof227 - } - st_case_227: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st228 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto tr31 - st228: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof228 - } - st_case_228: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 13: - goto tr362 - case 32: - goto tr361 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr361 - } - goto tr31 -tr384: -//line plugins/parsers/influx/machine.go.rl:96 +tr36: + ( m.cs) = 9 +//line plugins/parsers/influx/machine.go.rl:86 - m.handler.AddFloat(key, m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st9 -tr390: -//line plugins/parsers/influx/machine.go.rl:88 + ( m.cs) = 257; + {( m.p)++; goto _out } + } - m.handler.AddInt(key, m.text()) - - goto st9 -tr394: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st9 -tr398: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st9 + goto _again st9: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof9 } st_case_9: -//line plugins/parsers/influx/machine.go:3634 +//line 
plugins/parsers/influx/machine.go:3959 switch ( m.data)[( m.p)] { + case 10: + goto tr41 + case 11: + goto tr42 + case 13: + goto tr41 case 32: - goto tr5 + goto st9 + case 35: + goto tr6 case 44: - goto tr5 + goto tr41 case 61: - goto tr5 + goto tr31 case 92: - goto tr12 + goto tr43 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st9 } - goto tr9 -tr12: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr39 +tr39: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -3661,59 +3989,136 @@ tr12: goto _test_eof10 } st_case_10: -//line plugins/parsers/influx/machine.go:3665 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 +//line plugins/parsers/influx/machine.go:3993 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr46 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 } - goto st4 -tr27: -//line plugins/parsers/influx/machine.go.rl:18 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st10 +tr46: + ( m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr49: + ( m.cs) = 11 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st11 + goto _again st11: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof11 } st_case_11: -//line plugins/parsers/influx/machine.go:3686 +//line plugins/parsers/influx/machine.go:4049 switch ( m.data)[( m.p)] { - case 34: - goto st7 + case 10: + goto tr45 + case 11: + goto tr49 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 61: + goto tr47 case 92: - goto st7 + goto tr43 } - goto tr5 -tr17: -//line plugins/parsers/influx/machine.go.rl:18 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto tr39 +tr4: + ( m.cs) = 12 +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st12 + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr60: + ( m.cs) = 12 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st12: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof12 } st_case_12: -//line plugins/parsers/influx/machine.go:3705 +//line plugins/parsers/influx/machine.go:4101 switch ( m.data)[( m.p)] { - case 46: - goto st13 - case 48: - goto st231 + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr2 + case 92: + goto tr51 } - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st234 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - goto tr5 -tr18: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr50 +tr50: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -3723,546 +4128,388 @@ tr18: goto _test_eof13 } st_case_13: -//line 
plugins/parsers/influx/machine.go:3727 - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st229 - } - goto tr5 - st229: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof229 - } - st_case_229: +//line plugins/parsers/influx/machine.go:4132 switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 case 32: - goto tr382 + goto tr2 case 44: - goto tr384 - case 69: - goto st14 - case 101: - goto st14 + goto tr2 + case 61: + goto tr53 + case 92: + goto st23 } switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st229 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 } case ( m.data)[( m.p)] >= 9: - goto tr382 + goto tr2 } - goto tr101 + goto st13 +tr53: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + + goto st14 st14: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof14 } st_case_14: +//line plugins/parsers/influx/machine.go:4163 switch ( m.data)[( m.p)] { - case 34: - goto st15 - case 43: - goto st15 - case 45: - goto st15 + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr2 + case 92: + goto tr56 } - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st230 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - goto tr5 + goto tr55 +tr55: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st15 st15: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof15 } st_case_15: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st230 - } - goto tr5 - st230: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof230 - } - st_case_230: +//line plugins/parsers/influx/machine.go:4194 switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr2 + case 11: + goto tr59 case 13: - goto tr383 + goto tr2 case 32: - goto tr382 + goto tr58 case 44: - goto tr384 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st230 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 - st231: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof231 - } - st_case_231: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - case 105: - goto st233 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st232 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 - st232: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof232 - } - st_case_232: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st232 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 - st233: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof233 - } - st_case_233: - switch ( m.data)[( m.p)] { - case 10: - goto tr389 - case 13: - goto tr389 - case 32: - goto tr388 - case 44: - goto tr390 + goto tr60 + case 61: + goto tr2 + case 92: + goto st21 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr388 + goto tr58 } - goto tr101 - st234: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof234 - } - st_case_234: - 
switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - case 105: - goto st233 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st234 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 -tr19: -//line plugins/parsers/influx/machine.go.rl:18 + goto st15 +tr59: + ( m.cs) = 16 +//line plugins/parsers/influx/machine.go.rl:99 - m.pb = m.p + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- - goto st235 - st235: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof235 - } - st_case_235: -//line plugins/parsers/influx/machine.go:3934 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - case 105: - goto st233 - case 117: - goto st236 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st232 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 - st236: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof236 - } - st_case_236: - switch ( m.data)[( m.p)] { - case 10: - goto tr393 - case 13: - goto tr393 - case 32: - goto tr392 - case 44: - goto tr394 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr392 - } - goto tr101 -tr20: -//line plugins/parsers/influx/machine.go.rl:18 + ( m.cs) = 257; + {( m.p)++; goto _out } + } - m.pb = m.p - - goto st237 - st237: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof237 - } - st_case_237: -//line plugins/parsers/influx/machine.go:3994 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 13: - goto tr383 - case 32: - goto tr382 - case 44: - goto tr384 - case 46: - goto st229 - case 69: - goto st14 - case 101: - goto st14 - case 105: - goto st233 - case 117: - goto st236 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st237 - } - case ( m.data)[( m.p)] >= 9: - goto tr382 - } - goto tr101 -tr21: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st238 - st238: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof238 - } - st_case_238: -//line plugins/parsers/influx/machine.go:4035 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 13: - goto tr397 - case 32: - goto tr396 - case 44: - goto tr398 - case 65: - goto st16 - case 97: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 - } - goto tr101 + goto _again st16: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof16 } st_case_16: - if ( m.data)[( m.p)] == 76 { - goto st17 +//line plugins/parsers/influx/machine.go:4233 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr63 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr45 + case 92: + goto tr64 } - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto tr62 +tr62: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st17 st17: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof17 } st_case_17: - if ( m.data)[( m.p)] == 83 { - goto st18 +//line plugins/parsers/influx/machine.go:4265 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr66 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 } 
- goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st17 +tr66: + ( m.cs) = 18 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr63: + ( m.cs) = 18 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again st18: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof18 } st_case_18: - if ( m.data)[( m.p)] == 69 { - goto st239 - } - goto tr5 - st239: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof239 - } - st_case_239: +//line plugins/parsers/influx/machine.go:4321 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr45 + case 11: + goto tr63 case 13: - goto tr397 + goto tr45 case 32: - goto tr396 + goto tr58 case 44: - goto tr398 + goto tr60 + case 61: + goto tr12 + case 92: + goto tr64 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 + goto tr58 } - goto tr101 + goto tr62 +tr64: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st19 st19: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof19 } st_case_19: - if ( m.data)[( m.p)] == 108 { +//line plugins/parsers/influx/machine.go:4353 + if ( m.data)[( m.p)] == 92 { goto st20 } - goto tr5 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st17 st20: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof20 } st_case_20: - if ( m.data)[( m.p)] == 115 { - goto st21 +//line plugins/parsers/influx/machine.go:4374 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr66 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 } - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st17 +tr56: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st21 st21: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof21 } st_case_21: - if ( m.data)[( m.p)] == 101 { - goto st239 - } - goto tr5 -tr22: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st240 - st240: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof240 - } - st_case_240: -//line plugins/parsers/influx/machine.go:4138 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 13: - goto tr397 - case 32: - goto tr396 - case 44: - goto tr398 - case 82: +//line plugins/parsers/influx/machine.go:4406 + if ( m.data)[( m.p)] == 92 { goto st22 - case 114: - goto st23 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - goto tr101 + goto st15 st22: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof22 } st_case_22: - if ( m.data)[( m.p)] == 85 { - goto st18 +//line plugins/parsers/influx/machine.go:4427 + switch ( m.data)[( m.p)] { + case 10: + goto tr2 + case 11: + goto tr59 + case 13: + goto tr2 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr2 + case 92: + goto st21 } - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 
12 { + goto tr58 + } + goto st15 +tr51: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st23 st23: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof23 } st_case_23: - if ( m.data)[( m.p)] == 117 { - goto st21 +//line plugins/parsers/influx/machine.go:4459 + if ( m.data)[( m.p)] == 92 { + goto st24 } - goto tr5 -tr23: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st241 - st241: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof241 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - st_case_241: -//line plugins/parsers/influx/machine.go:4186 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 13: - goto tr397 - case 32: - goto tr396 - case 44: - goto tr398 - case 97: - goto st19 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 - } - goto tr101 -tr24: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st242 - st242: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof242 - } - st_case_242: -//line plugins/parsers/influx/machine.go:4214 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 13: - goto tr397 - case 32: - goto tr396 - case 44: - goto tr398 - case 114: - goto st23 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr396 - } - goto tr101 -tr11: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st24 + goto st13 st24: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof24 } st_case_24: -//line plugins/parsers/influx/machine.go:4242 +//line plugins/parsers/influx/machine.go:4480 switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr11 - case 13: - goto tr5 case 32: - goto st3 + goto tr2 case 44: - goto tr5 + goto tr2 case 61: - goto tr14 + goto tr53 case 92: - goto tr12 + goto st23 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st3 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 } - goto tr9 -tr6: -//line plugins/parsers/influx/machine.go.rl:72 + goto st13 +tr47: +//line plugins/parsers/influx/machine.go.rl:108 - m.handler.SetMeasurement(m.text()) + m.key = m.text() + + goto st25 +tr423: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() goto st25 st25: @@ -4270,426 +4517,20 @@ tr6: goto _test_eof25 } st_case_25: -//line plugins/parsers/influx/machine.go:4274 +//line plugins/parsers/influx/machine.go:4521 switch ( m.data)[( m.p)] { case 10: - goto tr5 - case 11: goto tr45 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 61: - goto st2 - case 92: - goto tr46 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 - } - goto tr44 -tr44: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st26 - st26: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof26 - } - st_case_26: -//line plugins/parsers/influx/machine.go:4306 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 case 11: - goto tr48 + goto tr3 case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 - } - goto st26 -tr48: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st27 
-tr45: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st27 - st27: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof27 - } - st_case_27: -//line plugins/parsers/influx/machine.go:4348 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: goto tr45 - case 13: - goto tr5 case 32: - goto tr4 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto tr46 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 - } - goto tr44 -tr7: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st28 -tr63: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st28 - st28: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof28 - } - st_case_28: -//line plugins/parsers/influx/machine.go:4386 - switch ( m.data)[( m.p)] { - case 32: - goto tr52 - case 44: - goto tr52 - case 61: - goto tr52 - case 92: - goto tr53 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 - } - goto tr51 -tr51: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st29 - st29: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof29 - } - st_case_29: -//line plugins/parsers/influx/machine.go:4417 - switch ( m.data)[( m.p)] { - case 32: - goto tr52 - case 44: - goto tr52 - case 61: - goto tr55 - case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 - } - goto st29 -tr55: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - - goto st30 - st30: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof30 - } - st_case_30: -//line plugins/parsers/influx/machine.go:4448 - switch ( m.data)[( m.p)] { - case 32: - goto tr52 - case 44: - goto tr52 - case 61: - goto tr52 - case 92: - goto tr58 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 - } - goto tr57 -tr57: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st31 - st31: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof31 - } - st_case_31: -//line plugins/parsers/influx/machine.go:4479 - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 -tr62: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st32 - st32: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof32 - } - st_case_32: -//line plugins/parsers/influx/machine.go:4511 - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr66 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto tr67 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto tr65 -tr65: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st33 - st33: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof33 - } - st_case_33: -//line plugins/parsers/influx/machine.go:4543 - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr69 - case 13: - goto tr61 - case 
32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st33 -tr69: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st34 -tr66: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st34 - st34: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof34 - } - st_case_34: -//line plugins/parsers/influx/machine.go:4585 - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr66 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto tr67 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto tr65 -tr67: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st35 - st35: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof35 - } - st_case_35: -//line plugins/parsers/influx/machine.go:4617 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st33 -tr58: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st36 - st36: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof36 - } - st_case_36: -//line plugins/parsers/influx/machine.go:4638 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 - } - goto st31 -tr53: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st37 - st37: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof37 - } - st_case_37: -//line plugins/parsers/influx/machine.go:4659 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 - } - goto st29 -tr49: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st38 - st38: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof38 - } - st_case_38: -//line plugins/parsers/influx/machine.go:4680 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 + goto tr1 case 34: - goto st39 + goto st28 case 44: - goto tr7 + goto tr4 case 45: goto tr72 case 46: @@ -4701,7 +4542,7 @@ tr49: case 84: goto tr77 case 92: - goto st133 + goto st94 case 102: goto tr78 case 116: @@ -4713,21 +4554,81 @@ tr49: goto tr75 } case ( m.data)[( m.p)] >= 9: - goto tr4 + goto tr1 } - goto st2 - st39: + goto st1 +tr3: + ( m.cs) = 26 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st26: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof39 + goto _test_eof26 } - st_case_39: + st_case_26: +//line plugins/parsers/influx/machine.go:4579 switch ( m.data)[( m.p)] { - case 9: - goto tr81 + case 10: + goto tr45 + case 11: + goto tr49 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 61: + goto st1 + case 92: + goto tr43 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto tr39 +tr43: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st27 + st27: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof27 + } + st_case_27: +//line plugins/parsers/influx/machine.go:4611 + 
switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st10 + st28: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof28 + } + st_case_28: + switch ( m.data)[( m.p)] { + case 10: + goto tr24 case 11: goto tr82 - case 12: - goto tr4 + case 13: + goto tr23 case 32: goto tr81 case 34: @@ -4737,12 +4638,1574 @@ tr49: case 92: goto tr85 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr81 } goto tr80 tr80: -//line plugins/parsers/influx/machine.go.rl:18 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st29 + st29: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof29 + } + st_case_29: +//line plugins/parsers/influx/machine.go:4657 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 92: + goto st140 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 +tr87: + ( m.cs) = 30 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr81: + ( m.cs) = 30 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr229: + ( m.cs) = 30 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st30: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof30 + } + st_case_30: +//line plugins/parsers/influx/machine.go:4726 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr94 + case 13: + goto st6 + case 32: + goto st30 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st30 + } + goto tr92 +tr92: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st31 + st31: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof31 + } + st_case_31: +//line plugins/parsers/influx/machine.go:4760 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr99 + case 92: + goto st75 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st31 +tr95: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr98: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr384: + ( m.cs) = 273 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st273: + if ( m.p)++; ( m.p) == ( m.pe) { + goto 
_test_eof273 + } + st_case_273: +//line plugins/parsers/influx/machine.go:4833 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st274 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto st35 + case 61: + goto tr12 + case 92: + goto st34 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st271 + } + goto st3 + st274: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof274 + } + st_case_274: + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st274 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto tr103 + case 45: + goto tr465 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr466 + } + case ( m.data)[( m.p)] >= 9: + goto st271 + } + goto st3 +tr470: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr732: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr944: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr950: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr956: + ( m.cs) = 32 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st32: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof32 + } + st_case_32: +//line plugins/parsers/influx/machine.go:4956 + if ( m.data)[( m.p)] == 10 { + goto tr101 + } + goto st0 +tr465: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st33 + st33: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof33 + } + st_case_33: +//line plugins/parsers/influx/machine.go:4972 + switch ( m.data)[( m.p)] { + case 32: + goto tr103 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr103 + } + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st275 + } + default: + goto tr103 + } + goto st3 +tr466: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st275 + st275: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof275 + } + st_case_275: +//line plugins/parsers/influx/machine.go:5007 + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st278 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 +tr467: + ( m.cs) = 276 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st276: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof276 + } + 
st_case_276: +//line plugins/parsers/influx/machine.go:5051 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 13: + goto st32 + case 32: + goto st276 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st276 + } + goto st0 +tr469: + ( m.cs) = 277 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st277: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof277 + } + st_case_277: +//line plugins/parsers/influx/machine.go:5082 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st277 + case 13: + goto st32 + case 32: + goto st276 + case 44: + goto tr8 + case 61: + goto tr12 + case 92: + goto st34 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st276 + } + goto st3 +tr10: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st34 + st34: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof34 + } + st_case_34: +//line plugins/parsers/influx/machine.go:5114 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st3 + st278: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof278 + } + st_case_278: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st279 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st279: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof279 + } + st_case_279: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st280 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st280: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof280 + } + st_case_280: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st281 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st281: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof281 + } + st_case_281: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st282 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st282: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof282 + } + st_case_282: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st283 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st283: + 
if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof283 + } + st_case_283: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st284 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st284: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof284 + } + st_case_284: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st285 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st285: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof285 + } + st_case_285: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st286 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st286: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof286 + } + st_case_286: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st287 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st287: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof287 + } + st_case_287: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st288 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st288: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof288 + } + st_case_288: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st289 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st289: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof289 + } + st_case_289: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st290 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st290: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof290 + } + st_case_290: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { + goto st291 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st291: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof291 + } + st_case_291: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st292 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st292: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof292 + } + st_case_292: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st293 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st293: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof293 + } + st_case_293: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st294 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st294: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof294 + } + st_case_294: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st295 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st3 + st295: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof295 + } + st_case_295: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr469 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr103 + case 61: + goto tr12 + case 92: + goto st34 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr467 + } + goto st3 +tr922: + ( m.cs) = 35 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1042: + ( m.cs) = 35 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1045: + ( m.cs) = 35 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1048: + ( m.cs) = 35 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st35: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof35 + } + st_case_35: +//line plugins/parsers/influx/machine.go:5716 + switch ( m.data)[( m.p)] { + case 32: + goto tr8 + case 44: + goto tr8 + case 61: + goto tr8 + case 92: + goto tr10 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + 
goto tr6 +tr99: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st36 + st36: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof36 + } + st_case_36: +//line plugins/parsers/influx/machine.go:5747 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr105 + case 45: + goto tr106 + case 46: + goto tr107 + case 48: + goto tr108 + case 70: + goto tr110 + case 84: + goto tr111 + case 92: + goto st73 + case 102: + goto tr112 + case 116: + goto tr113 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr109 + } + goto st6 +tr105: + ( m.cs) = 296 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st296: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof296 + } + st_case_296: +//line plugins/parsers/influx/machine.go:5792 + switch ( m.data)[( m.p)] { + case 10: + goto tr492 + case 13: + goto tr493 + case 32: + goto tr491 + case 34: + goto tr25 + case 44: + goto tr494 + case 92: + goto tr26 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr491 + } + goto tr23 +tr491: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st297 +tr980: + ( m.cs) = 297 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr985: + ( m.cs) = 297 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr988: + ( m.cs) = 297 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr991: + ( m.cs) = 297 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st297: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof297 + } + st_case_297: +//line plugins/parsers/influx/machine.go:5874 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 13: + goto st72 + case 32: + goto st297 + case 34: + goto tr29 + case 45: + goto tr497 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr498 + } + case ( m.data)[( m.p)] >= 9: + goto st297 + } + goto st6 +tr492: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st298 +tr219: +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st298 +tr636: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr600: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:157 + 
+ err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr817: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr822: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr803: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr758: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr791: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr797: + ( m.cs) = 298 +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st298: +//line plugins/parsers/influx/machine.go.rl:172 + + m.finishMetric = true + ( m.cs) = 739; + {( m.p)++; goto _out } + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof298 + } + st_case_298: +//line plugins/parsers/influx/machine.go:6081 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr115 + case 13: + goto st6 + case 32: + goto st37 + case 34: + goto tr116 + case 35: + goto st6 + case 44: + goto st6 + case 92: + goto tr85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st37 + } + goto tr80 + st37: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof37 + } + st_case_37: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr115 + case 13: + goto st6 + case 32: + goto st37 + case 34: + goto tr116 + case 35: + goto st6 + case 44: + goto st6 + case 92: + goto tr85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st37 + } + goto tr80 +tr115: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st38 + st38: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof38 + } + st_case_38: +//line plugins/parsers/influx/machine.go:6142 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr118 + case 13: + goto st6 + case 32: + goto tr117 + case 34: + goto tr83 + case 35: + goto st29 + 
case 44: + goto tr90 + case 92: + goto tr85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr117 + } + goto tr80 +tr117: + ( m.cs) = 39 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st39: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof39 + } + st_case_39: +//line plugins/parsers/influx/machine.go:6183 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr121 + case 13: + goto st6 + case 32: + goto st39 + case 34: + goto tr122 + case 35: + goto tr92 + case 44: + goto st6 + case 61: + goto tr80 + case 92: + goto tr123 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st39 + } + goto tr119 +tr119: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -4752,2567 +6215,388 @@ tr80: goto _test_eof40 } st_case_40: -//line plugins/parsers/influx/machine.go:4756 +//line plugins/parsers/influx/machine.go:6219 switch ( m.data)[( m.p)] { - case 9: - goto tr87 + case 10: + goto tr28 case 11: - goto tr88 - case 12: - goto tr4 + goto tr125 + case 13: + goto st6 case 32: goto tr87 case 34: - goto tr89 + goto tr126 case 44: goto tr90 + case 61: + goto tr127 case 92: - goto st170 + goto st92 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 } goto st40 -tr87: -//line plugins/parsers/influx/machine.go.rl:72 +tr125: + ( m.cs) = 41 +//line plugins/parsers/influx/machine.go.rl:86 - m.handler.SetMeasurement(m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st41 -tr81: -//line plugins/parsers/influx/machine.go.rl:72 + ( m.cs) = 257; + {( m.p)++; goto _out } + } - m.handler.SetMeasurement(m.text()) + goto _again +tr129: + ( m.cs) = 41 +//line plugins/parsers/influx/machine.go.rl:86 -//line plugins/parsers/influx/machine.go.rl:18 + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st41 -tr237: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st41 + goto _again st41: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof41 } st_case_41: -//line plugins/parsers/influx/machine.go:4804 +//line plugins/parsers/influx/machine.go:6277 switch ( m.data)[( m.p)] { - case 9: - goto st41 + case 10: + goto tr28 case 11: - goto tr94 - case 12: - goto st3 - case 32: - goto st41 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr96 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr92 -tr92: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st42 - st42: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof42 - } - st_case_42: -//line plugins/parsers/influx/machine.go:4838 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st42 -tr95: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st243 -tr98: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st243 -tr114: -//line 
plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st243 - st243: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof243 - } - st_case_243: -//line plugins/parsers/influx/machine.go:4890 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st244 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto st9 - case 61: - goto tr14 - case 92: - goto st10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st207 - } - goto st4 - st244: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof244 - } - st_case_244: - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st244 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto tr101 - case 45: - goto tr404 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr405 - } - case ( m.data)[( m.p)] >= 9: - goto st207 - } - goto st4 -tr404: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st43 - st43: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof43 - } - st_case_43: -//line plugins/parsers/influx/machine.go:4954 - switch ( m.data)[( m.p)] { - case 32: - goto tr101 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr101 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st245 - } - default: - goto tr101 - } - goto st4 -tr405: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st245 - st245: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof245 - } - st_case_245: -//line plugins/parsers/influx/machine.go:4989 - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st247 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 -tr406: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st246 - st246: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof246 - } - st_case_246: -//line plugins/parsers/influx/machine.go:5026 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st246 - case 13: - goto tr357 - case 32: - goto st210 - case 44: - goto tr5 - case 61: - goto tr14 - case 92: - goto st10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st210 - } - goto st4 - st247: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof247 - } - st_case_247: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st248 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st248: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof248 - } - st_case_248: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 
48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st249 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st249: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof249 - } - st_case_249: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st250 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st250: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof250 - } - st_case_250: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st251 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st251: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof251 - } - st_case_251: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st252 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st252: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof252 - } - st_case_252: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st253 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st253: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof253 - } - st_case_253: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st254 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st254: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof254 - } - st_case_254: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st255 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st255: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof255 - } - st_case_255: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st256 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st256: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof256 - } - st_case_256: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: 
- goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st257 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st257: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof257 - } - st_case_257: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st258 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st258: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof258 - } - st_case_258: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st259 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st259: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof259 - } - st_case_259: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st260 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st260: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof260 - } - st_case_260: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st261 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st261: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof261 - } - st_case_261: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st262 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st262: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof262 - } - st_case_262: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st263 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st263: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof263 - } - st_case_263: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st264 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st4 - st264: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof264 - } - st_case_264: - switch ( m.data)[( m.p)] 
{ - case 10: - goto tr362 - case 11: - goto tr406 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr101 - case 61: - goto tr14 - case 92: - goto st10 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr361 - } - goto st4 -tr99: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st44 - st44: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof44 - } - st_case_44: -//line plugins/parsers/influx/machine.go:5593 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr103 - case 45: - goto tr104 - case 46: - goto tr105 - case 48: - goto tr106 - case 70: - goto tr108 - case 84: - goto tr109 - case 92: - goto st11 - case 102: - goto tr110 - case 116: - goto tr111 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr107 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr103: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st265 - st265: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof265 - } - st_case_265: -//line plugins/parsers/influx/machine.go:5636 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 12: - goto st207 - case 13: - goto tr357 - case 32: - goto tr426 - case 34: - goto tr26 - case 44: - goto tr427 - case 92: - goto tr27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr426 - } - goto tr25 -tr426: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st266 -tr452: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st266 -tr457: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st266 -tr460: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st266 -tr463: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st266 - st266: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof266 - } - st_case_266: -//line plugins/parsers/influx/machine.go:5692 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 12: - goto st207 - case 13: - goto tr357 - case 32: - goto st266 - case 34: - goto tr29 - case 45: - goto tr429 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr430 - } - case ( m.data)[( m.p)] >= 9: - goto st266 - } - goto st7 -tr429: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st45 - st45: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof45 - } - st_case_45: -//line plugins/parsers/influx/machine.go:5729 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st267 - } - case ( m.data)[( m.p)] >= 12: - goto tr101 - } - goto st7 -tr430: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st267 - st267: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof267 - } - st_case_267: -//line plugins/parsers/influx/machine.go:5758 - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st269 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 -tr431: -//line 
plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st268 - st268: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof268 - } - st_case_268: -//line plugins/parsers/influx/machine.go:5793 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 12: - goto st210 - case 13: - goto tr357 - case 32: - goto st268 - case 34: - goto tr29 - case 92: - goto st11 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto st268 - } - goto st7 - st269: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof269 - } - st_case_269: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st270 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st270: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof270 - } - st_case_270: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st271 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st271: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof271 - } - st_case_271: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st272 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st272: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof272 - } - st_case_272: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st273 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st273: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof273 - } - st_case_273: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st274 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st274: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof274 - } - st_case_274: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st275 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st275: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof275 - } - st_case_275: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st276 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st276: - if ( m.p)++; ( m.p) == 
( m.pe) { - goto _test_eof276 - } - st_case_276: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st277 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st277: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof277 - } - st_case_277: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st278 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st278: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof278 - } - st_case_278: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st279 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st279: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof279 - } - st_case_279: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st280 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st280: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof280 - } - st_case_280: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st281 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st281: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof281 - } - st_case_281: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st282 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st282: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof282 - } - st_case_282: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st283 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st283: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof283 - } - st_case_283: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st284 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st284: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof284 - } - st_case_284: - switch ( m.data)[( m.p)] { - 
case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st285 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st285: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof285 - } - st_case_285: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st286 - } - case ( m.data)[( m.p)] >= 9: - goto tr431 - } - goto st7 - st286: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof286 - } - st_case_286: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 12: - goto tr361 - case 13: - goto tr362 - case 32: - goto tr431 - case 34: - goto tr29 - case 92: - goto st11 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr431 - } - goto st7 -tr427: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st46 -tr469: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st46 -tr473: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st46 -tr475: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st46 -tr477: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st46 - st46: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof46 - } - st_case_46: -//line plugins/parsers/influx/machine.go:6346 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr114 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr115 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr113 -tr113: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st47 - st47: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof47 - } - st_case_47: -//line plugins/parsers/influx/machine.go:6378 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr117 - case 92: - goto st77 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st47 -tr117: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st48 - st48: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof48 - } - st_case_48: -//line plugins/parsers/influx/machine.go:6410 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr119 - case 45: - goto tr104 - case 46: - goto tr105 - case 48: - goto tr106 - case 70: - goto tr108 - case 84: - goto tr109 - case 92: - goto st11 - case 102: - goto tr110 - case 116: - goto tr111 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr107 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr119: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st287 - st287: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof287 - } - st_case_287: -//line plugins/parsers/influx/machine.go:6453 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 12: - goto st207 - case 13: - goto tr357 - case 32: - goto tr426 - case 34: - goto tr26 - 
case 44: - goto tr451 - case 92: - goto tr27 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr426 - } - goto tr25 -tr451: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st49 -tr453: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st49 -tr458: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st49 -tr461: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st49 -tr464: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st49 - st49: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof49 - } - st_case_49: -//line plugins/parsers/influx/machine.go:6509 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr121 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr120 -tr120: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st50 - st50: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof50 - } - st_case_50: -//line plugins/parsers/influx/machine.go:6541 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr123 - case 92: - goto st64 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st50 -tr123: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st51 - st51: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof51 - } - st_case_51: -//line plugins/parsers/influx/machine.go:6573 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr119 - case 45: - goto tr125 - case 46: - goto tr126 - case 48: - goto tr127 - case 70: goto tr129 - case 84: - goto tr130 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr122 + case 44: + goto tr90 + case 61: + goto tr127 case 92: - goto st11 - case 102: - goto tr131 - case 116: - goto tr132 + goto tr123 } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr128 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 } - goto st7 -tr125: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr119 +tr122: + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st52 - st52: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof52 - } - st_case_52: -//line plugins/parsers/influx/machine.go:6616 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 46: - goto st53 - case 48: - goto st291 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st294 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again tr126: -//line plugins/parsers/influx/machine.go.rl:18 + ( m.cs) = 299 +//line plugins/parsers/influx/machine.go.rl:148 - m.pb = m.p + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- - goto st53 - st53: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof53 - } - st_case_53: -//line plugins/parsers/influx/machine.go:6649 - switch ( 
m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st288 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 - st288: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof288 - } - st_case_288: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 69: - goto st54 - case 92: - goto st11 - case 101: - goto st54 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st288 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st54: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof54 - } - st_case_54: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr137 - case 43: - goto st55 - case 45: - goto st55 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st290 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr137: -//line plugins/parsers/influx/machine.go.rl:104 + ( m.cs) = 257; + {( m.p)++; goto _out } + } - m.handler.AddString(key, m.text()) - - goto st289 - st289: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof289 - } - st_case_289: -//line plugins/parsers/influx/machine.go:6738 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto st9 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st230 - } - case ( m.data)[( m.p)] >= 9: - goto st207 - } - goto tr101 - st55: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof55 - } - st_case_55: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st290 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 - st290: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof290 - } - st_case_290: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st290 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st291: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof291 - } - st_case_291: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 46: - goto st288 - case 69: - goto st54 - case 92: - goto st11 - case 101: - goto st54 - case 105: - goto st293 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st292 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st292: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof292 - } - st_case_292: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 46: - goto st288 - case 69: - goto st54 - case 92: - goto st11 - case 101: - goto st54 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] 
&& ( m.data)[( m.p)] <= 57 { - goto st292 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st293: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof293 - } - st_case_293: - switch ( m.data)[( m.p)] { - case 10: - goto tr389 - case 12: - goto tr388 - case 13: - goto tr389 - case 32: - goto tr457 - case 34: - goto tr29 - case 44: - goto tr458 - case 92: - goto st11 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr457 - } - goto st7 - st294: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof294 - } - st_case_294: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 46: - goto st288 - case 69: - goto st54 - case 92: - goto st11 - case 101: - goto st54 - case 105: - goto st293 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st294 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 -tr127: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st295 - st295: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof295 - } - st_case_295: -//line plugins/parsers/influx/machine.go:6958 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 46: - goto st288 - case 69: - goto st54 - case 92: - goto st11 - case 101: - goto st54 - case 105: - goto st293 - case 117: - goto st296 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st292 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st296: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof296 - } - st_case_296: - switch ( m.data)[( m.p)] { - case 10: - goto tr393 - case 12: - goto tr392 - case 13: - goto tr393 - case 32: - goto tr460 - case 34: - goto tr29 - case 44: - goto tr461 - case 92: - goto st11 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr460 - } - goto st7 -tr128: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st297 - st297: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof297 - } - st_case_297: -//line plugins/parsers/influx/machine.go:7030 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr453 - case 46: - goto st288 - case 69: - goto st54 - case 92: - goto st11 - case 101: - goto st54 - case 105: - goto st293 - case 117: - goto st296 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st297 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 -tr129: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st298 - st298: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof298 - } - st_case_298: -//line plugins/parsers/influx/machine.go:7077 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 12: - goto tr396 - case 13: - goto tr397 - case 32: - goto tr463 - case 34: - goto tr29 - case 44: - goto tr464 - case 65: - goto st56 - case 92: - goto st11 - case 97: - goto st59 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 - } - goto st7 - st56: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof56 - } - st_case_56: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 76: - goto st57 - case 92: - 
goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st57: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof57 - } - st_case_57: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 83: - goto st58 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st58: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof58 - } - st_case_58: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 69: - goto st299 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 + goto _again st299: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof299 } st_case_299: +//line plugins/parsers/influx/machine.go:6335 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr101 + case 11: + goto tr500 case 13: - goto tr397 + goto st32 case 32: - goto tr463 - case 34: - goto tr29 + goto tr499 case 44: - goto tr464 + goto tr501 + case 61: + goto tr47 case 92: - goto st11 + goto st27 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr499 } - goto st7 - st59: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof59 - } - st_case_59: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 108: - goto st60 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st60: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof60 - } - st_case_60: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 115: - goto st61 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st61: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof61 - } - st_case_61: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 101: - goto st299 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 -tr130: -//line plugins/parsers/influx/machine.go.rl:18 + goto st10 +tr499: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st300 + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr563: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr811: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr729: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr941: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( 
m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr947: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr953: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1005: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1009: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1013: + ( m.cs) = 300 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st300: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof300 } st_case_300: -//line plugins/parsers/influx/machine.go:7252 +//line plugins/parsers/influx/machine.go:6571 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr101 + case 11: + goto tr503 case 13: - goto tr397 + goto st32 case 32: - goto tr463 - case 34: - goto tr29 + goto st300 case 44: - goto tr464 - case 82: - goto st62 + goto tr103 + case 45: + goto tr465 + case 61: + goto tr103 case 92: - goto st11 - case 114: - goto st63 + goto tr10 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr466 + } + case ( m.data)[( m.p)] >= 9: + goto st300 } - goto st7 - st62: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof62 - } - st_case_62: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 85: - goto st58 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st63: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof63 - } - st_case_63: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 117: - goto st61 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 -tr131: -//line 
plugins/parsers/influx/machine.go.rl:18 + goto tr6 +tr503: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -7322,92 +6606,3853 @@ tr131: goto _test_eof301 } st_case_301: -//line plugins/parsers/influx/machine.go:7326 +//line plugins/parsers/influx/machine.go:6610 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr101 + case 11: + goto tr503 case 13: - goto tr397 + goto st32 case 32: - goto tr463 - case 34: - goto tr29 + goto st300 case 44: - goto tr464 + goto tr103 + case 45: + goto tr465 + case 61: + goto tr12 case 92: - goto st11 - case 97: - goto st59 + goto tr10 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr466 + } + case ( m.data)[( m.p)] >= 9: + goto st300 } - goto st7 -tr132: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr6 +tr500: + ( m.cs) = 302 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr504: + ( m.cs) = 302 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st302 + goto _again st302: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof302 } st_case_302: -//line plugins/parsers/influx/machine.go:7360 +//line plugins/parsers/influx/machine.go:6673 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 12: - goto tr396 + goto tr101 + case 11: + goto tr504 case 13: - goto tr397 + goto st32 case 32: - goto tr463 - case 34: - goto tr29 + goto tr499 case 44: - goto tr464 + goto tr4 + case 45: + goto tr505 + case 61: + goto tr47 case 92: - goto st11 - case 114: - goto st63 + goto tr43 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr506 + } + case ( m.data)[( m.p)] >= 9: + goto tr499 } - goto st7 -tr121: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr39 +tr505: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st64 + goto st42 + st42: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof42 + } + st_case_42: +//line plugins/parsers/influx/machine.go:6712 + switch ( m.data)[( m.p)] { + case 10: + goto tr130 + case 11: + goto tr46 + case 13: + goto tr130 + case 32: + goto tr1 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st303 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st10 +tr506: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st303 + st303: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof303 + } + st_case_303: +//line plugins/parsers/influx/machine.go:6749 + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st307 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 +tr512: + ( m.cs) = 304 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { 
+ ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr572: + ( m.cs) = 304 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr507: + ( m.cs) = 304 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr569: + ( m.cs) = 304 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st304: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof304 + } + st_case_304: +//line plugins/parsers/influx/machine.go:6852 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr511 + case 13: + goto st32 + case 32: + goto st304 + case 44: + goto tr8 + case 61: + goto tr8 + case 92: + goto tr10 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st304 + } + goto tr6 +tr511: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st305 + st305: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof305 + } + st_case_305: +//line plugins/parsers/influx/machine.go:6884 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr511 + case 13: + goto st32 + case 32: + goto st304 + case 44: + goto tr8 + case 61: + goto tr12 + case 92: + goto tr10 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st304 + } + goto tr6 +tr513: + ( m.cs) = 306 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr508: + ( m.cs) = 306 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st306: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof306 + } + st_case_306: +//line plugins/parsers/influx/machine.go:6950 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr513 + case 13: + goto st32 + case 32: + goto tr512 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto tr43 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr512 + } + goto tr39 + st307: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof307 + } + st_case_307: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st308 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st308: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof308 + } + 
st_case_308: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st309 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st309: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof309 + } + st_case_309: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st310 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st310: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof310 + } + st_case_310: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st311 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st311: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof311 + } + st_case_311: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st312 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st312: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof312 + } + st_case_312: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st313 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st313: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof313 + } + st_case_313: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st314 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st314: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof314 + } + st_case_314: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st315 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st315: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof315 + } + st_case_315: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st316 + } + case ( m.data)[( m.p)] >= 
9: + goto tr507 + } + goto st10 + st316: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof316 + } + st_case_316: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st317 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st317: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof317 + } + st_case_317: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st318 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st318: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof318 + } + st_case_318: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st319 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st319: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof319 + } + st_case_319: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st320 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st320: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof320 + } + st_case_320: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st321 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st321: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof321 + } + st_case_321: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st322 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st322: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof322 + } + st_case_322: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st323 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st323: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof323 + } + st_case_323: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + switch { + case ( m.data)[( m.p)] > 12: + 
if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st324 + } + case ( m.data)[( m.p)] >= 9: + goto tr507 + } + goto st10 + st324: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof324 + } + st_case_324: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr508 + case 13: + goto tr470 + case 32: + goto tr507 + case 44: + goto tr4 + case 61: + goto tr47 + case 92: + goto st27 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr507 + } + goto st10 +tr501: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr565: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr813: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr733: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr945: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr951: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr957: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1007: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1011: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( 
m.p)++; goto _out } + } + + goto _again +tr1015: + ( m.cs) = 43 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st43: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof43 + } + st_case_43: +//line plugins/parsers/influx/machine.go:7721 + switch ( m.data)[( m.p)] { + case 32: + goto tr45 + case 44: + goto tr45 + case 61: + goto tr45 + case 92: + goto tr133 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto tr132 +tr132: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st44 + st44: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof44 + } + st_case_44: +//line plugins/parsers/influx/machine.go:7752 + switch ( m.data)[( m.p)] { + case 32: + goto tr45 + case 44: + goto tr45 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st44 +tr135: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st45 + st45: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof45 + } + st_case_45: +//line plugins/parsers/influx/machine.go:7787 + switch ( m.data)[( m.p)] { + case 32: + goto tr45 + case 34: + goto tr137 + case 44: + goto tr45 + case 45: + goto tr138 + case 46: + goto tr139 + case 48: + goto tr140 + case 61: + goto tr45 + case 70: + goto tr142 + case 84: + goto tr143 + case 92: + goto tr56 + case 102: + goto tr144 + case 116: + goto tr145 + } + switch { + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr45 + } + case ( m.data)[( m.p)] > 13: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr141 + } + default: + goto tr45 + } + goto tr55 +tr137: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st46 + st46: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof46 + } + st_case_46: +//line plugins/parsers/influx/machine.go:7838 + switch ( m.data)[( m.p)] { + case 10: + goto tr24 + case 11: + goto tr148 + case 13: + goto tr23 + case 32: + goto tr147 + case 34: + goto tr149 + case 44: + goto tr150 + case 61: + goto tr23 + case 92: + goto tr151 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr147 + } + goto tr146 +tr146: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st47 + st47: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof47 + } + st_case_47: +//line plugins/parsers/influx/machine.go:7872 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 +tr178: + ( m.cs) = 48 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr153: + ( m.cs) = 48 +//line plugins/parsers/influx/machine.go.rl:99 + 
+ err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr147: + ( m.cs) = 48 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st48: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof48 + } + st_case_48: +//line plugins/parsers/influx/machine.go:7943 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr160 + case 13: + goto st6 + case 32: + goto st48 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr161 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st48 + } + goto tr158 +tr158: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st49 + st49: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof49 + } + st_case_49: +//line plugins/parsers/influx/machine.go:7977 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st49 +tr163: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st50 + st50: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof50 + } + st_case_50: +//line plugins/parsers/influx/machine.go:8009 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr105 + case 45: + goto tr165 + case 46: + goto tr166 + case 48: + goto tr167 + case 70: + goto tr169 + case 84: + goto tr170 + case 92: + goto st73 + case 102: + goto tr171 + case 116: + goto tr172 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr168 + } + goto st6 +tr165: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st51 + st51: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof51 + } + st_case_51: +//line plugins/parsers/influx/machine.go:8047 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 46: + goto st52 + case 48: + goto st631 + case 92: + goto st73 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st632 + } + goto st6 +tr166: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st52 + st52: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof52 + } + st_case_52: +//line plugins/parsers/influx/machine.go:8075 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st325 + } + goto st6 + st325: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof325 + } + st_case_325: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 13: + goto tr533 + case 32: + goto tr531 + case 34: + goto tr29 + case 44: + goto tr534 + case 69: + goto st173 + case 92: + goto st73 + case 101: + goto st173 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st325 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 + } + goto st6 +tr916: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st326 +tr531: + ( m.cs) = 326 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr923: + ( m.cs) = 
326 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr925: + ( m.cs) = 326 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr928: + ( m.cs) = 326 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st326: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof326 + } + st_case_326: +//line plugins/parsers/influx/machine.go:8183 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 13: + goto st102 + case 32: + goto st326 + case 34: + goto tr29 + case 45: + goto tr538 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr539 + } + case ( m.data)[( m.p)] >= 9: + goto st326 + } + goto st6 +tr665: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st327 +tr273: +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st327 +tr532: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr674: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr737: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr743: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr749: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr891: + ( m.cs) = 327 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first 
column in the line + + goto _again + st327: +//line plugins/parsers/influx/machine.go.rl:172 + + m.finishMetric = true + ( m.cs) = 739; + {( m.p)++; goto _out } + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof327 + } + st_case_327: +//line plugins/parsers/influx/machine.go:8352 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr337 + case 13: + goto st6 + case 32: + goto st164 + case 34: + goto tr116 + case 35: + goto st6 + case 44: + goto st6 + case 92: + goto tr338 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st164 + } + goto tr335 +tr335: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st53 + st53: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof53 + } + st_case_53: +//line plugins/parsers/influx/machine.go:8386 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 92: + goto st155 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 +tr179: + ( m.cs) = 54 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st54: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof54 + } + st_case_54: +//line plugins/parsers/influx/machine.go:8425 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr183 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr122 + case 44: + goto tr180 + case 61: + goto st53 + case 92: + goto tr184 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto tr182 +tr182: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st55 + st55: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof55 + } + st_case_55: +//line plugins/parsers/influx/machine.go:8459 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr186 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr126 + case 44: + goto tr180 + case 61: + goto tr187 + case 92: + goto st152 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st55 +tr186: + ( m.cs) = 56 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr183: + ( m.cs) = 56 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st56: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof56 + } + st_case_56: +//line plugins/parsers/influx/machine.go:8517 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr183 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr122 + case 44: + goto tr180 + case 61: + goto tr187 + case 92: + goto tr184 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto tr182 +tr180: + ( m.cs) = 57 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr156: + ( m.cs) = 57 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; 
goto _out } + } + + goto _again +tr150: + ( m.cs) = 57 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st57: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof57 + } + st_case_57: +//line plugins/parsers/influx/machine.go:8588 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr190 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr191 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr189 +tr189: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st58 + st58: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof58 + } + st_case_58: +//line plugins/parsers/influx/machine.go:8620 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr193 + case 44: + goto st6 + case 61: + goto tr194 + case 92: + goto st69 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st58 +tr190: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr193: + ( m.cs) = 328 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st328: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof328 + } + st_case_328: +//line plugins/parsers/influx/machine.go:8676 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st329 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto st35 + case 61: + goto tr53 + case 92: + goto st23 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st271 + } + goto st13 + st329: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof329 + } + st_case_329: + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st329 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto tr196 + case 45: + goto tr541 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr542 + } + case ( m.data)[( m.p)] >= 9: + goto st271 + } + goto st13 +tr541: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st59 + st59: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof59 + } + st_case_59: +//line plugins/parsers/influx/machine.go:8740 + switch ( m.data)[( m.p)] { + case 32: + goto tr196 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr196 + } + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st330 + } + default: + goto tr196 + } + goto st13 +tr542: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st330 + st330: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof330 + } + st_case_330: +//line plugins/parsers/influx/machine.go:8775 + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + 
case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st332 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 +tr543: + ( m.cs) = 331 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st331: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof331 + } + st_case_331: +//line plugins/parsers/influx/machine.go:8819 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st331 + case 13: + goto st32 + case 32: + goto st276 + case 44: + goto tr2 + case 61: + goto tr53 + case 92: + goto st23 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st276 + } + goto st13 + st332: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof332 + } + st_case_332: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st333 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st333: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof333 + } + st_case_333: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st334 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st334: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof334 + } + st_case_334: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st335 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st335: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof335 + } + st_case_335: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st336 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st336: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof336 + } + st_case_336: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st337 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st337: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof337 + } + st_case_337: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st338 + } + case ( 
m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st338: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof338 + } + st_case_338: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st339 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st339: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof339 + } + st_case_339: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st340 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st340: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof340 + } + st_case_340: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st341 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st341: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof341 + } + st_case_341: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st342 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st342: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof342 + } + st_case_342: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st343 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st343: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof343 + } + st_case_343: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st344 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st344: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof344 + } + st_case_344: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st345 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st345: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof345 + } + st_case_345: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch 
{ + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st346 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st346: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof346 + } + st_case_346: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st347 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st347: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof347 + } + st_case_347: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st348 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st348: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof348 + } + st_case_348: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st349 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st13 + st349: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof349 + } + st_case_349: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr543 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr196 + case 61: + goto tr53 + case 92: + goto st23 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr467 + } + goto st13 +tr194: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + + goto st60 + st60: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof60 + } + st_case_60: +//line plugins/parsers/influx/machine.go:9386 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr149 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr151 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr146 +tr149: + ( m.cs) = 350 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr155: + ( m.cs) = 350 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st350: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof350 + } + st_case_350: +//line plugins/parsers/influx/machine.go:9442 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr564 + case 13: + goto st32 + case 32: + goto tr563 + case 44: + goto tr565 + case 61: + goto tr130 + case 92: + goto st21 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr563 + } + goto st15 +tr564: + ( m.cs) = 351 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr731: + ( m.cs) = 351 +//line 
plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr943: + ( m.cs) = 351 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr949: + ( m.cs) = 351 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr955: + ( m.cs) = 351 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st351: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof351 + } + st_case_351: +//line plugins/parsers/influx/machine.go:9573 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr566 + case 13: + goto st32 + case 32: + goto tr563 + case 44: + goto tr60 + case 45: + goto tr567 + case 61: + goto tr130 + case 92: + goto tr64 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr568 + } + case ( m.data)[( m.p)] >= 9: + goto tr563 + } + goto tr62 +tr591: + ( m.cs) = 352 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr566: + ( m.cs) = 352 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st352: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof352 + } + st_case_352: +//line plugins/parsers/influx/machine.go:9636 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr566 + case 13: + goto st32 + case 32: + goto tr563 + case 44: + goto tr60 + case 45: + goto tr567 + case 61: + goto tr12 + case 92: + goto tr64 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr568 + } + case ( m.data)[( m.p)] >= 9: + goto tr563 + } + goto tr62 +tr567: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st61 + st61: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof61 + } + st_case_61: +//line plugins/parsers/influx/machine.go:9675 + switch ( m.data)[( m.p)] { + case 10: + goto tr130 + case 11: + goto tr66 + case 13: + goto tr130 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto 
st353 + } + case ( m.data)[( m.p)] >= 9: + goto tr58 + } + goto st17 +tr568: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st353 + st353: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof353 + } + st_case_353: +//line plugins/parsers/influx/machine.go:9712 + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st355 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 +tr573: + ( m.cs) = 354 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr570: + ( m.cs) = 354 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st354: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof354 + } + st_case_354: +//line plugins/parsers/influx/machine.go:9783 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr573 + case 13: + goto st32 + case 32: + goto tr572 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto tr64 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr572 + } + goto tr62 + st355: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof355 + } + st_case_355: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st356 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st356: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof356 + } + st_case_356: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st357 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st357: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof357 + } + st_case_357: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st358 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st358: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof358 + } + st_case_358: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st359 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto 
st17 + st359: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof359 + } + st_case_359: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st360 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st360: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof360 + } + st_case_360: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st361 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st361: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof361 + } + st_case_361: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st362 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st362: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof362 + } + st_case_362: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st363 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st363: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof363 + } + st_case_363: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st364 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st364: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof364 + } + st_case_364: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st365 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st365: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof365 + } + st_case_365: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st366 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st366: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof366 + } + st_case_366: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( 
m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st367 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st367: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof367 + } + st_case_367: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st368 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st368: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof368 + } + st_case_368: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st369 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st369: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof369 + } + st_case_369: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st370 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st370: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof370 + } + st_case_370: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st371 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st371: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof371 + } + st_case_371: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st372 + } + case ( m.data)[( m.p)] >= 9: + goto tr569 + } + goto st17 + st372: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof372 + } + st_case_372: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr570 + case 13: + goto tr470 + case 32: + goto tr569 + case 44: + goto tr60 + case 61: + goto tr12 + case 92: + goto st19 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr569 + } + goto st17 +tr151: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st62 + st62: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof62 + } + st_case_62: +//line plugins/parsers/influx/machine.go:10350 + switch ( m.data)[( m.p)] { + case 34: + goto st47 + case 92: + goto st63 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st15 + st63: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof63 + } + st_case_63: +//line plugins/parsers/influx/machine.go:10374 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto 
tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 +tr154: + ( m.cs) = 64 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr148: + ( m.cs) = 64 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again st64: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof64 } st_case_64: -//line plugins/parsers/influx/machine.go:7394 +//line plugins/parsers/influx/machine.go:10432 switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr201 + case 13: + goto st6 + case 32: + goto tr153 case 34: - goto st50 + goto tr202 + case 44: + goto tr156 + case 61: + goto st6 case 92: - goto st50 + goto tr203 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 } - goto st4 -tr104: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr200 +tr200: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -7417,3864 +10462,831 @@ tr104: goto _test_eof65 } st_case_65: -//line plugins/parsers/influx/machine.go:7421 +//line plugins/parsers/influx/machine.go:10466 switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr28 + case 11: + goto tr205 + case 13: + goto st6 + case 32: + goto tr153 case 34: - goto tr29 - case 46: - goto st66 - case 48: - goto st305 + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 case 92: - goto st11 + goto st67 } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st308 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 } - goto st7 -tr105: -//line plugins/parsers/influx/machine.go.rl:18 + goto st65 +tr205: + ( m.cs) = 66 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr201: + ( m.cs) = 66 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st66 + goto _again st66: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof66 } st_case_66: -//line plugins/parsers/influx/machine.go:7454 +//line plugins/parsers/influx/machine.go:10524 switch ( m.data)[( m.p)] { case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st303 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 - st303: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof303 - } - st_case_303: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr469 - case 69: - goto st67 - case 92: - goto st11 - case 101: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( 
m.p)] <= 57 { - goto st303 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st67: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof67 - } - st_case_67: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr137 - case 43: - goto st68 - case 45: - goto st68 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st304 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 - st68: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof68 - } - st_case_68: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st304 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 - st304: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof304 - } - st_case_304: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr469 - case 92: - goto st11 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st304 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st305: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof305 - } - st_case_305: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 - case 92: - goto st11 - case 101: - goto st67 - case 105: - goto st307 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st306 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st306: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof306 - } - st_case_306: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 - case 92: - goto st11 - case 101: - goto st67 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st306 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st307: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof307 - } - st_case_307: - switch ( m.data)[( m.p)] { - case 10: - goto tr389 - case 12: - goto tr388 - case 13: - goto tr389 - case 32: - goto tr457 - case 34: - goto tr29 - case 44: - goto tr473 - case 92: - goto st11 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr457 - } - goto st7 - st308: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof308 - } - st_case_308: - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 - case 92: - goto st11 - case 101: - goto st67 - case 105: - goto st307 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st308 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 -tr106: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st309 - st309: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof309 - } - st_case_309: -//line plugins/parsers/influx/machine.go:7732 - switch ( m.data)[( m.p)] 
{ - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 - case 92: - goto st11 - case 101: - goto st67 - case 105: - goto st307 - case 117: - goto st310 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st306 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 - st310: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof310 - } - st_case_310: - switch ( m.data)[( m.p)] { - case 10: - goto tr393 - case 12: - goto tr392 - case 13: - goto tr393 - case 32: - goto tr460 - case 34: - goto tr29 - case 44: - goto tr475 - case 92: - goto st11 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr460 - } - goto st7 -tr107: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st311 - st311: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof311 - } - st_case_311: -//line plugins/parsers/influx/machine.go:7804 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 12: - goto tr382 - case 13: - goto tr383 - case 32: - goto tr452 - case 34: - goto tr29 - case 44: - goto tr469 - case 46: - goto st303 - case 69: - goto st67 - case 92: - goto st11 - case 101: - goto st67 - case 105: - goto st307 - case 117: - goto st310 - } - switch { - case ( m.data)[( m.p)] > 11: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st311 - } - case ( m.data)[( m.p)] >= 9: - goto tr452 - } - goto st7 -tr108: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st312 - st312: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof312 - } - st_case_312: -//line plugins/parsers/influx/machine.go:7851 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 12: - goto tr396 - case 13: - goto tr397 - case 32: - goto tr463 - case 34: - goto tr29 - case 44: - goto tr477 - case 65: - goto st69 - case 92: - goto st11 - case 97: - goto st72 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 - } - goto st7 - st69: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof69 - } - st_case_69: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 76: - goto st70 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st70: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof70 - } - st_case_70: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 83: - goto st71 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st71: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof71 - } - st_case_71: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 69: - goto st313 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st313: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof313 - } - st_case_313: - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 12: - goto tr396 - case 13: - goto tr397 - case 32: - goto tr463 - case 34: - goto tr29 - case 44: - goto tr477 - case 92: - goto st11 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 - } - goto st7 - st72: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof72 - } - st_case_72: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 108: - goto st73 - } - if 12 <= ( m.data)[( 
m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st73: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof73 - } - st_case_73: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 115: - goto st74 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st74: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof74 - } - st_case_74: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 101: - goto st313 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 -tr109: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st314 - st314: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof314 - } - st_case_314: -//line plugins/parsers/influx/machine.go:8026 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 12: - goto tr396 - case 13: - goto tr397 - case 32: - goto tr463 - case 34: - goto tr29 - case 44: - goto tr477 - case 82: - goto st75 - case 92: - goto st11 - case 114: - goto st76 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 - } - goto st7 - st75: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof75 - } - st_case_75: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 85: - goto st71 - case 92: - goto st11 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 - st76: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof76 - } - st_case_76: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr29 - case 92: - goto st11 - case 117: - goto st74 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st7 -tr110: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st315 - st315: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof315 - } - st_case_315: -//line plugins/parsers/influx/machine.go:8100 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 12: - goto tr396 - case 13: - goto tr397 - case 32: - goto tr463 - case 34: - goto tr29 - case 44: - goto tr477 - case 92: - goto st11 - case 97: - goto st72 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 - } - goto st7 -tr111: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st316 - st316: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof316 - } - st_case_316: -//line plugins/parsers/influx/machine.go:8134 - switch ( m.data)[( m.p)] { - case 10: - goto tr397 - case 12: - goto tr396 - case 13: - goto tr397 - case 32: - goto tr463 - case 34: - goto tr29 - case 44: - goto tr477 - case 92: - goto st11 - case 114: - goto st76 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 11 { - goto tr463 - } - goto st7 -tr115: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st77 - st77: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof77 - } - st_case_77: -//line plugins/parsers/influx/machine.go:8168 - switch ( m.data)[( m.p)] { - case 34: - goto st47 - case 92: - goto st47 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st4 -tr96: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st78 - st78: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof78 - } - st_case_78: -//line plugins/parsers/influx/machine.go:8195 - switch ( m.data)[( m.p)] { - case 34: - goto st42 - case 92: - goto 
st42 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st4 -tr94: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st79 - st79: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof79 - } - st_case_79: -//line plugins/parsers/influx/machine.go:8222 - switch ( m.data)[( m.p)] { - case 9: - goto st41 + goto tr28 case 11: - goto tr94 - case 12: - goto st3 + goto tr201 + case 13: + goto st6 case 32: - goto st41 + goto tr153 case 34: - goto tr95 + goto tr202 case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto tr96 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr92 -tr88: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st80 -tr82: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st80 - st80: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof80 - } - st_case_80: -//line plugins/parsers/influx/machine.go:8266 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr157 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr158 - case 44: - goto tr90 - case 61: - goto st40 - case 92: - goto tr159 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr156 -tr156: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st81 - st81: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof81 - } - st_case_81: -//line plugins/parsers/influx/machine.go:8300 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr161 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr162 - case 44: - goto tr90 + goto tr156 case 61: goto tr163 case 92: - goto st132 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st81 -tr161: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st82 -tr157: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st82 - st82: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof82 - } - st_case_82: -//line plugins/parsers/influx/machine.go:8344 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr157 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr158 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto tr159 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr156 -tr158: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st317 -tr162: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st317 - st317: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof317 - } - st_case_317: -//line plugins/parsers/influx/machine.go:8388 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr483 - case 13: - goto tr357 - case 32: - goto tr482 - case 44: - goto tr484 - case 61: - goto tr49 - case 92: - goto st84 + goto tr203 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr482 + goto tr153 } - goto st26 -tr482: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st318 -tr514: -//line 
plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st318 -tr566: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st318 -tr572: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st318 -tr576: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st318 -tr580: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st318 -tr791: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st318 -tr800: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st318 -tr805: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st318 -tr810: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st318 - st318: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof318 - } - st_case_318: -//line plugins/parsers/influx/machine.go:8506 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr486 - case 13: - goto tr357 - case 32: - goto st318 - case 44: - goto tr101 - case 45: - goto tr404 - case 61: - goto tr101 - case 92: - goto tr12 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr405 - } - case ( m.data)[( m.p)] >= 9: - goto st318 - } - goto tr9 -tr486: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr200 +tr202: + ( m.cs) = 373 +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st319 - st319: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof319 - } - st_case_319: -//line plugins/parsers/influx/machine.go:8545 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr486 - case 13: - goto tr357 - case 32: - goto st318 - case 44: - goto tr101 - case 45: - goto tr404 - case 61: - goto tr14 - case 92: - goto tr12 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr405 - } - case ( m.data)[( m.p)] >= 9: - goto st318 - } - goto tr9 -tr483: -//line plugins/parsers/influx/machine.go.rl:72 +//line plugins/parsers/influx/machine.go.rl:148 - m.handler.SetMeasurement(m.text()) + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- - goto st320 -tr487: -//line plugins/parsers/influx/machine.go.rl:72 + ( m.cs) = 257; + {( m.p)++; goto _out } + } - m.handler.SetMeasurement(m.text()) + goto _again +tr206: + ( m.cs) = 373 +//line plugins/parsers/influx/machine.go.rl:148 -//line plugins/parsers/influx/machine.go.rl:18 + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- - m.pb = m.p + ( m.cs) = 257; + {( m.p)++; goto _out } + } - goto st320 - st320: - if ( m.p)++; 
( m.p) == ( m.pe) { - goto _test_eof320 - } - st_case_320: -//line plugins/parsers/influx/machine.go:8594 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr487 - case 13: - goto tr357 - case 32: - goto tr482 - case 44: - goto tr7 - case 45: - goto tr488 - case 61: - goto tr49 - case 92: - goto tr46 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr489 - } - case ( m.data)[( m.p)] >= 9: - goto tr482 - } - goto tr44 -tr488: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st83 - st83: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof83 - } - st_case_83: -//line plugins/parsers/influx/machine.go:8633 - switch ( m.data)[( m.p)] { - case 10: - goto tr101 - case 11: - goto tr48 - case 13: - goto tr101 - case 32: - goto tr4 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st321 - } - case ( m.data)[( m.p)] >= 9: - goto tr4 - } - goto st26 -tr489: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st321 - st321: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof321 - } - st_case_321: -//line plugins/parsers/influx/machine.go:8670 - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st325 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 -tr495: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st322 -tr523: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st322 -tr490: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st322 -tr520: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st322 - st322: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof322 - } - st_case_322: -//line plugins/parsers/influx/machine.go:8733 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr494 - case 13: - goto tr357 - case 32: - goto st322 - case 44: - goto tr5 - case 61: - goto tr5 - case 92: - goto tr12 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st322 - } - goto tr9 -tr494: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st323 - st323: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof323 - } - st_case_323: -//line plugins/parsers/influx/machine.go:8765 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr494 - case 13: - goto tr357 - case 32: - goto st322 - case 44: - goto tr5 - case 61: - goto tr14 - case 92: - goto tr12 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st322 - } - goto tr9 -tr496: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st324 -tr491: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - 
- goto st324 - st324: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof324 - } - st_case_324: -//line plugins/parsers/influx/machine.go:8811 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr496 - case 13: - goto tr357 - case 32: - goto tr495 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto tr46 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr495 - } - goto tr44 -tr46: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st84 - st84: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof84 - } - st_case_84: -//line plugins/parsers/influx/machine.go:8843 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st26 - st325: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof325 - } - st_case_325: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st326 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st326: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof326 - } - st_case_326: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st327 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st327: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof327 - } - st_case_327: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st328 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st328: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof328 - } - st_case_328: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st329 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st329: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof329 - } - st_case_329: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st330 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st330: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof330 - } - st_case_330: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st331 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - 
st331: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof331 - } - st_case_331: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st332 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st332: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof332 - } - st_case_332: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st333 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st333: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof333 - } - st_case_333: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st334 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st334: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof334 - } - st_case_334: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st335 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st335: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof335 - } - st_case_335: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st336 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st336: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof336 - } - st_case_336: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st337 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st337: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof337 - } - st_case_337: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st338 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st338: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof338 - } - st_case_338: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 57 { - goto st339 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st339: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof339 - } - st_case_339: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st340 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st340: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof340 - } - st_case_340: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st341 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st341: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof341 - } - st_case_341: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st342 - } - case ( m.data)[( m.p)] >= 9: - goto tr490 - } - goto st26 - st342: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof342 - } - st_case_342: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr491 - case 13: - goto tr362 - case 32: - goto tr490 - case 44: - goto tr7 - case 61: - goto tr49 - case 92: - goto st84 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr490 - } - goto st26 -tr484: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st85 -tr516: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st85 -tr568: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st85 -tr574: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st85 -tr578: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st85 -tr582: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st85 -tr795: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st85 -tr820: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st85 -tr823: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st85 -tr826: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line 
plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st85 - st85: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof85 - } - st_case_85: -//line plugins/parsers/influx/machine.go:9485 - switch ( m.data)[( m.p)] { - case 32: - goto tr61 - case 44: - goto tr61 - case 61: - goto tr61 - case 92: - goto tr167 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto tr166 -tr166: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st86 - st86: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof86 - } - st_case_86: -//line plugins/parsers/influx/machine.go:9516 - switch ( m.data)[( m.p)] { - case 32: - goto tr61 - case 44: - goto tr61 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st86 -tr169: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st87 - st87: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof87 - } - st_case_87: -//line plugins/parsers/influx/machine.go:9551 - switch ( m.data)[( m.p)] { - case 32: - goto tr61 - case 34: - goto tr171 - case 44: - goto tr61 - case 45: - goto tr172 - case 46: - goto tr173 - case 48: - goto tr174 - case 61: - goto tr61 - case 70: - goto tr176 - case 84: - goto tr177 - case 92: - goto tr58 - case 102: - goto tr178 - case 116: - goto tr179 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr61 - } - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr175 - } - default: - goto tr61 - } - goto tr57 -tr171: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st88 - st88: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof88 - } - st_case_88: -//line plugins/parsers/influx/machine.go:9602 - switch ( m.data)[( m.p)] { - case 9: - goto tr181 - case 11: - goto tr182 - case 12: - goto tr60 - case 32: - goto tr181 - case 34: - goto tr183 - case 44: - goto tr184 - case 61: - goto tr25 - case 92: - goto tr185 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr180 -tr180: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st89 - st89: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof89 - } - st_case_89: -//line plugins/parsers/influx/machine.go:9636 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 -tr187: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st90 -tr181: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st90 - st90: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof90 - } - st_case_90: -//line plugins/parsers/influx/machine.go:9680 - switch ( m.data)[( m.p)] { - case 9: - goto st90 - case 11: - goto tr194 - case 12: - goto st3 - case 32: - goto st90 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr195 
- } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr192 -tr192: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st91 - st91: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof91 - } - st_case_91: -//line plugins/parsers/influx/machine.go:9714 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr5 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st91 -tr197: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st92 - st92: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof92 - } - st_case_92: -//line plugins/parsers/influx/machine.go:9746 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 34: - goto tr103 - case 45: - goto tr125 - case 46: - goto tr126 - case 48: - goto tr127 - case 70: - goto tr129 - case 84: - goto tr130 - case 92: - goto st11 - case 102: - goto tr131 - case 116: - goto tr132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr128 - } - case ( m.data)[( m.p)] >= 12: - goto tr5 - } - goto st7 -tr195: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st93 - st93: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof93 - } - st_case_93: -//line plugins/parsers/influx/machine.go:9789 - switch ( m.data)[( m.p)] { - case 34: - goto st91 - case 92: - goto st91 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st4 -tr194: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st94 - st94: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof94 - } - st_case_94: -//line plugins/parsers/influx/machine.go:9816 - switch ( m.data)[( m.p)] { - case 9: - goto st90 - case 11: - goto tr194 - case 12: - goto st3 - case 32: - goto st90 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto tr195 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto tr192 -tr188: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st95 -tr182: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st95 - st95: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof95 - } - st_case_95: -//line plugins/parsers/influx/machine.go:9860 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr200 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr201 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto tr202 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr199 -tr199: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st96 - st96: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof96 - } - st_case_96: -//line plugins/parsers/influx/machine.go:9894 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr204 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr205 - case 44: - goto tr190 - case 61: - goto tr197 - case 92: - goto st105 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st96 -tr204: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto 
st97 -tr200: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st97 - st97: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof97 - } - st_case_97: -//line plugins/parsers/influx/machine.go:9938 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr200 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr201 - case 44: - goto tr190 - case 61: - goto tr197 - case 92: - goto tr202 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr199 -tr201: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st343 -tr205: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st343 - st343: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof343 - } - st_case_343: -//line plugins/parsers/influx/machine.go:9982 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr515 - case 13: - goto tr357 - case 32: - goto tr514 - case 44: - goto tr516 - case 61: - goto tr14 - case 92: - goto st35 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr514 - } - goto st33 -tr515: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st344 -tr517: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st344 - st344: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof344 - } - st_case_344: -//line plugins/parsers/influx/machine.go:10024 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr517 - case 13: - goto tr357 - case 32: - goto tr514 - case 44: - goto tr63 - case 45: - goto tr518 - case 61: - goto tr14 - case 92: - goto tr67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr519 - } - case ( m.data)[( m.p)] >= 9: - goto tr514 - } - goto tr65 -tr518: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st98 - st98: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof98 - } - st_case_98: -//line plugins/parsers/influx/machine.go:10063 - switch ( m.data)[( m.p)] { - case 10: - goto tr207 - case 11: - goto tr69 - case 13: - goto tr207 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st345 - } - case ( m.data)[( m.p)] >= 9: - goto tr60 - } - goto st33 -tr519: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st345 - st345: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof345 - } - st_case_345: -//line plugins/parsers/influx/machine.go:10100 - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st347 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 -tr524: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st346 -tr521: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, 
m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st346 - st346: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof346 - } - st_case_346: -//line plugins/parsers/influx/machine.go:10151 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr524 - case 13: - goto tr357 - case 32: - goto tr523 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto tr67 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr523 - } - goto tr65 - st347: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof347 - } - st_case_347: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st348 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st348: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof348 - } - st_case_348: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st349 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st349: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof349 - } - st_case_349: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st350 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st350: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof350 - } - st_case_350: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st351 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st351: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof351 - } - st_case_351: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st352 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st352: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof352 - } - st_case_352: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st353 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st353: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof353 - } - st_case_353: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - 
} - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st354 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st354: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof354 - } - st_case_354: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st355 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st355: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof355 - } - st_case_355: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st356 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st356: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof356 - } - st_case_356: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st357 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st357: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof357 - } - st_case_357: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st358 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st358: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof358 - } - st_case_358: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st359 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st359: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof359 - } - st_case_359: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st360 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st360: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof360 - } - st_case_360: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st361 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st361: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof361 - } - st_case_361: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 
13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st362 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st362: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof362 - } - st_case_362: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st363 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st363: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof363 - } - st_case_363: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st364 - } - case ( m.data)[( m.p)] >= 9: - goto tr520 - } - goto st33 - st364: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof364 - } - st_case_364: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr521 - case 13: - goto tr362 - case 32: - goto tr520 - case 44: - goto tr63 - case 61: - goto tr14 - case 92: - goto st35 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr520 - } - goto st33 -tr190: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st99 -tr184: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st99 - st99: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof99 - } - st_case_99: -//line plugins/parsers/influx/machine.go:10728 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr210 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr211 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr209 -tr209: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st100 - st100: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof100 - } - st_case_100: -//line plugins/parsers/influx/machine.go:10760 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr213 - case 44: - goto st7 - case 61: - goto tr214 - case 92: - goto st104 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st100 -tr210: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st365 -tr213: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st365 - st365: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof365 - } - st_case_365: -//line plugins/parsers/influx/machine.go:10802 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st366 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto st9 - case 61: - goto tr55 - case 92: - goto st37 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st207 - } - goto st29 - st366: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof366 - } - st_case_366: - switch ( 
m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st366 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto tr216 - case 45: - goto tr543 - case 61: - goto tr55 - case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr544 - } - case ( m.data)[( m.p)] >= 9: - goto st207 - } - goto st29 -tr543: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st101 - st101: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof101 - } - st_case_101: -//line plugins/parsers/influx/machine.go:10866 - switch ( m.data)[( m.p)] { - case 32: - goto tr216 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr216 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st367 - } - default: - goto tr216 - } - goto st29 -tr544: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st367 - st367: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof367 - } - st_case_367: -//line plugins/parsers/influx/machine.go:10901 - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr545 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st369 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st29 -tr545: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st368 - st368: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof368 - } - st_case_368: -//line plugins/parsers/influx/machine.go:10938 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st368 - case 13: - goto tr357 - case 32: - goto st210 - case 44: - goto tr52 - case 61: - goto tr55 - case 92: - goto st37 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st210 - } - goto st29 - st369: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof369 - } - st_case_369: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr545 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st370 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st29 - st370: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof370 - } - st_case_370: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr545 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st371 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st29 - st371: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof371 - } - st_case_371: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr545 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st372 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st29 - st372: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof372 - 
} - st_case_372: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr545 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st373 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st29 + goto _again st373: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof373 } st_case_373: +//line plugins/parsers/influx/machine.go:10582 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr101 case 11: - goto tr545 + goto tr591 case 13: - goto tr362 + goto st32 case 32: - goto tr361 + goto tr563 case 44: - goto tr216 + goto tr565 case 61: - goto tr55 + goto tr12 case 92: - goto st37 + goto st19 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr563 + } + goto st17 +tr203: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st67 + st67: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof67 + } + st_case_67: +//line plugins/parsers/influx/machine.go:10614 + switch ( m.data)[( m.p)] { + case 34: + goto st65 + case 92: + goto st68 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st17 + st68: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof68 + } + st_case_68: +//line plugins/parsers/influx/machine.go:10638 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr205 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st65 +tr191: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st69 + st69: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof69 + } + st_case_69: +//line plugins/parsers/influx/machine.go:10672 + switch ( m.data)[( m.p)] { + case 34: + goto st58 + case 92: + goto st70 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st13 + st70: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof70 + } + st_case_70: +//line plugins/parsers/influx/machine.go:10696 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr193 + case 44: + goto st6 + case 61: + goto tr194 + case 92: + goto st69 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st58 +tr187: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st71 +tr344: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st71 + st71: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof71 + } + st_case_71: +//line plugins/parsers/influx/machine.go:10738 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr210 + case 44: + goto tr180 + case 45: + goto tr211 + case 46: + goto tr212 + case 48: + goto tr213 + case 70: + goto tr215 + case 84: + goto tr216 + case 92: + goto st155 + case 102: + goto tr217 + case 116: + goto tr218 } switch { case ( m.data)[( 
m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st374 + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr214 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr178 } - goto st29 + goto st53 +tr210: + ( m.cs) = 374 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st374: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof374 } st_case_374: +//line plugins/parsers/influx/machine.go:10796 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr492 case 11: - goto tr545 + goto tr593 case 13: - goto tr362 + goto tr493 case 32: - goto tr361 + goto tr592 + case 34: + goto tr83 case 44: - goto tr216 - case 61: - goto tr55 + goto tr594 case 92: - goto st37 + goto tr85 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st375 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr592 } - goto st29 + goto tr80 +tr623: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr592: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr762: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr635: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr757: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr790: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr796: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr802: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line 
plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr816: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr821: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr826: + ( m.cs) = 375 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st375: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof375 } st_case_375: +//line plugins/parsers/influx/machine.go:11049 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr219 case 11: - goto tr545 + goto tr596 case 13: - goto tr362 + goto st72 case 32: - goto tr361 + goto st375 + case 34: + goto tr95 case 44: - goto tr216 + goto st6 + case 45: + goto tr597 case 61: - goto tr55 + goto st6 case 92: - goto st37 + goto tr96 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st376 + goto tr598 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto st375 } - goto st29 + goto tr92 +tr596: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st376 st376: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof376 } st_case_376: +//line plugins/parsers/influx/machine.go:11090 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr219 case 11: - goto tr545 + goto tr596 case 13: - goto tr362 + goto st72 case 32: - goto tr361 + goto st375 + case 34: + goto tr95 case 44: - goto tr216 + goto st6 + case 45: + goto tr597 case 61: - goto tr55 + goto tr99 case 92: - goto st37 + goto tr96 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st377 + goto tr598 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto st375 } - goto st29 + goto tr92 +tr493: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st72 +tr602: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr638: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr793: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr799: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( 
m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr805: + ( m.cs) = 72 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st72: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof72 + } + st_case_72: +//line plugins/parsers/influx/machine.go:11196 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 34: + goto tr29 + case 92: + goto st73 + } + goto st6 +tr26: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st73 + st73: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof73 + } + st_case_73: +//line plugins/parsers/influx/machine.go:11217 + switch ( m.data)[( m.p)] { + case 34: + goto st6 + case 92: + goto st6 + } + goto tr8 +tr597: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st74 + st74: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof74 + } + st_case_74: +//line plugins/parsers/influx/machine.go:11236 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr99 + case 92: + goto st75 + } + switch { + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st377 + } + case ( m.data)[( m.p)] >= 12: + goto st6 + } + goto st31 +tr598: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st377 st377: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof377 } st_case_377: +//line plugins/parsers/influx/machine.go:11273 switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr600 case 11: - goto tr545 + goto tr601 case 13: - goto tr362 + goto tr602 case 32: - goto tr361 + goto tr599 + case 34: + goto tr98 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr99 case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st378 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st29 - st378: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof378 - } - st_case_378: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr545 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st379 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st29 - st379: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof379 - } - st_case_379: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr545 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11282,9 +11294,112 @@ tr545: goto st380 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr599 } - goto st29 + goto st31 +tr599: + ( m.cs) = 378 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st378: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof378 + } + st_case_378: +//line plugins/parsers/influx/machine.go:11319 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 13: + goto st72 + case 32: + goto st378 + case 34: + goto tr29 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st378 + } + goto st6 +tr601: + 
( m.cs) = 379 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st379: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof379 + } + st_case_379: +//line plugins/parsers/influx/machine.go:11354 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto st379 + case 13: + goto st72 + case 32: + goto st378 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr99 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st378 + } + goto st31 +tr96: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st75 + st75: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof75 + } + st_case_75: +//line plugins/parsers/influx/machine.go:11388 + switch ( m.data)[( m.p)] { + case 34: + goto st31 + case 92: + goto st31 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st3 st380: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof380 @@ -11292,19 +11407,21 @@ tr545: st_case_380: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr600 case 11: - goto tr545 + goto tr601 case 13: - goto tr362 + goto tr602 case 32: - goto tr361 + goto tr599 + case 34: + goto tr98 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr99 case 92: - goto st37 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11312,9 +11429,9 @@ tr545: goto st381 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr599 } - goto st29 + goto st31 st381: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof381 @@ -11322,19 +11439,21 @@ tr545: st_case_381: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr600 case 11: - goto tr545 + goto tr601 case 13: - goto tr362 + goto tr602 case 32: - goto tr361 + goto tr599 + case 34: + goto tr98 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr99 case 92: - goto st37 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11342,9 +11461,9 @@ tr545: goto st382 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr599 } - goto st29 + goto st31 st382: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof382 @@ -11352,19 +11471,21 @@ tr545: st_case_382: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr600 case 11: - goto tr545 + goto tr601 case 13: - goto tr362 + goto tr602 case 32: - goto tr361 + goto tr599 + case 34: + goto tr98 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr99 case 92: - goto st37 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11372,9 +11493,9 @@ tr545: goto st383 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr599 } - goto st29 + goto st31 st383: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof383 @@ -11382,19 +11503,21 @@ tr545: st_case_383: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr600 case 11: - goto tr545 + goto tr601 case 13: - goto tr362 + goto tr602 case 32: - goto tr361 + goto tr599 + case 34: + goto tr98 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr99 case 92: - goto st37 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11402,9 +11525,9 @@ tr545: goto st384 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr599 } - goto st29 + goto st31 st384: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof384 @@ -11412,19 +11535,21 @@ tr545: st_case_384: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr600 case 11: - goto tr545 + goto tr601 case 13: - goto tr362 + 
goto tr602 case 32: - goto tr361 + goto tr599 + case 34: + goto tr98 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr99 case 92: - goto st37 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11432,9 +11557,9 @@ tr545: goto st385 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr599 } - goto st29 + goto st31 st385: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof385 @@ -11442,19 +11567,21 @@ tr545: st_case_385: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr600 case 11: - goto tr545 + goto tr601 case 13: - goto tr362 + goto tr602 case 32: - goto tr361 + goto tr599 + case 34: + goto tr98 case 44: - goto tr216 + goto st6 case 61: - goto tr55 + goto tr99 case 92: - goto st37 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11462,9 +11589,9 @@ tr545: goto st386 } case ( m.data)[( m.p)] >= 9: - goto tr361 + goto tr599 } - goto st29 + goto st31 st386: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof386 @@ -11472,326 +11599,85 @@ tr545: st_case_386: switch ( m.data)[( m.p)] { case 10: - goto tr362 + goto tr600 case 11: - goto tr545 + goto tr601 case 13: - goto tr362 + goto tr602 case 32: - goto tr361 - case 44: - goto tr216 - case 61: - goto tr55 - case 92: - goto st37 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr361 - } - goto st29 -tr214: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - - goto st102 - st102: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof102 - } - st_case_102: -//line plugins/parsers/influx/machine.go:11505 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 + goto tr599 case 34: - goto tr183 + goto tr98 case 44: - goto st7 + goto st6 case 61: - goto st7 + goto tr99 case 92: - goto tr185 + goto st75 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st387 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } - goto tr180 -tr183: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st387 -tr189: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st387 + goto st31 st387: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof387 } st_case_387: -//line plugins/parsers/influx/machine.go:11547 switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto tr600 case 11: - goto tr565 + goto tr601 case 13: - goto tr357 + goto tr602 case 32: - goto tr514 + goto tr599 + case 34: + goto tr98 case 44: - goto tr516 + goto st6 case 61: - goto tr207 + goto tr99 case 92: - goto st36 + goto st75 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr514 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st388 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } goto st31 -tr565: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st388 -tr567: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st388 -tr573: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st388 -tr577: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, 
m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st388 -tr581: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st388 st388: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof388 } st_case_388: -//line plugins/parsers/influx/machine.go:11619 switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto tr600 case 11: - goto tr517 + goto tr601 case 13: - goto tr357 + goto tr602 case 32: - goto tr514 - case 44: - goto tr63 - case 45: - goto tr518 - case 61: - goto tr207 - case 92: - goto tr67 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr519 - } - case ( m.data)[( m.p)] >= 9: - goto tr514 - } - goto tr65 -tr185: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st103 - st103: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof103 - } - st_case_103: -//line plugins/parsers/influx/machine.go:11658 - switch ( m.data)[( m.p)] { + goto tr599 case 34: - goto st89 - case 92: - goto st89 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st31 -tr211: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st104 - st104: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof104 - } - st_case_104: -//line plugins/parsers/influx/machine.go:11685 - switch ( m.data)[( m.p)] { - case 34: - goto st100 - case 92: - goto st100 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st29 -tr202: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st105 - st105: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof105 - } - st_case_105: -//line plugins/parsers/influx/machine.go:11712 - switch ( m.data)[( m.p)] { - case 34: - goto st96 - case 92: - goto st96 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st33 -tr172: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st106 - st106: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof106 - } - st_case_106: -//line plugins/parsers/influx/machine.go:11739 - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 + goto tr98 case 44: - goto tr63 - case 46: - goto st107 - case 48: - goto st391 + goto st6 case 61: - goto tr61 + goto tr99 case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st394 - } - case ( m.data)[( m.p)] >= 9: - goto tr60 - } - goto st31 -tr173: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st107 - st107: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof107 - } - st_case_107: -//line plugins/parsers/influx/machine.go:11780 - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto st36 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11799,7 +11685,7 @@ tr173: goto st389 } case ( m.data)[( m.p)] >= 9: - goto tr60 + goto tr599 } goto st31 st389: @@ -11809,89 +11695,21 @@ 
tr173: st_case_389: switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr600 case 11: - goto tr567 + goto tr601 case 13: - goto tr383 + goto tr602 case 32: - goto tr566 - case 44: - goto tr568 - case 61: - goto tr207 - case 69: - goto st108 - case 92: - goto st36 - case 101: - goto st108 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st389 - } - case ( m.data)[( m.p)] >= 9: - goto tr566 - } - goto st31 - st108: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof108 - } - st_case_108: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 + goto tr599 case 34: - goto st109 + goto tr98 case 44: - goto tr63 + goto st6 case 61: - goto tr61 + goto tr99 case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 - } - default: - goto st109 - } - goto st31 - st109: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof109 - } - st_case_109: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto st36 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11899,7 +11717,7 @@ tr173: goto st390 } case ( m.data)[( m.p)] >= 9: - goto tr60 + goto tr599 } goto st31 st390: @@ -11909,27 +11727,29 @@ tr173: st_case_390: switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr600 case 11: - goto tr567 + goto tr601 case 13: - goto tr383 + goto tr602 case 32: - goto tr566 + goto tr599 + case 34: + goto tr98 case 44: - goto tr568 + goto st6 case 61: - goto tr207 + goto tr99 case 92: - goto st36 + goto st75 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 + goto st391 } case ( m.data)[( m.p)] >= 9: - goto tr566 + goto tr599 } goto st31 st391: @@ -11939,27 +11759,21 @@ tr173: st_case_391: switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr600 case 11: - goto tr567 + goto tr601 case 13: - goto tr383 + goto tr602 case 32: - goto tr566 + goto tr599 + case 34: + goto tr98 case 44: - goto tr568 - case 46: - goto st389 + goto st6 case 61: - goto tr207 - case 69: - goto st108 + goto tr99 case 92: - goto st36 - case 101: - goto st108 - case 105: - goto st393 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -11967,7 +11781,7 @@ tr173: goto st392 } case ( m.data)[( m.p)] >= 9: - goto tr566 + goto tr599 } goto st31 st392: @@ -11977,33 +11791,29 @@ tr173: st_case_392: switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr600 case 11: - goto tr567 + goto tr601 case 13: - goto tr383 + goto tr602 case 32: - goto tr566 + goto tr599 + case 34: + goto tr98 case 44: - goto tr568 - case 46: - goto st389 + goto st6 case 61: - goto tr207 - case 69: - goto st108 + goto tr99 case 92: - goto st36 - case 101: - goto st108 + goto st75 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st392 + goto st393 } case ( m.data)[( m.p)] >= 9: - goto tr566 + goto tr599 } goto st31 st393: @@ -12013,22 +11823,29 @@ tr173: st_case_393: switch ( m.data)[( m.p)] { case 10: - goto tr389 + goto tr600 case 11: - goto tr573 + goto tr601 case 13: - goto tr389 + goto tr602 case 32: - goto tr572 + goto tr599 + case 34: + goto tr98 case 44: - goto tr574 + goto st6 case 61: - goto tr207 + goto tr99 
case 92: - goto st36 + goto st75 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr572 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st394 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 } goto st31 st394: @@ -12038,82 +11855,61 @@ tr173: st_case_394: switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr600 case 11: - goto tr567 + goto tr601 case 13: - goto tr383 + goto tr602 case 32: - goto tr566 + goto tr599 + case 34: + goto tr98 case 44: - goto tr568 - case 46: - goto st389 + goto st6 case 61: - goto tr207 - case 69: - goto st108 + goto tr99 case 92: - goto st36 - case 101: - goto st108 - case 105: - goto st393 + goto st75 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st394 + goto st395 } case ( m.data)[( m.p)] >= 9: - goto tr566 + goto tr599 } goto st31 -tr174: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st395 st395: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof395 } st_case_395: -//line plugins/parsers/influx/machine.go:12084 switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr600 case 11: - goto tr567 + goto tr601 case 13: - goto tr383 + goto tr602 case 32: - goto tr566 + goto tr599 + case 34: + goto tr98 case 44: - goto tr568 - case 46: - goto st389 + goto st6 case 61: - goto tr207 - case 69: - goto st108 + goto tr99 case 92: - goto st36 - case 101: - goto st108 - case 105: - goto st393 - case 117: - goto st396 + goto st75 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st392 + goto st396 } case ( m.data)[( m.p)] >= 9: - goto tr566 + goto tr599 } goto st31 st396: @@ -12123,61 +11919,21 @@ tr174: st_case_396: switch ( m.data)[( m.p)] { case 10: - goto tr393 + goto tr600 case 11: - goto tr577 + goto tr601 case 13: - goto tr393 + goto tr602 case 32: - goto tr576 + goto tr599 + case 34: + goto tr98 case 44: - goto tr578 + goto st6 case 61: - goto tr207 + goto tr99 case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr576 - } - goto st31 -tr175: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st397 - st397: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof397 - } - st_case_397: -//line plugins/parsers/influx/machine.go:12156 - switch ( m.data)[( m.p)] { - case 10: - goto tr383 - case 11: - goto tr567 - case 13: - goto tr383 - case 32: - goto tr566 - case 44: - goto tr568 - case 46: - goto st389 - case 61: - goto tr207 - case 69: - goto st108 - case 92: - goto st36 - case 101: - goto st108 - case 105: - goto st393 - case 117: - goto st396 + goto st75 } switch { case ( m.data)[( m.p)] > 12: @@ -12185,234 +11941,745 @@ tr175: goto st397 } case ( m.data)[( m.p)] >= 9: - goto tr566 + goto tr599 } goto st31 -tr176: -//line plugins/parsers/influx/machine.go.rl:18 + st397: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof397 + } + st_case_397: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 11: + goto tr601 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr99 + case 92: + goto st75 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr599 + } + goto st31 +tr593: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto 
st398 + goto _again +tr637: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr818: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr823: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr827: + ( m.cs) = 398 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st398: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof398 } st_case_398: -//line plugins/parsers/influx/machine.go:12203 +//line plugins/parsers/influx/machine.go:12089 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr219 case 11: - goto tr581 + goto tr624 case 13: - goto tr397 + goto st72 case 32: - goto tr580 + goto tr623 + case 34: + goto tr122 case 44: - goto tr582 + goto tr90 + case 45: + goto tr625 case 61: - goto tr207 - case 65: - goto st110 + goto st29 case 92: - goto st36 - case 97: - goto st113 + goto tr123 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr626 + } + case ( m.data)[( m.p)] >= 9: + goto tr623 } - goto st31 - st110: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof110 - } - st_case_110: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 76: - goto st111 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 - st111: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof111 - } - st_case_111: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 83: - goto st112 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 - st112: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof112 - } - st_case_112: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 69: - goto st399 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 + goto tr119 +tr624: + ( m.cs) = 399 +//line 
plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again st399: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof399 } st_case_399: +//line plugins/parsers/influx/machine.go:12141 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr219 case 11: - goto tr581 + goto tr624 case 13: - goto tr397 + goto st72 case 32: - goto tr580 + goto tr623 + case 34: + goto tr122 case 44: - goto tr582 + goto tr90 + case 45: + goto tr625 case 61: - goto tr207 + goto tr127 case 92: - goto st36 + goto tr123 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr626 + } + case ( m.data)[( m.p)] >= 9: + goto tr623 } - goto st31 - st113: + goto tr119 +tr90: + ( m.cs) = 76 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr84: + ( m.cs) = 76 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr231: + ( m.cs) = 76 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st76: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof113 + goto _test_eof76 } - st_case_113: + st_case_76: +//line plugins/parsers/influx/machine.go:12219 switch ( m.data)[( m.p)] { + case 9: + goto st6 case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 + goto tr28 case 32: - goto tr60 + goto st6 + case 34: + goto tr190 case 44: - goto tr63 + goto st6 case 61: - goto tr61 + goto st6 case 92: - goto st36 - case 108: - goto st114 + goto tr222 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 } - goto st31 - st114: + goto tr221 +tr221: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st77 + st77: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof114 + goto _test_eof77 } - st_case_114: + st_case_77: +//line plugins/parsers/influx/machine.go:12251 switch ( m.data)[( m.p)] { + case 9: + goto st6 case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 + goto tr28 case 32: - goto tr60 + goto st6 + case 34: + goto tr193 case 44: - goto tr63 + goto st6 case 61: - goto tr61 + goto tr224 case 92: - goto st36 - case 115: - goto st115 + goto st87 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 } - goto st31 - st115: + goto st77 +tr224: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + + goto st78 + st78: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof115 + goto _test_eof78 } - st_case_115: + st_case_78: +//line plugins/parsers/influx/machine.go:12283 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr149 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr227 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr226 +tr226: +//line 
plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st79 + st79: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof79 + } + st_case_79: +//line plugins/parsers/influx/machine.go:12315 switch ( m.data)[( m.p)] { case 10: - goto tr61 + goto tr28 case 11: - goto tr62 + goto tr230 case 13: - goto tr61 + goto st6 case 32: - goto tr60 + goto tr229 + case 34: + goto tr155 case 44: - goto tr63 + goto tr231 case 61: - goto tr61 + goto st6 case 92: - goto st36 - case 101: - goto st399 + goto st85 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 + goto tr229 } - goto st31 -tr177: -//line plugins/parsers/influx/machine.go.rl:18 + goto st79 +tr230: + ( m.cs) = 80 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st80: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof80 + } + st_case_80: +//line plugins/parsers/influx/machine.go:12356 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr234 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr202 + case 44: + goto tr231 + case 61: + goto st6 + case 92: + goto tr235 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto tr233 +tr233: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st81 + st81: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof81 + } + st_case_81: +//line plugins/parsers/influx/machine.go:12390 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr237 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr206 + case 44: + goto tr231 + case 61: + goto tr99 + case 92: + goto st83 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st81 +tr237: + ( m.cs) = 82 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr234: + ( m.cs) = 82 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st82: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof82 + } + st_case_82: +//line plugins/parsers/influx/machine.go:12448 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr234 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr202 + case 44: + goto tr231 + case 61: + goto tr99 + case 92: + goto tr235 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto tr233 +tr235: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st83 + st83: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof83 + } + st_case_83: +//line plugins/parsers/influx/machine.go:12482 + switch ( m.data)[( m.p)] { + case 34: + goto st81 + case 92: + goto st84 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st17 + st84: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof84 + } + st_case_84: +//line plugins/parsers/influx/machine.go:12506 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr237 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr206 + 
case 44: + goto tr231 + case 61: + goto tr99 + case 92: + goto st83 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st81 +tr227: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st85 + st85: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof85 + } + st_case_85: +//line plugins/parsers/influx/machine.go:12540 + switch ( m.data)[( m.p)] { + case 34: + goto st79 + case 92: + goto st86 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st15 + st86: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof86 + } + st_case_86: +//line plugins/parsers/influx/machine.go:12564 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 92: + goto st85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 +tr222: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st87 + st87: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof87 + } + st_case_87: +//line plugins/parsers/influx/machine.go:12598 + switch ( m.data)[( m.p)] { + case 34: + goto st77 + case 92: + goto st88 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st13 + st88: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof88 + } + st_case_88: +//line plugins/parsers/influx/machine.go:12622 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr193 + case 44: + goto st6 + case 61: + goto tr224 + case 92: + goto st87 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st77 +tr625: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st89 + st89: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof89 + } + st_case_89: +//line plugins/parsers/influx/machine.go:12654 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr125 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr126 + case 44: + goto tr90 + case 61: + goto tr127 + case 92: + goto st92 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st400 + } + case ( m.data)[( m.p)] >= 9: + goto tr87 + } + goto st40 +tr626: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -12422,121 +12689,136 @@ tr177: goto _test_eof400 } st_case_400: -//line plugins/parsers/influx/machine.go:12426 +//line plugins/parsers/influx/machine.go:12693 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr600 case 11: - goto tr581 + goto tr628 case 13: - goto tr397 + goto tr602 case 32: - goto tr580 + goto tr627 + case 34: + goto tr126 case 44: - goto tr582 + goto tr90 case 61: - goto tr207 - case 82: - goto st116 + goto tr127 case 92: - goto st36 - case 114: - goto st117 + goto st92 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st544 + } + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st31 - st116: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof116 - } - 
st_case_116: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 85: - goto st112 - case 92: - goto st36 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 - st117: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof117 - } - st_case_117: - switch ( m.data)[( m.p)] { - case 10: - goto tr61 - case 11: - goto tr62 - case 13: - goto tr61 - case 32: - goto tr60 - case 44: - goto tr63 - case 61: - goto tr61 - case 92: - goto st36 - case 117: - goto st115 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr60 - } - goto st31 -tr178: -//line plugins/parsers/influx/machine.go.rl:18 + goto st40 +tr632: + ( m.cs) = 401 +//line plugins/parsers/influx/machine.go.rl:86 - m.pb = m.p + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- - goto st401 + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr769: + ( m.cs) = 401 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr627: + ( m.cs) = 401 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr766: + ( m.cs) = 401 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st401: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof401 } st_case_401: -//line plugins/parsers/influx/machine.go:12516 +//line plugins/parsers/influx/machine.go:12798 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr219 case 11: - goto tr581 + goto tr631 case 13: - goto tr397 + goto st72 case 32: - goto tr580 + goto st401 + case 34: + goto tr95 case 44: - goto tr582 + goto st6 case 61: - goto tr207 + goto st6 case 92: - goto st36 - case 97: - goto st113 + goto tr96 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 + goto st401 } - goto st31 -tr179: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr92 +tr631: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -12546,134 +12828,5213 @@ tr179: goto _test_eof402 } st_case_402: -//line plugins/parsers/influx/machine.go:12550 +//line plugins/parsers/influx/machine.go:12832 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr219 case 11: - goto tr581 + goto tr631 case 13: - goto tr397 + goto st72 case 32: - goto tr580 + goto st401 + case 34: + goto tr95 case 44: - goto tr582 + goto st6 case 61: - goto tr207 + goto tr99 case 92: - goto st36 - case 114: - goto st117 + goto tr96 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr580 + goto st401 } - goto st31 -tr167: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr92 +tr633: + ( m.cs) = 403 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line 
plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st118 + goto _again +tr628: + ( m.cs) = 403 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st403: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof403 + } + st_case_403: +//line plugins/parsers/influx/machine.go:12900 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr633 + case 13: + goto st72 + case 32: + goto tr632 + case 34: + goto tr122 + case 44: + goto tr90 + case 61: + goto tr127 + case 92: + goto tr123 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr632 + } + goto tr119 +tr127: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st90 +tr381: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st90 + st90: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof90 + } + st_case_90: +//line plugins/parsers/influx/machine.go:12944 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr210 + case 44: + goto tr90 + case 45: + goto tr243 + case 46: + goto tr244 + case 48: + goto tr245 + case 70: + goto tr247 + case 84: + goto tr248 + case 92: + goto st140 + case 102: + goto tr249 + case 116: + goto tr250 + } + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr246 + } + case ( m.data)[( m.p)] >= 9: + goto tr87 + } + goto st29 +tr88: + ( m.cs) = 91 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr82: + ( m.cs) = 91 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st91: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof91 + } + st_case_91: +//line plugins/parsers/influx/machine.go:13019 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr129 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr122 + case 44: + goto tr90 + case 61: + goto st29 + case 92: + goto tr123 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto tr119 +tr123: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st92 + st92: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof92 + } + st_case_92: +//line plugins/parsers/influx/machine.go:13053 + switch ( m.data)[( m.p)] { + case 34: + goto st40 + case 92: + goto st40 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st10 +tr243: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st93 + st93: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof93 + } + st_case_93: +//line plugins/parsers/influx/machine.go:13080 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + 
case 44: + goto tr90 + case 46: + goto st95 + case 48: + goto st532 + case 92: + goto st140 + } + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st535 + } + case ( m.data)[( m.p)] >= 9: + goto tr87 + } + goto st29 +tr83: + ( m.cs) = 404 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr89: + ( m.cs) = 404 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr116: + ( m.cs) = 404 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st404: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof404 + } + st_case_404: +//line plugins/parsers/influx/machine.go:13162 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr634 + case 13: + goto st32 + case 32: + goto tr499 + case 44: + goto tr501 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr499 + } + goto st1 +tr634: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr812: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1006: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1010: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1014: + ( m.cs) = 405 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st405: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof405 + } + st_case_405: +//line plugins/parsers/influx/machine.go:13291 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr504 + case 13: + goto st32 + case 32: + goto tr499 + case 44: + goto tr4 + case 45: + goto tr505 + case 61: + goto st1 + case 92: + goto tr43 + } + switch { + case ( m.data)[( 
m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr506 + } + case ( m.data)[( m.p)] >= 9: + goto tr499 + } + goto tr39 +tr35: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st94 +tr458: +//line plugins/parsers/influx/machine.go.rl:82 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st94 + st94: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof94 + } + st_case_94: +//line plugins/parsers/influx/machine.go:13340 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st0 + } + case ( m.data)[( m.p)] >= 9: + goto st0 + } + goto st1 +tr244: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st95 + st95: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof95 + } + st_case_95: +//line plugins/parsers/influx/machine.go:13361 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 92: + goto st140 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st406 + } + case ( m.data)[( m.p)] >= 9: + goto tr87 + } + goto st29 + st406: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof406 + } + st_case_406: + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 11: + goto tr637 + case 13: + goto tr638 + case 32: + goto tr635 + case 34: + goto tr89 + case 44: + goto tr639 + case 69: + goto st138 + case 92: + goto st140 + case 101: + goto st138 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st406 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 + } + goto st29 +tr594: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr639: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr760: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr794: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr800: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; 
goto _out } + } + + goto _again +tr806: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr819: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr824: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr828: + ( m.cs) = 96 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st96: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof96 + } + st_case_96: +//line plugins/parsers/influx/machine.go:13627 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr256 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr257 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr255 +tr255: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st97 + st97: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof97 + } + st_case_97: +//line plugins/parsers/influx/machine.go:13659 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr259 + case 44: + goto st6 + case 61: + goto tr260 + case 92: + goto st136 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st97 +tr256: + ( m.cs) = 407 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr259: + ( m.cs) = 407 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st407: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof407 + } + st_case_407: +//line plugins/parsers/influx/machine.go:13715 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st408 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto st35 + case 61: + goto tr135 + case 92: + goto st99 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st271 + } + goto st44 + st408: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof408 + } + st_case_408: + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st408 + case 13: + goto st32 + case 32: 
+ goto st271 + case 44: + goto tr130 + case 45: + goto tr642 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr643 + } + case ( m.data)[( m.p)] >= 9: + goto st271 + } + goto st44 +tr642: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st98 + st98: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof98 + } + st_case_98: +//line plugins/parsers/influx/machine.go:13779 + switch ( m.data)[( m.p)] { + case 32: + goto tr130 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] < 12: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { + goto tr130 + } + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st409 + } + default: + goto tr130 + } + goto st44 +tr643: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st409 + st409: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof409 + } + st_case_409: +//line plugins/parsers/influx/machine.go:13814 + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st411 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 +tr644: + ( m.cs) = 410 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st410: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof410 + } + st_case_410: +//line plugins/parsers/influx/machine.go:13858 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto st410 + case 13: + goto st32 + case 32: + goto st276 + case 44: + goto tr45 + case 61: + goto tr135 + case 92: + goto st99 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st276 + } + goto st44 +tr133: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st99 + st99: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof99 + } + st_case_99: +//line plugins/parsers/influx/machine.go:13890 + if ( m.data)[( m.p)] == 92 { + goto st100 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st44 + st100: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof100 + } + st_case_100: +//line plugins/parsers/influx/machine.go:13911 + switch ( m.data)[( m.p)] { + case 32: + goto tr45 + case 44: + goto tr45 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 + } + goto st44 + st411: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof411 + } + st_case_411: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st412 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st412: + if ( m.p)++; ( m.p) == ( m.pe) { + goto 
_test_eof412 + } + st_case_412: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st413 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st413: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof413 + } + st_case_413: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st414 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st414: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof414 + } + st_case_414: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st415 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st415: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof415 + } + st_case_415: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st416 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st416: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof416 + } + st_case_416: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st417 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st417: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof417 + } + st_case_417: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st418 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st418: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof418 + } + st_case_418: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st419 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st419: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof419 + } + st_case_419: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + 
goto st420 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st420: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof420 + } + st_case_420: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st421 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st421: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof421 + } + st_case_421: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st422 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st422: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof422 + } + st_case_422: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st423 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st423: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof423 + } + st_case_423: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st424 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st424: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof424 + } + st_case_424: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st425 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st425: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof425 + } + st_case_425: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st426 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st426: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof426 + } + st_case_426: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st427 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st427: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof427 + } + st_case_427: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + 
case 92: + goto st99 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st428 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto st44 + st428: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof428 + } + st_case_428: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 11: + goto tr644 + case 13: + goto tr470 + case 32: + goto tr467 + case 44: + goto tr130 + case 61: + goto tr135 + case 92: + goto st99 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr467 + } + goto st44 +tr260: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st101 + st101: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof101 + } + st_case_101: +//line plugins/parsers/influx/machine.go:14481 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr264 + case 44: + goto st6 + case 45: + goto tr265 + case 46: + goto tr266 + case 48: + goto tr267 + case 61: + goto st6 + case 70: + goto tr269 + case 84: + goto tr270 + case 92: + goto tr227 + case 102: + goto tr271 + case 116: + goto tr272 + } + switch { + case ( m.data)[( m.p)] > 13: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr268 + } + case ( m.data)[( m.p)] >= 12: + goto st6 + } + goto tr226 +tr264: + ( m.cs) = 429 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st429: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof429 + } + st_case_429: +//line plugins/parsers/influx/machine.go:14543 + switch ( m.data)[( m.p)] { + case 10: + goto tr665 + case 11: + goto tr666 + case 13: + goto tr667 + case 32: + goto tr664 + case 34: + goto tr149 + case 44: + goto tr668 + case 61: + goto tr23 + case 92: + goto tr151 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr664 + } + goto tr146 +tr854: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr697: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr664: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr850: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr725: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; 
goto _out } + } + + goto _again +tr736: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr742: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr748: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr882: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr886: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr890: + ( m.cs) = 430 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st430: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof430 + } + st_case_430: +//line plugins/parsers/influx/machine.go:14798 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr670 + case 13: + goto st102 + case 32: + goto st430 + case 34: + goto tr95 + case 44: + goto st6 + case 45: + goto tr671 + case 61: + goto st6 + case 92: + goto tr161 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr672 + } + case ( m.data)[( m.p)] >= 9: + goto st430 + } + goto tr158 +tr670: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st431 + st431: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof431 + } + st_case_431: +//line plugins/parsers/influx/machine.go:14839 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr670 + case 13: + goto st102 + case 32: + goto st430 + case 34: + goto tr95 + case 44: + goto st6 + case 45: + goto tr671 + case 61: + goto tr163 + case 92: + goto tr161 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr672 + } + case ( m.data)[( m.p)] >= 9: + goto st430 + } + goto tr158 +tr667: +//line 
plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st102 +tr676: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr533: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr739: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr745: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr751: + ( m.cs) = 102 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st102: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof102 + } + st_case_102: +//line plugins/parsers/influx/machine.go:14945 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 34: + goto tr29 + case 92: + goto st73 + } + goto st6 +tr671: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st103 + st103: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof103 + } + st_case_103: +//line plugins/parsers/influx/machine.go:14966 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 13: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st432 + } + case ( m.data)[( m.p)] >= 12: + goto st6 + } + goto st49 +tr672: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st432 + st432: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof432 + } + st_case_432: +//line plugins/parsers/influx/machine.go:15003 + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st435 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 +tr673: + ( m.cs) = 433 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st433: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof433 + } + st_case_433: +//line plugins/parsers/influx/machine.go:15049 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 13: + goto st102 + case 32: + goto st433 + case 34: + goto tr29 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st433 + } + goto st6 +tr675: + ( m.cs) = 434 +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st434: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof434 + } + st_case_434: +//line plugins/parsers/influx/machine.go:15084 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + 
goto st434 + case 13: + goto st102 + case 32: + goto st433 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st433 + } + goto st49 +tr161: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st104 + st104: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof104 + } + st_case_104: +//line plugins/parsers/influx/machine.go:15118 + switch ( m.data)[( m.p)] { + case 34: + goto st49 + case 92: + goto st49 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st3 + st435: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof435 + } + st_case_435: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st436 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st436: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof436 + } + st_case_436: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st437 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st437: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof437 + } + st_case_437: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st438 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st438: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof438 + } + st_case_438: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st439 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st439: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof439 + } + st_case_439: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st440 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st440: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof440 + } + st_case_440: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st441 + } + case ( 
m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st441: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof441 + } + st_case_441: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st442 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st442: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof442 + } + st_case_442: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st443 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st443: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof443 + } + st_case_443: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st444 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st444: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof444 + } + st_case_444: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st445 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st445: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof445 + } + st_case_445: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st446 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st446: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof446 + } + st_case_446: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st447 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st447: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof447 + } + st_case_447: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st448 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st448: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof448 + } + st_case_448: + switch ( m.data)[( m.p)] { + case 10: + goto 
tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st449 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st449: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof449 + } + st_case_449: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st450 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st450: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof450 + } + st_case_450: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st451 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st451: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof451 + } + st_case_451: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st452 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st49 + st452: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof452 + } + st_case_452: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr675 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto st104 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr673 + } + goto st49 +tr666: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr726: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr738: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr744: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( 
m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr750: + ( m.cs) = 453 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st453: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof453 + } + st_case_453: +//line plugins/parsers/influx/machine.go:15819 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr698 + case 13: + goto st102 + case 32: + goto tr697 + case 34: + goto tr202 + case 44: + goto tr156 + case 45: + goto tr699 + case 61: + goto st6 + case 92: + goto tr203 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr700 + } + case ( m.data)[( m.p)] >= 9: + goto tr697 + } + goto tr200 +tr698: + ( m.cs) = 454 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st454: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof454 + } + st_case_454: +//line plugins/parsers/influx/machine.go:15871 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr698 + case 13: + goto st102 + case 32: + goto tr697 + case 34: + goto tr202 + case 44: + goto tr156 + case 45: + goto tr699 + case 61: + goto tr163 + case 92: + goto tr203 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr700 + } + case ( m.data)[( m.p)] >= 9: + goto tr697 + } + goto tr200 +tr699: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st105 + st105: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof105 + } + st_case_105: +//line plugins/parsers/influx/machine.go:15912 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr205 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st455 + } + case ( m.data)[( m.p)] >= 9: + goto tr153 + } + goto st65 +tr700: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st455 + st455: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof455 + } + st_case_455: +//line plugins/parsers/influx/machine.go:15951 + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st459 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 +tr861: + ( m.cs) = 456 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr706: + ( m.cs) = 456 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr858: + ( m.cs) = 456 +//line plugins/parsers/influx/machine.go.rl:86 + 
+ err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr701: + ( m.cs) = 456 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st456: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof456 + } + st_case_456: +//line plugins/parsers/influx/machine.go:16056 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr705 + case 13: + goto st102 + case 32: + goto st456 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr161 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st456 + } + goto tr158 +tr705: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st457 + st457: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof457 + } + st_case_457: +//line plugins/parsers/influx/machine.go:16090 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr705 + case 13: + goto st102 + case 32: + goto st456 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto tr161 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st456 + } + goto tr158 +tr707: + ( m.cs) = 458 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr702: + ( m.cs) = 458 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st458: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof458 + } + st_case_458: +//line plugins/parsers/influx/machine.go:16158 + switch ( m.data)[( m.p)] { + case 10: + goto tr273 + case 11: + goto tr707 + case 13: + goto st102 + case 32: + goto tr706 + case 34: + goto tr202 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto tr203 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr706 + } + goto tr200 + st459: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof459 + } + st_case_459: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st460 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st460: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof460 + } + st_case_460: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + 
case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st461 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st461: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof461 + } + st_case_461: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st462 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st462: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof462 + } + st_case_462: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st463 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st463: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof463 + } + st_case_463: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st464 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st464: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof464 + } + st_case_464: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st465 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st465: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof465 + } + st_case_465: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st466 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st466: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof466 + } + st_case_466: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st467 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st467: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof467 + } + st_case_467: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st468 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + 
st468: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof468 + } + st_case_468: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st469 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st469: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof469 + } + st_case_469: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st470 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st470: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof470 + } + st_case_470: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st471 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st471: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof471 + } + st_case_471: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st472 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st472: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof472 + } + st_case_472: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st473 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st473: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof473 + } + st_case_473: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st474 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st474: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof474 + } + st_case_474: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st475 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st475: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof475 + } + st_case_475: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 
13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st476 + } + case ( m.data)[( m.p)] >= 9: + goto tr701 + } + goto st65 + st476: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof476 + } + st_case_476: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 11: + goto tr702 + case 13: + goto tr676 + case 32: + goto tr701 + case 34: + goto tr206 + case 44: + goto tr156 + case 61: + goto tr163 + case 92: + goto st67 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr701 + } + goto st65 +tr668: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again +tr852: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr727: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr740: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr746: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr752: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr884: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr888: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = 
m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr893: + ( m.cs) = 106 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st106: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof106 + } + st_case_106: +//line plugins/parsers/influx/machine.go:16958 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr256 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr277 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr276 +tr276: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st107 + st107: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof107 + } + st_case_107: +//line plugins/parsers/influx/machine.go:16990 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr259 + case 44: + goto st6 + case 61: + goto tr279 + case 92: + goto st121 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st107 +tr279: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st108 + st108: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof108 + } + st_case_108: +//line plugins/parsers/influx/machine.go:17026 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr264 + case 44: + goto st6 + case 45: + goto tr281 + case 46: + goto tr282 + case 48: + goto tr283 + case 61: + goto st6 + case 70: + goto tr285 + case 84: + goto tr286 + case 92: + goto tr151 + case 102: + goto tr287 + case 116: + goto tr288 + } + switch { + case ( m.data)[( m.p)] > 13: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr284 + } + case ( m.data)[( m.p)] >= 12: + goto st6 + } + goto tr146 +tr281: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st109 + st109: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof109 + } + st_case_109: +//line plugins/parsers/influx/machine.go:17077 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 46: + goto st110 + case 48: + goto st481 + case 61: + goto st6 + case 92: + goto st62 + } + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st484 + } + case ( m.data)[( m.p)] >= 9: + goto tr153 + } + goto st47 +tr282: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st110 + st110: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof110 + } + st_case_110: +//line plugins/parsers/influx/machine.go:17120 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 92: + goto st62 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st477 + } + case ( m.data)[( m.p)] >= 9: + goto tr153 + } + goto st47 + st477: + if ( m.p)++; ( m.p) 
== ( m.pe) { + goto _test_eof477 + } + st_case_477: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr726 + case 13: + goto tr533 + case 32: + goto tr725 + case 34: + goto tr155 + case 44: + goto tr727 + case 61: + goto st6 + case 69: + goto st111 + case 92: + goto st62 + case 101: + goto st111 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st477 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 + st111: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof111 + } + st_case_111: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr293 + case 44: + goto tr156 + case 61: + goto st6 + case 92: + goto st62 + } + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st480 + } + default: + goto st112 + } + goto st47 +tr293: + ( m.cs) = 478 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st478: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof478 + } + st_case_478: +//line plugins/parsers/influx/machine.go:17238 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 11: + goto tr564 + case 13: + goto st32 + case 32: + goto tr563 + case 44: + goto tr565 + case 61: + goto tr130 + case 92: + goto st21 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st479 + } + case ( m.data)[( m.p)] >= 9: + goto tr563 + } + goto st15 + st479: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof479 + } + st_case_479: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 + case 44: + goto tr733 + case 61: + goto tr130 + case 92: + goto st21 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st479 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 + } + goto st15 + st112: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof112 + } + st_case_112: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 92: + goto st62 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st480 + } + case ( m.data)[( m.p)] >= 9: + goto tr153 + } + goto st47 + st480: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof480 + } + st_case_480: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr726 + case 13: + goto tr533 + case 32: + goto tr725 + case 34: + goto tr155 + case 44: + goto tr727 + case 61: + goto st6 + case 92: + goto st62 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st480 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 + st481: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof481 + } + st_case_481: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr726 + case 13: + goto tr533 + case 32: + goto tr725 + case 34: + goto tr155 + case 44: + goto tr727 + case 46: + goto st477 + case 61: + goto st6 + case 69: + goto st111 + case 92: + goto st62 
+ case 101: + goto st111 + case 105: + goto st483 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st482 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 + st482: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof482 + } + st_case_482: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr726 + case 13: + goto tr533 + case 32: + goto tr725 + case 34: + goto tr155 + case 44: + goto tr727 + case 46: + goto st477 + case 61: + goto st6 + case 69: + goto st111 + case 92: + goto st62 + case 101: + goto st111 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st482 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 + st483: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof483 + } + st_case_483: + switch ( m.data)[( m.p)] { + case 10: + goto tr737 + case 11: + goto tr738 + case 13: + goto tr739 + case 32: + goto tr736 + case 34: + goto tr155 + case 44: + goto tr740 + case 61: + goto st6 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr736 + } + goto st47 + st484: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof484 + } + st_case_484: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr726 + case 13: + goto tr533 + case 32: + goto tr725 + case 34: + goto tr155 + case 44: + goto tr727 + case 46: + goto st477 + case 61: + goto st6 + case 69: + goto st111 + case 92: + goto st62 + case 101: + goto st111 + case 105: + goto st483 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st484 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 +tr283: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st485 + st485: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof485 + } + st_case_485: +//line plugins/parsers/influx/machine.go:17514 + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr726 + case 13: + goto tr533 + case 32: + goto tr725 + case 34: + goto tr155 + case 44: + goto tr727 + case 46: + goto st477 + case 61: + goto st6 + case 69: + goto st111 + case 92: + goto st62 + case 101: + goto st111 + case 105: + goto st483 + case 117: + goto st486 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st482 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 + st486: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof486 + } + st_case_486: + switch ( m.data)[( m.p)] { + case 10: + goto tr743 + case 11: + goto tr744 + case 13: + goto tr745 + case 32: + goto tr742 + case 34: + goto tr155 + case 44: + goto tr746 + case 61: + goto st6 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr742 + } + goto st47 +tr284: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st487 + st487: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof487 + } + st_case_487: +//line plugins/parsers/influx/machine.go:17590 + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 11: + goto tr726 + case 13: + goto tr533 + case 32: + goto tr725 + case 34: + goto tr155 + case 44: + goto tr727 + case 46: + goto st477 + case 61: + goto st6 + case 69: + goto st111 + case 92: + goto st62 + case 101: + goto st111 + case 105: + goto st483 + case 117: + goto st486 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + 
goto st487 + } + case ( m.data)[( m.p)] >= 9: + goto tr725 + } + goto st47 +tr285: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st488 + st488: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof488 + } + st_case_488: +//line plugins/parsers/influx/machine.go:17639 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 11: + goto tr750 + case 13: + goto tr751 + case 32: + goto tr748 + case 34: + goto tr155 + case 44: + goto tr752 + case 61: + goto st6 + case 65: + goto st113 + case 92: + goto st62 + case 97: + goto st116 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr748 + } + goto st47 + st113: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof113 + } + st_case_113: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 76: + goto st114 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 + st114: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof114 + } + st_case_114: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 83: + goto st115 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 + st115: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof115 + } + st_case_115: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 69: + goto st489 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 + st489: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof489 + } + st_case_489: + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 11: + goto tr750 + case 13: + goto tr751 + case 32: + goto tr748 + case 34: + goto tr155 + case 44: + goto tr752 + case 61: + goto st6 + case 92: + goto st62 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr748 + } + goto st47 + st116: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof116 + } + st_case_116: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 92: + goto st62 + case 108: + goto st117 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 + st117: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof117 + } + st_case_117: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + case 61: + goto st6 + case 92: + goto st62 + case 115: + goto st118 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 st118: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof118 } st_case_118: -//line plugins/parsers/influx/machine.go:12584 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 + case 32: + goto tr153 + case 34: + goto tr155 + case 44: + goto tr156 + 
case 61: + goto st6 + case 92: + goto st62 + case 101: + goto st489 } - goto st86 -tr90: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st119 -tr84: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 + } + goto st47 +tr286: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st119 -tr239: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st119 + goto st490 + st490: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof490 + } + st_case_490: +//line plugins/parsers/influx/machine.go:17878 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 11: + goto tr750 + case 13: + goto tr751 + case 32: + goto tr748 + case 34: + goto tr155 + case 44: + goto tr752 + case 61: + goto st6 + case 82: + goto st119 + case 92: + goto st62 + case 114: + goto st120 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr748 + } + goto st47 st119: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof119 } st_case_119: -//line plugins/parsers/influx/machine.go:12621 switch ( m.data)[( m.p)] { - case 9: - goto st7 case 10: - goto tr61 + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 case 32: - goto st7 + goto tr153 case 34: - goto tr210 + goto tr155 case 44: - goto st7 + goto tr156 case 61: - goto st7 + goto st6 + case 85: + goto st115 case 92: - goto tr230 + goto st62 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 } - goto tr229 -tr229: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st120 + goto st47 st120: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof120 } st_case_120: -//line plugins/parsers/influx/machine.go:12653 switch ( m.data)[( m.p)] { - case 9: - goto st7 case 10: - goto tr61 + goto tr28 + case 11: + goto tr154 + case 13: + goto st6 case 32: - goto st7 + goto tr153 case 34: - goto tr213 + goto tr155 case 44: - goto st7 + goto tr156 case 61: - goto tr232 + goto st6 case 92: - goto st128 + goto st62 + case 117: + goto st118 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr153 } - goto st120 -tr232: -//line plugins/parsers/influx/machine.go.rl:76 + goto st47 +tr287: +//line plugins/parsers/influx/machine.go.rl:28 - key = m.text() + m.pb = m.p + + goto st491 + st491: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof491 + } + st_case_491: +//line plugins/parsers/influx/machine.go:17974 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 11: + goto tr750 + case 13: + goto tr751 + case 32: + goto tr748 + case 34: + goto tr155 + case 44: + goto tr752 + case 61: + goto st6 + case 92: + goto st62 + case 97: + goto st116 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr748 + } + goto st47 +tr288: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st492 + st492: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof492 + } + st_case_492: +//line plugins/parsers/influx/machine.go:18010 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 11: + goto tr750 + case 13: + goto tr751 + case 32: + goto tr748 + case 34: + goto tr155 + case 44: + goto tr752 + case 61: + goto st6 + case 92: + goto st62 + case 114: + goto st120 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + 
goto tr748 + } + goto st47 +tr277: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p goto st121 st121: @@ -12681,65 +18042,55 @@ tr232: goto _test_eof121 } st_case_121: -//line plugins/parsers/influx/machine.go:12685 +//line plugins/parsers/influx/machine.go:18046 switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 case 34: - goto tr183 - case 44: - goto st7 - case 61: - goto st7 + goto st107 case 92: - goto tr235 + goto st122 } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 + } + case ( m.data)[( m.p)] >= 9: + goto tr45 } - goto tr234 -tr234: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st122 + goto st44 st122: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof122 } st_case_122: -//line plugins/parsers/influx/machine.go:12717 +//line plugins/parsers/influx/machine.go:18070 switch ( m.data)[( m.p)] { case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 + goto st6 + case 10: + goto tr28 case 32: - goto tr237 + goto st6 case 34: - goto tr189 + goto tr259 case 44: - goto tr239 + goto st6 case 61: - goto st7 + goto tr279 case 92: - goto st127 + goto st121 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 } - goto st122 -tr238: -//line plugins/parsers/influx/machine.go.rl:80 + goto st107 +tr265: +//line plugins/parsers/influx/machine.go.rl:28 - m.handler.AddTag(key, m.text()) + m.pb = m.p goto st123 st123: @@ -12747,31 +18098,40 @@ tr238: goto _test_eof123 } st_case_123: -//line plugins/parsers/influx/machine.go:12751 +//line plugins/parsers/influx/machine.go:18102 switch ( m.data)[( m.p)] { - case 9: - goto tr237 + case 10: + goto tr28 case 11: - goto tr242 - case 12: - goto tr60 + goto tr230 + case 13: + goto st6 case 32: - goto tr237 + goto tr229 case 34: - goto tr201 + goto tr155 case 44: - goto tr239 + goto tr231 + case 46: + goto st124 + case 48: + goto st517 case 61: - goto st7 + goto st6 case 92: - goto tr243 + goto st85 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st520 + } + case ( m.data)[( m.p)] >= 9: + goto tr229 } - goto tr241 -tr241: -//line plugins/parsers/influx/machine.go.rl:18 + goto st79 +tr266: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -12781,41 +18141,251 @@ tr241: goto _test_eof124 } st_case_124: -//line plugins/parsers/influx/machine.go:12785 +//line plugins/parsers/influx/machine.go:18145 switch ( m.data)[( m.p)] { - case 9: - goto tr237 + case 10: + goto tr28 case 11: - goto tr245 - case 12: - goto tr60 + goto tr230 + case 13: + goto st6 case 32: - goto tr237 + goto tr229 case 34: - goto tr205 + goto tr155 case 44: - goto tr239 + goto tr231 + case 61: + goto st6 + case 92: + goto st85 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st493 + } + case ( m.data)[( m.p)] >= 9: + goto tr229 + } + goto st79 + st493: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof493 + } + st_case_493: + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 11: + goto tr759 + case 13: + goto tr638 + case 32: + goto tr757 + case 34: + goto tr155 + case 44: + goto tr760 + case 61: + goto st6 + case 
69: + goto st126 + case 92: + goto st85 + case 101: + goto st126 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st493 + } + case ( m.data)[( m.p)] >= 9: + goto tr757 + } + goto st79 +tr759: + ( m.cs) = 494 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr792: + ( m.cs) = 494 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr798: + ( m.cs) = 494 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr804: + ( m.cs) = 494 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st494: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof494 + } + st_case_494: +//line plugins/parsers/influx/machine.go:18306 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr763 + case 13: + goto st72 + case 32: + goto tr762 + case 34: + goto tr202 + case 44: + goto tr231 + case 45: + goto tr764 + case 61: + goto st6 + case 92: + goto tr235 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr765 + } + case ( m.data)[( m.p)] >= 9: + goto tr762 + } + goto tr233 +tr763: + ( m.cs) = 495 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto _again + st495: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof495 + } + st_case_495: +//line plugins/parsers/influx/machine.go:18358 + switch ( m.data)[( m.p)] { + case 10: + goto tr219 + case 11: + goto tr763 + case 13: + goto st72 + case 32: + goto tr762 + case 34: + goto tr202 + case 44: + goto tr231 + case 45: + goto tr764 case 61: goto tr99 case 92: - goto st126 + goto tr235 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr765 + } + case ( m.data)[( m.p)] >= 9: + goto tr762 } - goto st124 -tr245: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st125 -tr242: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 + goto tr233 +tr764: +//line plugins/parsers/influx/machine.go.rl:28 m.pb 
= m.p @@ -12825,5377 +18395,1455 @@ tr242: goto _test_eof125 } st_case_125: -//line plugins/parsers/influx/machine.go:12829 +//line plugins/parsers/influx/machine.go:18399 switch ( m.data)[( m.p)] { - case 9: + case 10: + goto tr28 + case 11: goto tr237 - case 11: - goto tr242 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr201 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto tr243 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr241 -tr243: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st126 - st126: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof126 - } - st_case_126: -//line plugins/parsers/influx/machine.go:12863 - switch ( m.data)[( m.p)] { - case 34: - goto st124 - case 92: - goto st124 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st33 -tr235: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st127 - st127: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof127 - } - st_case_127: -//line plugins/parsers/influx/machine.go:12890 - switch ( m.data)[( m.p)] { - case 34: - goto st122 - case 92: - goto st122 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st31 -tr230: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st128 - st128: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof128 - } - st_case_128: -//line plugins/parsers/influx/machine.go:12917 - switch ( m.data)[( m.p)] { - case 34: - goto st120 - case 92: - goto st120 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st29 -tr163: -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st129 - st129: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof129 - } - st_case_129: -//line plugins/parsers/influx/machine.go:12944 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr247 - case 44: - goto tr90 - case 45: - goto tr248 - case 46: - goto tr249 - case 48: - goto tr250 - case 70: - goto tr252 - case 84: - goto tr253 - case 92: - goto st170 - case 102: - goto tr254 - case 116: - goto tr255 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr251 - } - case ( m.data)[( m.p)] >= 10: - goto tr5 - } - goto st40 -tr247: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st403 - st403: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof403 - } - st_case_403: -//line plugins/parsers/influx/machine.go:12995 - switch ( m.data)[( m.p)] { - case 9: - goto tr587 - case 11: - goto tr588 - case 12: - goto tr482 - case 32: - goto tr587 - case 34: - goto tr83 - case 44: - goto tr589 - case 92: - goto tr85 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr80 -tr614: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st404 -tr587: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st404 -tr746: -//line 
plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st404 -tr742: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st404 -tr774: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st404 -tr778: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st404 -tr782: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st404 -tr789: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st404 -tr798: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st404 -tr803: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st404 -tr808: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st404 - st404: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof404 - } - st_case_404: -//line plugins/parsers/influx/machine.go:13123 - switch ( m.data)[( m.p)] { - case 9: - goto st404 - case 11: - goto tr591 - case 12: - goto st318 - case 32: - goto st404 - case 34: - goto tr95 - case 44: - goto st7 - case 45: - goto tr592 - case 61: - goto st7 - case 92: - goto tr96 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr593 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr92 -tr591: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st405 - st405: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof405 - } - st_case_405: -//line plugins/parsers/influx/machine.go:13164 - switch ( m.data)[( m.p)] { - case 9: - goto st404 - case 11: - goto tr591 - case 12: - goto st318 - case 32: - goto st404 - case 34: - goto tr95 - case 44: - goto st7 - case 45: - goto tr592 - case 61: - goto tr99 - case 92: - goto tr96 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr593 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr92 -tr592: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st130 - st130: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof130 - } - st_case_130: -//line plugins/parsers/influx/machine.go:13205 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr101 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st406 - } - case ( m.data)[( m.p)] >= 12: - goto tr101 - } - goto st42 -tr593: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st406 - st406: - if ( m.p)++; ( m.p) == ( 
m.pe) { - goto _test_eof406 - } - st_case_406: -//line plugins/parsers/influx/machine.go:13242 - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st408 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 -tr594: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st407 - st407: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof407 - } - st_case_407: -//line plugins/parsers/influx/machine.go:13281 - switch ( m.data)[( m.p)] { - case 9: - goto st268 - case 11: - goto st407 - case 12: - goto st210 - case 32: - goto st268 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto st42 - st408: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof408 - } - st_case_408: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st409 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st409: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof409 - } - st_case_409: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st410 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st410: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof410 - } - st_case_410: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st411 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st411: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof411 - } - st_case_411: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st412 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st412: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof412 - } - st_case_412: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st413 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st413: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof413 - } - st_case_413: - switch ( m.data)[( m.p)] { - 
case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st414 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st414: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof414 - } - st_case_414: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st415 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st415: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof415 - } - st_case_415: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st416 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st416: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof416 - } - st_case_416: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st417 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st417: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof417 - } - st_case_417: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st418 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st418: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof418 - } - st_case_418: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st419 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st419: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof419 - } - st_case_419: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st420 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st420: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof420 - } - st_case_420: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - 
switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st421 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st421: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof421 - } - st_case_421: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st422 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st422: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof422 - } - st_case_422: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st423 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st423: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof423 - } - st_case_423: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st424 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st424: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof424 - } - st_case_424: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st425 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st42 - st425: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof425 - } - st_case_425: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr594 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto st78 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr362 - } - goto st42 -tr588: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st426 -tr790: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st426 -tr799: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st426 -tr804: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st426 -tr809: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st426 - st426: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof426 - } - st_case_426: -//line 
plugins/parsers/influx/machine.go:13930 - switch ( m.data)[( m.p)] { - case 9: - goto tr614 - case 11: - goto tr615 - case 12: - goto tr482 - case 32: - goto tr614 - case 34: - goto tr158 - case 44: - goto tr90 - case 45: - goto tr616 - case 61: - goto st40 - case 92: - goto tr159 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr617 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr156 -tr615: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st427 - st427: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof427 - } - st_case_427: -//line plugins/parsers/influx/machine.go:13975 - switch ( m.data)[( m.p)] { - case 9: - goto tr614 - case 11: - goto tr615 - case 12: - goto tr482 - case 32: - goto tr614 - case 34: - goto tr158 - case 44: - goto tr90 - case 45: - goto tr616 - case 61: - goto tr163 - case 92: - goto tr159 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr617 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr156 -tr616: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st131 - st131: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof131 - } - st_case_131: -//line plugins/parsers/influx/machine.go:14016 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr161 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st428 - } - case ( m.data)[( m.p)] >= 10: - goto tr101 - } - goto st81 -tr617: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st428 - st428: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof428 - } - st_case_428: -//line plugins/parsers/influx/machine.go:14055 - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st432 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 -tr623: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st429 -tr753: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st429 -tr618: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st429 -tr750: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st429 - st429: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof429 - } - st_case_429: -//line plugins/parsers/influx/machine.go:14120 - switch ( m.data)[( m.p)] { - case 9: - goto st429 - case 11: - goto tr622 - case 12: - goto st322 - case 32: - goto st429 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr96 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr92 -tr622: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto 
st430 - st430: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof430 - } - st_case_430: -//line plugins/parsers/influx/machine.go:14154 - switch ( m.data)[( m.p)] { - case 9: - goto st429 - case 11: - goto tr622 - case 12: - goto st322 - case 32: - goto st429 - case 34: - goto tr95 - case 44: - goto st7 - case 61: - goto tr99 - case 92: - goto tr96 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr92 -tr624: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st431 -tr619: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st431 - st431: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof431 - } - st_case_431: -//line plugins/parsers/influx/machine.go:14202 - switch ( m.data)[( m.p)] { - case 9: - goto tr623 - case 11: - goto tr624 - case 12: - goto tr495 - case 32: - goto tr623 - case 34: - goto tr158 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto tr159 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr156 -tr159: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st132 - st132: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof132 - } - st_case_132: -//line plugins/parsers/influx/machine.go:14236 - switch ( m.data)[( m.p)] { - case 34: - goto st81 - case 92: - goto st81 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st26 - st432: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof432 - } - st_case_432: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st433 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st433: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof433 - } - st_case_433: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st434 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st434: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof434 - } - st_case_434: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st435 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st435: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof435 - } - st_case_435: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( 
m.p)] && ( m.data)[( m.p)] <= 57 { - goto st436 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st436: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof436 - } - st_case_436: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st437 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st437: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof437 - } - st_case_437: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st438 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st438: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof438 - } - st_case_438: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st439 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st439: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof439 - } - st_case_439: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st440 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st440: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof440 - } - st_case_440: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st441 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st441: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof441 - } - st_case_441: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st442 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st442: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof442 - } - st_case_442: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st443 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st443: - if ( m.p)++; ( m.p) == ( m.pe) { - goto 
_test_eof443 - } - st_case_443: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st444 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st444: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof444 - } - st_case_444: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st445 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st445: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof445 - } - st_case_445: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st446 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st446: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof446 - } - st_case_446: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st447 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st447: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof447 - } - st_case_447: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st448 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st448: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof448 - } - st_case_448: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st449 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st81 - st449: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof449 - } - st_case_449: - switch ( m.data)[( m.p)] { - case 9: - goto tr618 - case 11: - goto tr619 - case 12: - goto tr490 - case 32: - goto tr618 - case 34: - goto tr162 - case 44: - goto tr90 - case 61: - goto tr163 - case 92: - goto st132 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr362 - } - goto st81 -tr83: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st450 -tr89: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st450 - st450: - if ( m.p)++; 
( m.p) == ( m.pe) { - goto _test_eof450 - } - st_case_450: -//line plugins/parsers/influx/machine.go:14844 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr642 case 13: - goto tr357 + goto st6 case 32: - goto tr482 + goto tr229 + case 34: + goto tr206 case 44: - goto tr484 - case 92: - goto st133 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr482 - } - goto st2 -tr642: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st451 -tr794: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st451 -tr819: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st451 -tr822: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st451 -tr825: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st451 - st451: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof451 - } - st_case_451: -//line plugins/parsers/influx/machine.go:14914 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto tr487 - case 13: - goto tr357 - case 32: - goto tr482 - case 44: - goto tr7 - case 45: - goto tr488 + goto tr231 case 61: - goto st2 + goto tr99 case 92: - goto tr46 + goto st83 } switch { case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr489 - } - case ( m.data)[( m.p)] >= 9: - goto tr482 - } - goto tr44 -tr2: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st133 - st133: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof133 - } - st_case_133: -//line plugins/parsers/influx/machine.go:14953 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr1 - } - case ( m.data)[( m.p)] >= 9: - goto tr1 - } - goto st2 -tr589: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st134 -tr744: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st134 -tr776: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st134 -tr780: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st134 -tr784: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st134 -tr792: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st134 -tr801: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - 
m.handler.AddInt(key, m.text()) - - goto st134 -tr806: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st134 -tr811: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st134 - st134: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof134 - } - st_case_134: -//line plugins/parsers/influx/machine.go:15058 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr259 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr260 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr258 -tr258: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st135 - st135: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof135 - } - st_case_135: -//line plugins/parsers/influx/machine.go:15090 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr262 - case 44: - goto st7 - case 61: - goto tr263 - case 92: - goto st169 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st135 -tr259: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st452 -tr262: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st452 - st452: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof452 - } - st_case_452: -//line plugins/parsers/influx/machine.go:15132 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st453 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto st9 - case 61: - goto tr169 - case 92: - goto st118 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st207 - } - goto st86 - st453: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof453 - } - st_case_453: - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st453 - case 13: - goto tr357 - case 32: - goto st207 - case 44: - goto tr207 - case 45: - goto tr644 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr645 - } - case ( m.data)[( m.p)] >= 9: - goto st207 - } - goto st86 -tr644: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st136 - st136: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof136 - } - st_case_136: -//line plugins/parsers/influx/machine.go:15196 - switch ( m.data)[( m.p)] { - case 32: - goto tr207 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] < 12: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 10 { - goto tr207 - } - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st454 - } - default: - goto tr207 - } - goto st86 -tr645: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st454 - st454: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof454 - } - st_case_454: -//line plugins/parsers/influx/machine.go:15231 - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto 
st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st456 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 -tr646: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st455 - st455: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof455 - } - st_case_455: -//line plugins/parsers/influx/machine.go:15268 - switch ( m.data)[( m.p)] { - case 10: - goto tr357 - case 11: - goto st455 - case 13: - goto tr357 - case 32: - goto st210 - case 44: - goto tr61 - case 61: - goto tr169 - case 92: - goto st118 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st210 - } - goto st86 - st456: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof456 - } - st_case_456: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st457 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st457: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof457 - } - st_case_457: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st458 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st458: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof458 - } - st_case_458: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st459 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st459: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof459 - } - st_case_459: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st460 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st460: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof460 - } - st_case_460: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st461 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st461: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof461 - } - st_case_461: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st462 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st462: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof462 - } - 
st_case_462: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st463 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st463: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof463 - } - st_case_463: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st464 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st464: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof464 - } - st_case_464: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st465 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st465: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof465 - } - st_case_465: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st466 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st466: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof466 - } - st_case_466: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st467 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st467: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof467 - } - st_case_467: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st468 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st468: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof468 - } - st_case_468: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st469 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st469: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof469 - } - st_case_469: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st470 
- } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st470: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof470 - } - st_case_470: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st471 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st471: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof471 - } - st_case_471: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st472 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st472: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof472 - } - st_case_472: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st473 - } - case ( m.data)[( m.p)] >= 9: - goto tr361 - } - goto st86 - st473: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof473 - } - st_case_473: - switch ( m.data)[( m.p)] { - case 10: - goto tr362 - case 11: - goto tr646 - case 13: - goto tr362 - case 32: - goto tr361 - case 44: - goto tr207 - case 61: - goto tr169 - case 92: - goto st118 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr361 - } - goto st86 -tr263: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st137 - st137: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof137 - } - st_case_137: -//line plugins/parsers/influx/machine.go:15839 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr266 - case 44: - goto st7 - case 45: - goto tr267 - case 46: - goto tr268 - case 48: - goto tr269 - case 61: - goto st7 - case 70: - goto tr271 - case 84: - goto tr272 - case 92: - goto tr235 - case 102: - goto tr273 - case 116: - goto tr274 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr270 - } - case ( m.data)[( m.p)] >= 12: - goto tr61 - } - goto tr234 -tr266: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st474 - st474: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof474 - } - st_case_474: -//line plugins/parsers/influx/machine.go:15894 - switch ( m.data)[( m.p)] { - case 9: - goto tr666 - case 11: - goto tr667 - case 12: - goto tr514 - case 32: - goto tr666 - case 34: - goto tr183 - case 44: - goto tr668 - case 61: - goto tr25 - case 92: - goto tr185 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto tr180 -tr693: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st475 -tr666: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st475 -tr721: 
-//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st475 -tr727: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st475 -tr731: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st475 -tr735: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st475 - st475: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof475 - } - st_case_475: -//line plugins/parsers/influx/machine.go:15978 - switch ( m.data)[( m.p)] { - case 9: - goto st475 - case 11: - goto tr670 - case 12: - goto st318 - case 32: - goto st475 - case 34: - goto tr95 - case 44: - goto st7 - case 45: - goto tr671 - case 61: - goto st7 - case 92: - goto tr195 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr672 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr192 -tr670: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st476 - st476: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof476 - } - st_case_476: -//line plugins/parsers/influx/machine.go:16019 - switch ( m.data)[( m.p)] { - case 9: - goto st475 - case 11: - goto tr670 - case 12: - goto st318 - case 32: - goto st475 - case 34: - goto tr95 - case 44: - goto st7 - case 45: - goto tr671 - case 61: - goto tr197 - case 92: - goto tr195 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr672 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr192 -tr671: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st138 - st138: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof138 - } - st_case_138: -//line plugins/parsers/influx/machine.go:16060 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr101 - case 32: - goto st7 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st477 - } - case ( m.data)[( m.p)] >= 12: - goto tr101 - } - goto st91 -tr672: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st477 - st477: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof477 - } - st_case_477: -//line plugins/parsers/influx/machine.go:16097 - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st479 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 -tr673: -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st478 - st478: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof478 - } - st_case_478: -//line plugins/parsers/influx/machine.go:16136 - switch ( m.data)[( m.p)] { - case 9: - goto st268 - case 11: - goto st478 - case 12: - goto st210 - case 32: - goto st268 - case 34: - goto tr98 - 
case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 - } - goto st91 - st479: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof479 - } - st_case_479: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st480 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st480: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof480 - } - st_case_480: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st481 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st481: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof481 - } - st_case_481: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st482 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st482: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof482 - } - st_case_482: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st483 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st483: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof483 - } - st_case_483: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st484 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st484: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof484 - } - st_case_484: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st485 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st485: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof485 - } - st_case_485: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st486 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st486: - if ( m.p)++; ( 
m.p) == ( m.pe) { - goto _test_eof486 - } - st_case_486: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st487 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st487: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof487 - } - st_case_487: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st488 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st488: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof488 - } - st_case_488: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st489 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st489: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof489 - } - st_case_489: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st490 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st490: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof490 - } - st_case_490: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st491 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st491: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof491 - } - st_case_491: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st492 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st492: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof492 - } - st_case_492: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st493 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st493: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof493 - } - st_case_493: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 
34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st494 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st494: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof494 - } - st_case_494: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st495 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 - } - goto st91 - st495: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof495 - } - st_case_495: - switch ( m.data)[( m.p)] { - case 9: - goto tr431 - case 11: - goto tr673 - case 12: - goto tr361 - case 32: - goto tr431 - case 34: - goto tr98 - case 44: - goto st7 - case 61: - goto tr197 - case 92: - goto st93 - } - switch { - case ( m.data)[( m.p)] > 13: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st496 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr229 } - goto st91 + goto st81 +tr765: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st496 st496: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof496 } st_case_496: +//line plugins/parsers/influx/machine.go:18438 switch ( m.data)[( m.p)] { - case 9: - goto tr431 + case 10: + goto tr600 case 11: - goto tr673 - case 12: - goto tr361 + goto tr767 + case 13: + goto tr602 case 32: - goto tr431 + goto tr766 case 34: - goto tr98 + goto tr206 case 44: - goto st7 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st93 + goto st83 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr362 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st498 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st91 -tr667: -//line plugins/parsers/influx/machine.go.rl:80 + goto st81 +tr770: + ( m.cs) = 497 +//line plugins/parsers/influx/machine.go.rl:99 - m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- -//line plugins/parsers/influx/machine.go.rl:18 + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st497 -tr722: -//line plugins/parsers/influx/machine.go.rl:80 + goto _again +tr767: + ( m.cs) = 497 +//line plugins/parsers/influx/machine.go.rl:99 - m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- -//line plugins/parsers/influx/machine.go.rl:96 + ( m.cs) = 257; + {( m.p)++; goto _out } + } - m.handler.AddFloat(key, m.text()) +//line plugins/parsers/influx/machine.go.rl:157 - goto st497 -tr728: -//line plugins/parsers/influx/machine.go.rl:80 + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- - m.handler.AddTag(key, m.text()) + ( m.cs) = 257; + {( m.p)++; goto _out } + } -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st497 -tr732: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st497 -tr736: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - 
- m.handler.AddBool(key, m.text()) - - goto st497 + goto _again st497: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof497 } st_case_497: -//line plugins/parsers/influx/machine.go:16785 +//line plugins/parsers/influx/machine.go:18511 switch ( m.data)[( m.p)] { - case 9: - goto tr693 + case 10: + goto tr219 case 11: - goto tr694 - case 12: - goto tr514 + goto tr770 + case 13: + goto st72 case 32: - goto tr693 + goto tr769 case 34: - goto tr201 - case 44: - goto tr190 - case 45: - goto tr695 - case 61: - goto st7 - case 92: goto tr202 + case 44: + goto tr231 + case 61: + goto tr99 + case 92: + goto tr235 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr696 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr769 } - goto tr199 -tr694: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st498 + goto tr233 st498: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof498 } st_case_498: -//line plugins/parsers/influx/machine.go:16830 switch ( m.data)[( m.p)] { - case 9: - goto tr693 + case 10: + goto tr600 case 11: - goto tr694 - case 12: - goto tr514 + goto tr767 + case 13: + goto tr602 case 32: - goto tr693 + goto tr766 case 34: - goto tr201 + goto tr206 case 44: - goto tr190 - case 45: - goto tr695 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto tr202 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr696 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 - } - goto tr199 -tr695: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st139 - st139: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof139 - } - st_case_139: -//line plugins/parsers/influx/machine.go:16871 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr204 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr205 - case 44: - goto tr190 - case 61: - goto tr197 - case 92: - goto st105 - } - switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st499 } - case ( m.data)[( m.p)] >= 10: - goto tr207 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 -tr696: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st499 + goto st81 st499: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof499 } st_case_499: -//line plugins/parsers/influx/machine.go:16910 switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st503 + goto st500 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 -tr702: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st500 -tr697: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st500 + goto st81 st500: if ( m.p)++; ( m.p) == ( m.pe) { goto 
_test_eof500 } st_case_500: -//line plugins/parsers/influx/machine.go:16959 switch ( m.data)[( m.p)] { - case 9: - goto st500 + case 10: + goto tr600 case 11: - goto tr701 - case 12: - goto st322 + goto tr767 + case 13: + goto tr602 case 32: - goto st500 + goto tr766 case 34: - goto tr95 + goto tr206 case 44: - goto st7 + goto tr231 case 61: - goto st7 + goto tr99 case 92: - goto tr195 + goto st83 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st501 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto tr192 -tr701: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st501 + goto st81 st501: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof501 } st_case_501: -//line plugins/parsers/influx/machine.go:16993 switch ( m.data)[( m.p)] { - case 9: - goto st500 + case 10: + goto tr600 case 11: - goto tr701 - case 12: - goto st322 + goto tr767 + case 13: + goto tr602 case 32: - goto st500 + goto tr766 case 34: - goto tr95 + goto tr206 case 44: - goto st7 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto tr195 + goto st83 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st502 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto tr192 -tr703: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st502 -tr698: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st502 + goto st81 st502: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof502 } st_case_502: -//line plugins/parsers/influx/machine.go:17041 switch ( m.data)[( m.p)] { - case 9: - goto tr702 + case 10: + goto tr600 case 11: - goto tr703 - case 12: - goto tr523 + goto tr767 + case 13: + goto tr602 case 32: - goto tr702 + goto tr766 case 34: - goto tr201 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto tr202 + goto st83 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st503 + } + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto tr199 + goto st81 st503: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof503 } st_case_503: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st504 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st504: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof504 } st_case_504: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + 
goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st505 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st505: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof505 } st_case_505: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st506 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st506: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof506 } st_case_506: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st507 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st507: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof507 } st_case_507: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st508 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st508: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof508 } st_case_508: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st509 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st509: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof509 } st_case_509: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st510 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( 
m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st510: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof510 } st_case_510: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st511 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st511: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof511 } st_case_511: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st512 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st512: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof512 } st_case_512: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st513 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st513: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof513 } st_case_513: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st514 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st514: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof514 } st_case_514: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st515 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr766 } - goto st96 + goto st81 st515: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof515 } st_case_515: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr600 case 11: - goto tr698 - case 12: - goto tr520 + goto 
tr767 + case 13: + goto tr602 case 32: - goto tr697 + goto tr766 case 34: - goto tr205 + goto tr206 case 44: - goto tr190 + goto tr231 case 61: - goto tr197 + goto tr99 case 92: - goto st105 + goto st83 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr766 + } + goto st81 + st126: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof126 + } + st_case_126: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr293 + case 44: + goto tr231 + case 61: + goto st6 + case 92: + goto st85 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + case ( m.data)[( m.p)] > 45: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st516 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + default: + goto st127 } - goto st96 + goto st79 + st127: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof127 + } + st_case_127: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 92: + goto st85 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st516 + } + case ( m.data)[( m.p)] >= 9: + goto tr229 + } + goto st79 st516: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof516 } st_case_516: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr758 case 11: - goto tr698 - case 12: - goto tr520 + goto tr759 + case 13: + goto tr638 case 32: - goto tr697 + goto tr757 case 34: - goto tr205 + goto tr155 case 44: - goto tr190 + goto tr760 case 61: - goto tr197 + goto st6 case 92: - goto st105 + goto st85 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st517 + goto st516 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr757 } - goto st96 + goto st79 st517: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof517 } st_case_517: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr758 case 11: - goto tr698 - case 12: - goto tr520 + goto tr759 + case 13: + goto tr638 case 32: - goto tr697 + goto tr757 case 34: - goto tr205 + goto tr155 case 44: - goto tr190 + goto tr760 + case 46: + goto st493 case 61: - goto tr197 + goto st6 + case 69: + goto st126 case 92: - goto st105 + goto st85 + case 101: + goto st126 + case 105: + goto st519 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st518 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr757 } - goto st96 + goto st79 st518: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof518 } st_case_518: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr758 case 11: - goto tr698 - case 12: - goto tr520 + goto tr759 + case 13: + goto tr638 case 32: - goto tr697 + goto tr757 case 34: - goto tr205 + goto tr155 case 44: - goto tr190 + goto tr760 + case 46: + goto st493 case 61: - goto tr197 + goto st6 + case 69: + goto st126 case 92: - goto st105 + goto st85 + case 101: + goto st126 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st519 + goto st518 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + 
goto tr757 } - goto st96 + goto st79 st519: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof519 } st_case_519: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr791 case 11: - goto tr698 - case 12: - goto tr520 + goto tr792 + case 13: + goto tr793 case 32: - goto tr697 + goto tr790 case 34: - goto tr205 + goto tr155 case 44: - goto tr190 + goto tr794 case 61: - goto tr197 + goto st6 case 92: - goto st105 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st520 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr790 } - goto st96 + goto st79 st520: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof520 } st_case_520: switch ( m.data)[( m.p)] { - case 9: - goto tr697 + case 10: + goto tr758 case 11: - goto tr698 - case 12: - goto tr520 + goto tr759 + case 13: + goto tr638 case 32: - goto tr697 + goto tr757 case 34: - goto tr205 + goto tr155 case 44: - goto tr190 - case 61: - goto tr197 - case 92: - goto st105 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr362 - } - goto st96 -tr668: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st140 -tr723: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st140 -tr729: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st140 -tr733: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st140 -tr737: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st140 - st140: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof140 - } - st_case_140: -//line plugins/parsers/influx/machine.go:17690 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr259 - case 44: - goto st7 - case 61: - goto st7 - case 92: - goto tr278 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto tr277 -tr277: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st141 - st141: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof141 - } - st_case_141: -//line plugins/parsers/influx/machine.go:17722 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr262 - case 44: - goto st7 - case 61: - goto tr280 - case 92: - goto st155 - } - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st141 -tr280: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - -//line plugins/parsers/influx/machine.go.rl:84 - - key = m.text() - - goto st142 - st142: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof142 - } - st_case_142: -//line plugins/parsers/influx/machine.go:17758 - switch ( m.data)[( m.p)] { - case 9: - goto st7 - case 10: - goto tr61 - case 32: - goto st7 - case 34: - goto tr266 - case 44: - goto st7 - case 45: - goto tr282 + goto tr760 case 46: - goto tr283 - case 48: - goto tr284 + goto st493 case 61: - 
goto st7 - case 70: - goto tr286 - case 84: - goto tr287 + goto st6 + case 69: + goto st126 case 92: - goto tr185 - case 102: - goto tr288 - case 116: - goto tr289 + goto st85 + case 101: + goto st126 + case 105: + goto st519 } switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr285 - } - case ( m.data)[( m.p)] >= 12: - goto tr61 - } - goto tr180 -tr282: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st143 - st143: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof143 - } - st_case_143: -//line plugins/parsers/influx/machine.go:17809 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 46: - goto st144 - case 48: - goto st524 - case 61: - goto st7 - case 92: - goto st103 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st527 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 - } - goto st89 -tr283: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st144 - st144: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof144 - } - st_case_144: -//line plugins/parsers/influx/machine.go:17852 - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - } - switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st521 + goto st520 } - case ( m.data)[( m.p)] >= 10: - goto tr61 + case ( m.data)[( m.p)] >= 9: + goto tr757 } - goto st89 + goto st79 +tr267: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st521 st521: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof521 } st_case_521: +//line plugins/parsers/influx/machine.go:19361 switch ( m.data)[( m.p)] { - case 9: - goto tr721 + case 10: + goto tr758 case 11: - goto tr722 - case 12: - goto tr566 + goto tr759 + case 13: + goto tr638 case 32: - goto tr721 + goto tr757 case 34: - goto tr189 + goto tr155 case 44: - goto tr723 + goto tr760 + case 46: + goto st493 case 61: - goto st7 + goto st6 case 69: - goto st145 + goto st126 case 92: - goto st103 + goto st85 case 101: - goto st145 + goto st126 + case 105: + goto st519 + case 117: + goto st522 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st521 + goto st518 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr757 } - goto st89 - st145: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof145 - } - st_case_145: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr294 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - } - switch { - case ( m.data)[( m.p)] < 43: - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 - } - default: - goto st146 - } - goto st89 -tr294: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st522 + goto st79 st522: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof522 } st_case_522: -//line plugins/parsers/influx/machine.go:17963 switch ( m.data)[( m.p)] { 
case 10: - goto tr357 + goto tr797 case 11: - goto tr565 + goto tr798 case 13: - goto tr357 + goto tr799 case 32: - goto tr514 - case 44: - goto tr516 - case 61: - goto tr207 - case 92: - goto st36 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st390 - } - case ( m.data)[( m.p)] >= 9: - goto tr514 - } - goto st31 - st146: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof146 - } - st_case_146: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 + goto tr796 case 34: - goto tr189 + goto tr155 case 44: - goto tr190 + goto tr800 case 61: - goto st7 + goto st6 case 92: - goto st103 + goto st85 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st523 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr796 } - goto st89 + goto st79 +tr268: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st523 st523: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof523 } st_case_523: +//line plugins/parsers/influx/machine.go:19437 switch ( m.data)[( m.p)] { - case 9: - goto tr721 + case 10: + goto tr758 case 11: - goto tr722 - case 12: - goto tr566 + goto tr759 + case 13: + goto tr638 case 32: - goto tr721 + goto tr757 case 34: - goto tr189 + goto tr155 case 44: - goto tr723 + goto tr760 + case 46: + goto st493 case 61: - goto st7 + goto st6 + case 69: + goto st126 case 92: - goto st103 + goto st85 + case 101: + goto st126 + case 105: + goto st519 + case 117: + goto st522 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st523 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr757 } - goto st89 + goto st79 +tr269: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st524 st524: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof524 } st_case_524: +//line plugins/parsers/influx/machine.go:19486 switch ( m.data)[( m.p)] { - case 9: - goto tr721 + case 10: + goto tr803 case 11: - goto tr722 - case 12: - goto tr566 + goto tr804 + case 13: + goto tr805 case 32: - goto tr721 + goto tr802 case 34: - goto tr189 + goto tr155 case 44: - goto tr723 - case 46: - goto st521 + goto tr806 case 61: - goto st7 - case 69: - goto st145 + goto st6 + case 65: + goto st128 case 92: - goto st103 - case 101: - goto st145 - case 105: - goto st526 + goto st85 + case 97: + goto st131 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st525 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 } - goto st89 + goto st79 + st128: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof128 + } + st_case_128: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 76: + goto st129 + case 92: + goto st85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 + st129: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof129 + } + st_case_129: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 83: + goto 
st130 + case 92: + goto st85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 + st130: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof130 + } + st_case_130: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 69: + goto st525 + case 92: + goto st85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 st525: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof525 } st_case_525: switch ( m.data)[( m.p)] { - case 9: - goto tr721 + case 10: + goto tr803 case 11: - goto tr722 - case 12: - goto tr566 + goto tr804 + case 13: + goto tr805 case 32: - goto tr721 + goto tr802 case 34: - goto tr189 + goto tr155 case 44: - goto tr723 - case 46: - goto st521 + goto tr806 case 61: - goto st7 - case 69: - goto st145 + goto st6 case 92: - goto st103 + goto st85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 + } + goto st79 + st131: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof131 + } + st_case_131: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 92: + goto st85 + case 108: + goto st132 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 + st132: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof132 + } + st_case_132: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 92: + goto st85 + case 115: + goto st133 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 + st133: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof133 + } + st_case_133: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 92: + goto st85 case 101: - goto st145 + goto st525 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st525 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 } - goto st89 + goto st79 +tr270: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st526 st526: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof526 } st_case_526: +//line plugins/parsers/influx/machine.go:19725 switch ( m.data)[( m.p)] { - case 9: - goto tr727 + case 10: + goto tr803 case 11: - goto tr728 - case 12: - goto tr572 + goto tr804 + case 13: + goto tr805 case 32: - goto tr727 + goto tr802 case 34: - goto tr189 + goto tr155 case 44: - goto tr729 + goto tr806 case 61: - goto st7 + goto st6 + case 82: + goto st134 case 92: - goto st103 + goto st85 + case 114: + goto st135 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr389 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 } - goto st89 + goto st79 + st134: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof134 + } + st_case_134: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 85: + goto st130 + case 92: + 
goto st85 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 + st135: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof135 + } + st_case_135: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr230 + case 13: + goto st6 + case 32: + goto tr229 + case 34: + goto tr155 + case 44: + goto tr231 + case 61: + goto st6 + case 92: + goto st85 + case 117: + goto st133 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr229 + } + goto st79 +tr271: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st527 st527: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof527 } st_case_527: +//line plugins/parsers/influx/machine.go:19821 switch ( m.data)[( m.p)] { - case 9: - goto tr721 + case 10: + goto tr803 case 11: - goto tr722 - case 12: - goto tr566 + goto tr804 + case 13: + goto tr805 case 32: - goto tr721 + goto tr802 case 34: - goto tr189 + goto tr155 case 44: - goto tr723 - case 46: - goto st521 + goto tr806 case 61: - goto st7 - case 69: - goto st145 + goto st6 case 92: - goto st103 - case 101: - goto st145 - case 105: - goto st526 + goto st85 + case 97: + goto st131 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st527 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 } - goto st89 -tr284: -//line plugins/parsers/influx/machine.go.rl:18 + goto st79 +tr272: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -18205,753 +19853,489 @@ tr284: goto _test_eof528 } st_case_528: -//line plugins/parsers/influx/machine.go:18209 +//line plugins/parsers/influx/machine.go:19857 switch ( m.data)[( m.p)] { - case 9: - goto tr721 + case 10: + goto tr803 case 11: - goto tr722 - case 12: - goto tr566 + goto tr804 + case 13: + goto tr805 case 32: - goto tr721 + goto tr802 case 34: - goto tr189 + goto tr155 case 44: - goto tr723 - case 46: - goto st521 + goto tr806 case 61: - goto st7 - case 69: - goto st145 + goto st6 case 92: - goto st103 - case 101: - goto st145 - case 105: - goto st526 - case 117: - goto st529 + goto st85 + case 114: + goto st135 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr802 + } + goto st79 +tr257: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st136 + st136: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof136 + } + st_case_136: +//line plugins/parsers/influx/machine.go:19893 + switch ( m.data)[( m.p)] { + case 34: + goto st97 + case 92: + goto st137 } switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st525 + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr45 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr45 } - goto st89 + goto st44 + st137: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof137 + } + st_case_137: +//line plugins/parsers/influx/machine.go:19917 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr259 + case 44: + goto st6 + case 61: + goto tr260 + case 92: + goto st136 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st97 + st138: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof138 + } + st_case_138: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 
+ case 32: + goto tr87 + case 34: + goto tr315 + case 44: + goto tr90 + case 92: + goto st140 + } + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st531 + } + default: + goto st139 + } + goto st29 +tr315: + ( m.cs) = 529 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st529: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof529 } st_case_529: +//line plugins/parsers/influx/machine.go:19990 switch ( m.data)[( m.p)] { - case 9: - goto tr731 + case 10: + goto tr101 case 11: - goto tr732 - case 12: - goto tr576 + goto tr634 + case 13: + goto st32 case 32: - goto tr731 - case 34: - goto tr189 + goto tr499 case 44: - goto tr733 - case 61: - goto st7 + goto tr501 case 92: - goto st103 + goto st94 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr393 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st530 + } + case ( m.data)[( m.p)] >= 9: + goto tr499 } - goto st89 -tr285: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st530 + goto st1 st530: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof530 } st_case_530: -//line plugins/parsers/influx/machine.go:18285 switch ( m.data)[( m.p)] { - case 9: - goto tr721 + case 10: + goto tr730 case 11: - goto tr722 - case 12: - goto tr566 + goto tr812 + case 13: + goto tr732 case 32: - goto tr721 - case 34: - goto tr189 + goto tr811 case 44: - goto tr723 - case 46: - goto st521 - case 61: - goto st7 - case 69: - goto st145 + goto tr813 case 92: - goto st103 - case 101: - goto st145 - case 105: - goto st526 - case 117: - goto st529 + goto st94 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st530 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr811 } - goto st89 -tr286: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st531 + goto st1 + st139: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof139 + } + st_case_139: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 92: + goto st140 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st531 + } + case ( m.data)[( m.p)] >= 9: + goto tr87 + } + goto st29 st531: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof531 } st_case_531: -//line plugins/parsers/influx/machine.go:18334 switch ( m.data)[( m.p)] { - case 9: - goto tr735 + case 10: + goto tr636 case 11: - goto tr736 - case 12: - goto tr580 + goto tr637 + case 13: + goto tr638 case 32: - goto tr735 + goto tr635 case 34: - goto tr189 + goto tr89 case 44: - goto tr737 - case 61: - goto st7 - case 65: - goto st147 + goto tr639 case 92: - goto st103 - case 97: - goto st150 + goto st140 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st531 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 } - goto st89 - st147: + goto st29 +tr85: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st140 + st140: if ( m.p)++; ( 
m.p) == ( m.pe) { - goto _test_eof147 + goto _test_eof140 } - st_case_147: + st_case_140: +//line plugins/parsers/influx/machine.go:20113 switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 76: - goto st148 + goto st29 case 92: - goto st103 + goto st29 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 } - goto st89 - st148: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof148 - } - st_case_148: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 83: - goto st149 - case 92: - goto st103 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 - st149: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof149 - } - st_case_149: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 69: - goto st532 - case 92: - goto st103 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 + goto st1 st532: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof532 } st_case_532: switch ( m.data)[( m.p)] { - case 9: - goto tr735 + case 10: + goto tr636 case 11: - goto tr736 - case 12: - goto tr580 + goto tr637 + case 13: + goto tr638 case 32: - goto tr735 + goto tr635 case 34: - goto tr189 + goto tr89 case 44: - goto tr737 - case 61: - goto st7 + goto tr639 + case 46: + goto st406 + case 69: + goto st138 case 92: - goto st103 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 - } - goto st89 - st150: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof150 - } - st_case_150: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - case 108: - goto st151 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 - st151: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof151 - } - st_case_151: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - case 115: - goto st152 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 - st152: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof152 - } - st_case_152: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 + goto st140 case 101: - goto st532 + goto st138 + case 105: + goto st534 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st533 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 } - goto st89 -tr287: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st533 + goto st29 st533: if 
( m.p)++; ( m.p) == ( m.pe) { goto _test_eof533 } st_case_533: -//line plugins/parsers/influx/machine.go:18573 switch ( m.data)[( m.p)] { - case 9: - goto tr735 + case 10: + goto tr636 case 11: - goto tr736 - case 12: - goto tr580 + goto tr637 + case 13: + goto tr638 case 32: - goto tr735 + goto tr635 case 34: - goto tr189 + goto tr89 case 44: - goto tr737 - case 61: - goto st7 - case 82: - goto st153 + goto tr639 + case 46: + goto st406 + case 69: + goto st138 case 92: - goto st103 - case 114: - goto st154 + goto st140 + case 101: + goto st138 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st533 + } + case ( m.data)[( m.p)] >= 9: + goto tr635 } - goto st89 - st153: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof153 - } - st_case_153: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 85: - goto st149 - case 92: - goto st103 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 - st154: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof154 - } - st_case_154: - switch ( m.data)[( m.p)] { - case 9: - goto tr187 - case 11: - goto tr188 - case 12: - goto tr60 - case 32: - goto tr187 - case 34: - goto tr189 - case 44: - goto tr190 - case 61: - goto st7 - case 92: - goto st103 - case 117: - goto st152 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st89 -tr288: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st534 + goto st29 st534: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof534 } st_case_534: -//line plugins/parsers/influx/machine.go:18669 switch ( m.data)[( m.p)] { - case 9: - goto tr735 + case 10: + goto tr817 case 11: - goto tr736 - case 12: - goto tr580 + goto tr818 + case 13: + goto tr793 case 32: - goto tr735 + goto tr816 case 34: - goto tr189 + goto tr89 case 44: - goto tr737 - case 61: - goto st7 + goto tr819 case 92: - goto st103 - case 97: - goto st150 + goto st140 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr816 } - goto st89 -tr289: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st535 + goto st29 st535: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof535 } st_case_535: -//line plugins/parsers/influx/machine.go:18705 switch ( m.data)[( m.p)] { - case 9: - goto tr735 + case 10: + goto tr636 case 11: - goto tr736 - case 12: - goto tr580 + goto tr637 + case 13: + goto tr638 case 32: - goto tr735 + goto tr635 case 34: - goto tr189 + goto tr89 case 44: - goto tr737 - case 61: - goto st7 + goto tr639 + case 46: + goto st406 + case 69: + goto st138 case 92: - goto st103 - case 114: - goto st154 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 - } - goto st89 -tr278: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st155 - st155: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof155 - } - st_case_155: -//line plugins/parsers/influx/machine.go:18741 - switch ( m.data)[( m.p)] { - case 34: - goto st141 - case 92: - goto st141 + goto st140 + case 101: + goto st138 + case 105: + goto st534 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 + case ( m.data)[( m.p)] > 12: + if 48 <= ( 
m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st535 } case ( m.data)[( m.p)] >= 9: - goto tr61 + goto tr635 } - goto st86 -tr267: -//line plugins/parsers/influx/machine.go.rl:18 + goto st29 +tr245: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st156 - st156: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof156 - } - st_case_156: -//line plugins/parsers/influx/machine.go:18768 - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 46: - goto st157 - case 48: - goto st560 - case 61: - goto st7 - case 92: - goto st127 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st563 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 - } - goto st122 -tr268: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st157 - st157: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof157 - } - st_case_157: -//line plugins/parsers/influx/machine.go:18811 - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 92: - goto st127 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st536 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 - } - goto st122 + goto st536 st536: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof536 } st_case_536: +//line plugins/parsers/influx/machine.go:20277 switch ( m.data)[( m.p)] { - case 9: - goto tr742 + case 10: + goto tr636 case 11: - goto tr743 - case 12: - goto tr566 + goto tr637 + case 13: + goto tr638 case 32: - goto tr742 + goto tr635 case 34: - goto tr189 + goto tr89 case 44: - goto tr744 - case 61: - goto st7 + goto tr639 + case 46: + goto st406 case 69: - goto st159 + goto st138 case 92: - goto st127 + goto st140 case 101: - goto st159 + goto st138 + case 105: + goto st534 + case 117: + goto st537 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st536 + goto st533 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr635 } - goto st122 -tr743: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - - goto st537 -tr775: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - - goto st537 -tr779: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - - goto st537 -tr783: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - - goto st537 + goto st29 st537: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof537 } st_case_537: -//line plugins/parsers/influx/machine.go:18920 switch ( m.data)[( m.p)] { - case 9: - goto tr746 + case 10: + goto tr822 case 11: - goto tr747 - case 12: - goto tr514 + goto tr823 + case 13: + goto tr799 case 32: - goto tr746 + goto tr821 case 34: - goto tr201 + goto tr89 case 44: - goto tr239 - case 45: - goto tr748 - case 61: - goto st7 + goto tr824 case 92: 
- goto tr243 + goto st140 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr749 - } - case ( m.data)[( m.p)] >= 10: - goto tr357 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr821 } - goto tr241 -tr747: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 + goto st29 +tr246: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -18961,77 +20345,44 @@ tr747: goto _test_eof538 } st_case_538: -//line plugins/parsers/influx/machine.go:18965 +//line plugins/parsers/influx/machine.go:20349 switch ( m.data)[( m.p)] { - case 9: - goto tr746 + case 10: + goto tr636 case 11: - goto tr747 - case 12: - goto tr514 + goto tr637 + case 13: + goto tr638 case 32: - goto tr746 + goto tr635 case 34: - goto tr201 + goto tr89 case 44: - goto tr239 - case 45: - goto tr748 - case 61: - goto tr99 + goto tr639 + case 46: + goto st406 + case 69: + goto st138 case 92: - goto tr243 + goto st140 + case 101: + goto st138 + case 105: + goto st534 + case 117: + goto st537 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto tr749 + goto st538 } - case ( m.data)[( m.p)] >= 10: - goto tr357 + case ( m.data)[( m.p)] >= 9: + goto tr635 } - goto tr241 -tr748: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st158 - st158: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof158 - } - st_case_158: -//line plugins/parsers/influx/machine.go:19006 - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr245 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr205 - case 44: - goto tr239 - case 61: - goto tr99 - case 92: - goto st126 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st539 - } - case ( m.data)[( m.p)] >= 10: - goto tr207 - } - goto st124 -tr749: -//line plugins/parsers/influx/machine.go.rl:18 + goto st29 +tr247: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -19041,1655 +20392,1625 @@ tr749: goto _test_eof539 } st_case_539: -//line plugins/parsers/influx/machine.go:19045 +//line plugins/parsers/influx/machine.go:20396 switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr803 case 11: - goto tr751 - case 12: - goto tr520 + goto tr827 + case 13: + goto tr805 case 32: - goto tr750 + goto tr826 case 34: - goto tr205 + goto tr89 case 44: - goto tr239 - case 61: - goto tr99 + goto tr828 + case 65: + goto st141 case 92: - goto st126 + goto st140 + case 97: + goto st144 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st541 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 } - goto st124 -tr754: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st540 -tr751: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - - goto st540 + goto st29 + st141: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof141 + } + st_case_141: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 
76: + goto st142 + case 92: + goto st140 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 + st142: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof142 + } + st_case_142: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 83: + goto st143 + case 92: + goto st140 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 + st143: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof143 + } + st_case_143: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 69: + goto st540 + case 92: + goto st140 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 st540: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof540 } st_case_540: -//line plugins/parsers/influx/machine.go:19098 switch ( m.data)[( m.p)] { - case 9: - goto tr753 + case 10: + goto tr803 case 11: - goto tr754 - case 12: - goto tr523 + goto tr827 + case 13: + goto tr805 case 32: - goto tr753 + goto tr826 case 34: - goto tr201 + goto tr89 case 44: - goto tr239 - case 61: - goto tr99 + goto tr828 case 92: - goto tr243 + goto st140 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr357 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 } - goto tr241 + goto st29 + st144: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof144 + } + st_case_144: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 92: + goto st140 + case 108: + goto st145 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 + st145: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof145 + } + st_case_145: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 92: + goto st140 + case 115: + goto st146 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 + st146: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof146 + } + st_case_146: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 92: + goto st140 + case 101: + goto st540 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 +tr248: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st541 st541: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof541 } st_case_541: +//line plugins/parsers/influx/machine.go:20619 switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr803 case 11: - goto tr751 - case 12: - goto tr520 + goto tr827 + case 13: + goto tr805 case 32: - goto tr750 + goto tr826 case 34: - goto tr205 + goto tr89 case 44: - goto tr239 - case 61: - goto tr99 + goto tr828 + case 82: + goto st147 case 92: - goto st126 + goto st140 + case 114: + goto st148 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st542 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 } - goto st124 + goto st29 + st147: + if ( m.p)++; ( m.p) == ( m.pe) { + 
goto _test_eof147 + } + st_case_147: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 85: + goto st143 + case 92: + goto st140 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 + st148: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof148 + } + st_case_148: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr88 + case 13: + goto st6 + case 32: + goto tr87 + case 34: + goto tr89 + case 44: + goto tr90 + case 92: + goto st140 + case 117: + goto st146 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr87 + } + goto st29 +tr249: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st542 st542: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof542 } st_case_542: +//line plugins/parsers/influx/machine.go:20709 switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr803 case 11: - goto tr751 - case 12: - goto tr520 + goto tr827 + case 13: + goto tr805 case 32: - goto tr750 + goto tr826 case 34: - goto tr205 + goto tr89 case 44: - goto tr239 - case 61: - goto tr99 + goto tr828 case 92: - goto st126 + goto st140 + case 97: + goto st144 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st543 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 } - goto st124 + goto st29 +tr250: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st543 st543: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof543 } st_case_543: +//line plugins/parsers/influx/machine.go:20743 switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr803 case 11: - goto tr751 - case 12: - goto tr520 + goto tr827 + case 13: + goto tr805 case 32: - goto tr750 + goto tr826 case 34: - goto tr205 + goto tr89 case 44: - goto tr239 - case 61: - goto tr99 + goto tr828 case 92: - goto st126 + goto st140 + case 114: + goto st148 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st544 - } - case ( m.data)[( m.p)] >= 10: - goto tr362 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr826 } - goto st124 + goto st29 st544: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof544 } st_case_544: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st545 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st545: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof545 } st_case_545: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st546 } 
- case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st546: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof546 } st_case_546: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st547 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st547: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof547 } st_case_547: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st548 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st548: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof548 } st_case_548: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st549 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st549: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof549 } st_case_549: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st550 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st550: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof550 } st_case_550: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st551 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st551: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof551 } st_case_551: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + 
goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st552 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st552: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof552 } st_case_552: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st553 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st553: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof553 } st_case_553: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st554 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st554: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof554 } st_case_554: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st555 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st555: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof555 } st_case_555: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st556 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st556: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof556 } st_case_556: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 
+ goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st557 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st557: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof557 } st_case_557: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st558 } - case ( m.data)[( m.p)] >= 10: - goto tr362 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st124 + goto st40 st558: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof558 } st_case_558: switch ( m.data)[( m.p)] { - case 9: - goto tr750 + case 10: + goto tr600 case 11: - goto tr751 - case 12: - goto tr520 + goto tr628 + case 13: + goto tr602 case 32: - goto tr750 + goto tr627 case 34: - goto tr205 + goto tr126 case 44: - goto tr239 + goto tr90 case 61: - goto tr99 + goto tr127 case 92: - goto st126 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr362 - } - goto st124 - st159: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof159 - } - st_case_159: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr294 - case 44: - goto tr239 - case 61: - goto st7 - case 92: - goto st127 + goto st92 } switch { - case ( m.data)[( m.p)] < 43: - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - case ( m.data)[( m.p)] > 45: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st559 } - default: - goto st160 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st122 - st160: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof160 - } - st_case_160: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 92: - goto st127 - } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st559 - } - case ( m.data)[( m.p)] >= 10: - goto tr61 - } - goto st122 + goto st40 st559: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof559 } st_case_559: switch ( m.data)[( m.p)] { - case 9: - goto tr742 + case 10: + goto tr600 case 11: - goto tr743 - case 12: - goto tr566 + goto tr628 + case 13: + goto tr602 case 32: - goto tr742 + goto tr627 case 34: - goto tr189 + goto tr126 case 44: - goto tr744 + goto tr90 case 61: - goto st7 + goto tr127 case 92: - goto st127 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st559 + goto st560 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st122 + goto st40 st560: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof560 } st_case_560: switch ( m.data)[( m.p)] { - case 9: - goto tr742 + case 10: + goto tr600 case 11: - goto tr743 - case 12: - goto tr566 + goto tr628 + case 13: + goto tr602 case 32: - goto tr742 + goto tr627 case 34: - goto tr189 + goto tr126 case 44: - goto tr744 - case 46: 
- goto st536 + goto tr90 case 61: - goto st7 - case 69: - goto st159 + goto tr127 case 92: - goto st127 - case 101: - goto st159 - case 105: - goto st562 + goto st92 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st561 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr627 } - goto st122 + goto st40 st561: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof561 } st_case_561: switch ( m.data)[( m.p)] { - case 9: - goto tr742 + case 10: + goto tr600 case 11: - goto tr743 - case 12: - goto tr566 + goto tr628 + case 13: + goto tr602 case 32: - goto tr742 + goto tr627 case 34: - goto tr189 + goto tr126 case 44: - goto tr744 - case 46: - goto st536 + goto tr90 case 61: - goto st7 - case 69: - goto st159 + goto tr127 case 92: - goto st127 - case 101: - goto st159 + goto st92 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr627 + } + goto st40 +tr211: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st149 + st149: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof149 + } + st_case_149: +//line plugins/parsers/influx/machine.go:21348 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 46: + goto st150 + case 48: + goto st586 + case 92: + goto st155 } switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st561 + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st589 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr178 } - goto st122 + goto st53 +tr212: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st150 + st150: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof150 + } + st_case_150: +//line plugins/parsers/influx/machine.go:21389 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 92: + goto st155 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st562 + } + case ( m.data)[( m.p)] >= 9: + goto tr178 + } + goto st53 st562: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof562 } st_case_562: switch ( m.data)[( m.p)] { - case 9: - goto tr774 + case 10: + goto tr532 case 11: - goto tr775 - case 12: - goto tr572 + goto tr851 + case 13: + goto tr533 case 32: - goto tr774 + goto tr850 case 34: - goto tr189 + goto tr89 case 44: - goto tr776 - case 61: - goto st7 + goto tr852 + case 69: + goto st153 case 92: - goto st127 + goto st155 + case 101: + goto st153 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr389 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st562 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 } - goto st122 + goto st53 +tr851: + ( m.cs) = 563 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr883: + ( m.cs) = 563 +//line plugins/parsers/influx/machine.go.rl:86 + + err = 
m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr887: + ( m.cs) = 563 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr892: + ( m.cs) = 563 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st563: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof563 } st_case_563: +//line plugins/parsers/influx/machine.go:21546 switch ( m.data)[( m.p)] { - case 9: - goto tr742 + case 10: + goto tr273 case 11: - goto tr743 - case 12: - goto tr566 + goto tr855 + case 13: + goto st102 case 32: - goto tr742 + goto tr854 case 34: - goto tr189 + goto tr122 case 44: - goto tr744 - case 46: - goto st536 + goto tr180 + case 45: + goto tr856 case 61: - goto st7 - case 69: - goto st159 + goto st53 case 92: - goto st127 - case 101: - goto st159 - case 105: - goto st562 + goto tr184 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st563 + goto tr857 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr854 } - goto st122 -tr269: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr182 +tr855: + ( m.cs) = 564 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st564 + goto _again st564: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof564 } st_case_564: -//line plugins/parsers/influx/machine.go:19948 +//line plugins/parsers/influx/machine.go:21598 switch ( m.data)[( m.p)] { - case 9: - goto tr742 + case 10: + goto tr273 case 11: - goto tr743 - case 12: - goto tr566 + goto tr855 + case 13: + goto st102 case 32: - goto tr742 + goto tr854 case 34: - goto tr189 + goto tr122 case 44: - goto tr744 - case 46: - goto st536 + goto tr180 + case 45: + goto tr856 case 61: - goto st7 - case 69: - goto st159 + goto tr187 case 92: - goto st127 - case 101: - goto st159 - case 105: - goto st562 - case 117: - goto st565 + goto tr184 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st561 + goto tr857 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr854 } - goto st122 + goto tr182 +tr856: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st151 + st151: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof151 + } + st_case_151: +//line plugins/parsers/influx/machine.go:21639 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr186 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr126 + case 44: + goto 
tr180 + case 61: + goto tr187 + case 92: + goto st152 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st565 + } + case ( m.data)[( m.p)] >= 9: + goto tr178 + } + goto st55 +tr857: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st565 st565: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof565 } st_case_565: +//line plugins/parsers/influx/machine.go:21678 switch ( m.data)[( m.p)] { - case 9: - goto tr778 + case 10: + goto tr674 case 11: - goto tr779 - case 12: - goto tr576 + goto tr859 + case 13: + goto tr676 case 32: - goto tr778 + goto tr858 case 34: - goto tr189 + goto tr126 case 44: - goto tr780 + goto tr180 case 61: - goto st7 + goto tr187 case 92: - goto st127 + goto st152 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr393 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st567 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st122 -tr270: -//line plugins/parsers/influx/machine.go.rl:18 + goto st55 +tr862: + ( m.cs) = 566 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st566 + goto _again +tr859: + ( m.cs) = 566 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st566: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof566 } st_case_566: -//line plugins/parsers/influx/machine.go:20024 +//line plugins/parsers/influx/machine.go:21751 switch ( m.data)[( m.p)] { - case 9: - goto tr742 + case 10: + goto tr273 case 11: - goto tr743 - case 12: - goto tr566 + goto tr862 + case 13: + goto st102 case 32: - goto tr742 + goto tr861 case 34: - goto tr189 + goto tr122 case 44: - goto tr744 - case 46: - goto st536 + goto tr180 case 61: - goto st7 - case 69: - goto st159 + goto tr187 case 92: - goto st127 - case 101: - goto st159 - case 105: - goto st562 - case 117: - goto st565 + goto tr184 } - switch { - case ( m.data)[( m.p)] > 13: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st566 - } - case ( m.data)[( m.p)] >= 10: - goto tr383 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr861 } - goto st122 -tr271: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr182 +tr184: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st567 + goto st152 + st152: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof152 + } + st_case_152: +//line plugins/parsers/influx/machine.go:21785 + switch ( m.data)[( m.p)] { + case 34: + goto st55 + case 92: + goto st55 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st10 st567: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof567 } st_case_567: -//line plugins/parsers/influx/machine.go:20073 switch ( m.data)[( m.p)] { - case 9: - goto tr782 + case 10: + goto tr674 case 11: - goto tr783 - case 12: - goto tr580 + goto tr859 + case 13: + goto tr676 case 32: - goto tr782 + goto tr858 case 34: - goto tr189 + goto tr126 case 44: - goto tr784 + goto tr180 
case 61: - goto st7 - case 65: - goto st161 + goto tr187 case 92: - goto st127 - case 97: - goto st164 + goto st152 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st568 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st122 - st161: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof161 - } - st_case_161: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 76: - goto st162 - case 92: - goto st127 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st122 - st162: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof162 - } - st_case_162: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 83: - goto st163 - case 92: - goto st127 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st122 - st163: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof163 - } - st_case_163: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 69: - goto st568 - case 92: - goto st127 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st122 + goto st55 st568: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof568 } st_case_568: switch ( m.data)[( m.p)] { - case 9: - goto tr782 + case 10: + goto tr674 case 11: - goto tr783 - case 12: - goto tr580 + goto tr859 + case 13: + goto tr676 case 32: - goto tr782 + goto tr858 case 34: - goto tr189 + goto tr126 case 44: - goto tr784 + goto tr180 case 61: - goto st7 + goto tr187 case 92: - goto st127 + goto st152 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st569 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st122 - st164: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof164 - } - st_case_164: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 92: - goto st127 - case 108: - goto st165 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st122 - st165: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof165 - } - st_case_165: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 92: - goto st127 - case 115: - goto st166 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st122 - st166: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof166 - } - st_case_166: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 92: - goto st127 - case 101: - goto st568 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st122 -tr272: -//line 
plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st569 + goto st55 st569: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof569 } st_case_569: -//line plugins/parsers/influx/machine.go:20312 switch ( m.data)[( m.p)] { - case 9: - goto tr782 + case 10: + goto tr674 case 11: - goto tr783 - case 12: - goto tr580 + goto tr859 + case 13: + goto tr676 case 32: - goto tr782 + goto tr858 case 34: - goto tr189 + goto tr126 case 44: - goto tr784 + goto tr180 case 61: - goto st7 - case 82: - goto st167 + goto tr187 case 92: - goto st127 - case 114: - goto st168 + goto st152 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st570 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st122 - st167: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof167 - } - st_case_167: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 85: - goto st163 - case 92: - goto st127 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st122 - st168: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof168 - } - st_case_168: - switch ( m.data)[( m.p)] { - case 9: - goto tr237 - case 11: - goto tr238 - case 12: - goto tr60 - case 32: - goto tr237 - case 34: - goto tr189 - case 44: - goto tr239 - case 61: - goto st7 - case 92: - goto st127 - case 117: - goto st166 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr61 - } - goto st122 -tr273: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st570 + goto st55 st570: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof570 } st_case_570: -//line plugins/parsers/influx/machine.go:20408 switch ( m.data)[( m.p)] { - case 9: - goto tr782 + case 10: + goto tr674 case 11: - goto tr783 - case 12: - goto tr580 + goto tr859 + case 13: + goto tr676 case 32: - goto tr782 + goto tr858 case 34: - goto tr189 + goto tr126 case 44: - goto tr784 + goto tr180 case 61: - goto st7 + goto tr187 case 92: - goto st127 - case 97: - goto st164 + goto st152 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st571 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st122 -tr274: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st571 + goto st55 st571: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof571 } st_case_571: -//line plugins/parsers/influx/machine.go:20444 switch ( m.data)[( m.p)] { - case 9: - goto tr782 + case 10: + goto tr674 case 11: - goto tr783 - case 12: - goto tr580 + goto tr859 + case 13: + goto tr676 case 32: - goto tr782 + goto tr858 case 34: - goto tr189 + goto tr126 case 44: - goto tr784 + goto tr180 case 61: - goto st7 + goto tr187 case 92: - goto st127 - case 114: - goto st168 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 - } - goto st122 -tr260: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st169 - st169: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof169 - } - st_case_169: -//line plugins/parsers/influx/machine.go:20480 - switch ( m.data)[( m.p)] { - case 34: - goto st135 - case 92: - goto st135 + goto st152 } switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto 
tr61 - } - case ( m.data)[( m.p)] >= 9: - goto tr61 - } - goto st86 -tr85: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st170 - st170: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof170 - } - st_case_170: -//line plugins/parsers/influx/machine.go:20507 - switch ( m.data)[( m.p)] { - case 34: - goto st40 - case 92: - goto st40 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] >= 9: - goto tr5 - } - goto st2 -tr248: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st171 - st171: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof171 - } - st_case_171: -//line plugins/parsers/influx/machine.go:20534 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 46: - goto st172 - case 48: - goto st576 - case 92: - goto st170 - } - switch { - case ( m.data)[( m.p)] > 13: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st579 - } - case ( m.data)[( m.p)] >= 10: - goto tr5 - } - goto st40 -tr249: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st172 - st172: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof172 - } - st_case_172: -//line plugins/parsers/influx/machine.go:20575 - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st170 - } - switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st572 } - case ( m.data)[( m.p)] >= 10: - goto tr5 + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 + goto st55 st572: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof572 } st_case_572: switch ( m.data)[( m.p)] { - case 9: - goto tr789 + case 10: + goto tr674 case 11: - goto tr790 - case 12: - goto tr791 + goto tr859 + case 13: + goto tr676 case 32: - goto tr789 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr792 - case 69: - goto st173 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 - case 101: - goto st173 + goto st152 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st572 + goto st573 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 - st173: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof173 - } - st_case_173: - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr318 - case 44: - goto tr90 - case 92: - goto st170 - } - switch { - case ( m.data)[( m.p)] < 43: - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st575 - } - default: - goto st174 - } - goto st40 -tr318: -//line plugins/parsers/influx/machine.go.rl:104 - - m.handler.AddString(key, m.text()) - - goto st573 + goto st55 st573: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof573 } st_case_573: -//line plugins/parsers/influx/machine.go:20680 switch ( m.data)[( m.p)] { case 10: - goto tr357 + goto tr674 case 11: - goto tr642 + goto tr859 case 13: - goto tr357 + goto tr676 case 32: - goto tr482 + goto tr858 + case 34: + goto tr126 case 44: - goto tr484 + goto tr180 + case 61: 
+ goto tr187 case 92: - goto st133 + goto st152 } switch { case ( m.data)[( m.p)] > 12: @@ -20697,9 +22018,9 @@ tr318: goto st574 } case ( m.data)[( m.p)] >= 9: - goto tr482 + goto tr858 } - goto st2 + goto st55 st574: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof574 @@ -20707,798 +22028,541 @@ tr318: st_case_574: switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr674 case 11: - goto tr794 + goto tr859 case 13: - goto tr383 + goto tr676 case 32: - goto tr791 + goto tr858 + case 34: + goto tr126 case 44: - goto tr795 + goto tr180 + case 61: + goto tr187 case 92: - goto st133 + goto st152 } switch { case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st574 - } - case ( m.data)[( m.p)] >= 9: - goto tr791 - } - goto st2 - st174: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof174 - } - st_case_174: - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st170 - } - switch { - case ( m.data)[( m.p)] > 13: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st575 } - case ( m.data)[( m.p)] >= 10: - goto tr5 + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 + goto st55 st575: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof575 } st_case_575: switch ( m.data)[( m.p)] { - case 9: - goto tr789 + case 10: + goto tr674 case 11: - goto tr790 - case 12: - goto tr791 + goto tr859 + case 13: + goto tr676 case 32: - goto tr789 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr792 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 + goto st152 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st575 + goto st576 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 + goto st55 st576: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof576 } st_case_576: switch ( m.data)[( m.p)] { - case 9: - goto tr789 + case 10: + goto tr674 case 11: - goto tr790 - case 12: - goto tr791 + goto tr859 + case 13: + goto tr676 case 32: - goto tr789 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr792 - case 46: - goto st572 - case 69: - goto st173 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 - case 101: - goto st173 - case 105: - goto st578 + goto st152 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { goto st577 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 + goto st55 st577: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof577 } st_case_577: switch ( m.data)[( m.p)] { - case 9: - goto tr789 + case 10: + goto tr674 case 11: - goto tr790 - case 12: - goto tr791 + goto tr859 + case 13: + goto tr676 case 32: - goto tr789 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr792 - case 46: - goto st572 - case 69: - goto st173 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 - case 101: - goto st173 + goto st152 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st577 + goto st578 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 + goto st55 st578: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof578 } st_case_578: switch ( m.data)[( m.p)] { - case 9: - goto tr798 + 
case 10: + goto tr674 case 11: - goto tr799 - case 12: - goto tr800 + goto tr859 + case 13: + goto tr676 case 32: - goto tr798 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr801 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 + goto st152 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr389 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st579 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 + goto st55 st579: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof579 } st_case_579: switch ( m.data)[( m.p)] { - case 9: - goto tr789 + case 10: + goto tr674 case 11: - goto tr790 - case 12: - goto tr791 + goto tr859 + case 13: + goto tr676 case 32: - goto tr789 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr792 - case 46: - goto st572 - case 69: - goto st173 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 - case 101: - goto st173 - case 105: - goto st578 + goto st152 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st579 + goto st580 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 -tr250: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st580 + goto st55 st580: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof580 } st_case_580: -//line plugins/parsers/influx/machine.go:20940 switch ( m.data)[( m.p)] { - case 9: - goto tr789 + case 10: + goto tr674 case 11: - goto tr790 - case 12: - goto tr791 + goto tr859 + case 13: + goto tr676 case 32: - goto tr789 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr792 - case 46: - goto st572 - case 69: - goto st173 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 - case 101: - goto st173 - case 105: - goto st578 - case 117: - goto st581 + goto st152 } switch { - case ( m.data)[( m.p)] > 13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st577 + goto st581 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 + goto st55 st581: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof581 } st_case_581: switch ( m.data)[( m.p)] { - case 9: - goto tr803 + case 10: + goto tr674 case 11: - goto tr804 - case 12: - goto tr805 + goto tr859 + case 13: + goto tr676 case 32: - goto tr803 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr806 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 + goto st152 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr393 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st582 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 -tr251: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st582 + goto st55 st582: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof582 } st_case_582: -//line plugins/parsers/influx/machine.go:21012 switch ( m.data)[( m.p)] { - case 9: - goto tr789 + case 10: + goto tr674 case 11: - goto tr790 - case 12: - goto tr791 + goto tr859 + case 13: + goto tr676 case 32: - goto tr789 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr792 - case 46: - goto st572 - case 69: - goto st173 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 - case 101: - goto st173 - case 105: - goto st578 - case 117: - goto st581 + goto st152 } switch { - case ( m.data)[( m.p)] > 
13: + case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st582 + goto st583 } - case ( m.data)[( m.p)] >= 10: - goto tr383 + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 -tr252: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st583 + goto st55 st583: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof583 } st_case_583: -//line plugins/parsers/influx/machine.go:21059 switch ( m.data)[( m.p)] { - case 9: - goto tr808 + case 10: + goto tr674 case 11: - goto tr809 - case 12: - goto tr810 + goto tr859 + case 13: + goto tr676 case 32: - goto tr808 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr811 - case 65: - goto st175 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 - case 97: - goto st178 + goto st152 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st584 + } + case ( m.data)[( m.p)] >= 9: + goto tr858 } - goto st40 - st175: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof175 - } - st_case_175: - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 76: - goto st176 - case 92: - goto st170 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st40 - st176: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof176 - } - st_case_176: - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 83: - goto st177 - case 92: - goto st170 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st40 - st177: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof177 - } - st_case_177: - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 69: - goto st584 - case 92: - goto st170 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st40 + goto st55 st584: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof584 } st_case_584: switch ( m.data)[( m.p)] { - case 9: - goto tr808 + case 10: + goto tr674 case 11: - goto tr809 - case 12: - goto tr810 + goto tr859 + case 13: + goto tr676 case 32: - goto tr808 + goto tr858 case 34: - goto tr89 + goto tr126 case 44: - goto tr811 + goto tr180 + case 61: + goto tr187 case 92: - goto st170 + goto st152 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr858 } - goto st40 - st178: + goto st55 + st153: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof178 + goto _test_eof153 } - st_case_178: + st_case_153: switch ( m.data)[( m.p)] { - case 9: - goto tr87 + case 10: + goto tr28 case 11: - goto tr88 - case 12: - goto tr4 + goto tr179 + case 13: + goto st6 case 32: - goto tr87 + goto tr178 case 34: - goto tr89 + goto tr315 case 44: - goto tr90 + goto tr180 case 92: - goto st170 - case 108: - goto st179 + goto st155 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st585 + } + default: + goto st154 } - goto st40 - 
st179: + goto st53 + st154: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof179 + goto _test_eof154 } - st_case_179: + st_case_154: switch ( m.data)[( m.p)] { - case 9: - goto tr87 + case 10: + goto tr28 case 11: - goto tr88 - case 12: - goto tr4 + goto tr179 + case 13: + goto st6 case 32: - goto tr87 + goto tr178 case 34: goto tr89 case 44: - goto tr90 + goto tr180 case 92: - goto st170 - case 115: - goto st180 + goto st155 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st585 + } + case ( m.data)[( m.p)] >= 9: + goto tr178 } - goto st40 - st180: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof180 - } - st_case_180: - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st170 - case 101: - goto st584 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st40 -tr253: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st585 + goto st53 st585: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof585 } st_case_585: -//line plugins/parsers/influx/machine.go:21282 switch ( m.data)[( m.p)] { - case 9: - goto tr808 + case 10: + goto tr532 case 11: - goto tr809 - case 12: - goto tr810 + goto tr851 + case 13: + goto tr533 case 32: - goto tr808 + goto tr850 case 34: goto tr89 case 44: - goto tr811 - case 82: - goto st181 + goto tr852 case 92: - goto st170 - case 114: - goto st182 + goto st155 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st585 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 } - goto st40 - st181: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof181 - } - st_case_181: - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 85: - goto st177 - case 92: - goto st170 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st40 - st182: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof182 - } - st_case_182: - switch ( m.data)[( m.p)] { - case 9: - goto tr87 - case 11: - goto tr88 - case 12: - goto tr4 - case 32: - goto tr87 - case 34: - goto tr89 - case 44: - goto tr90 - case 92: - goto st170 - case 117: - goto st180 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr5 - } - goto st40 -tr254: -//line plugins/parsers/influx/machine.go.rl:18 + goto st53 +tr338: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st586 + goto st155 + st155: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof155 + } + st_case_155: +//line plugins/parsers/influx/machine.go:22477 + switch ( m.data)[( m.p)] { + case 34: + goto st53 + case 92: + goto st53 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st1 st586: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof586 } st_case_586: -//line plugins/parsers/influx/machine.go:21372 switch ( m.data)[( m.p)] { - case 9: - goto tr808 + case 10: + goto tr532 case 11: - goto tr809 - case 12: - goto tr810 + goto tr851 + case 13: + goto tr533 case 32: - goto tr808 + goto tr850 case 34: goto tr89 case 44: - goto tr811 + goto tr852 + case 46: + 
goto st562 + case 69: + goto st153 case 92: - goto st170 - case 97: - goto st178 + goto st155 + case 101: + goto st153 + case 105: + goto st588 } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st587 + } + case ( m.data)[( m.p)] >= 9: + goto tr850 } - goto st40 -tr255: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st587 + goto st53 st587: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof587 } st_case_587: -//line plugins/parsers/influx/machine.go:21406 switch ( m.data)[( m.p)] { - case 9: - goto tr808 + case 10: + goto tr532 case 11: - goto tr809 - case 12: - goto tr810 + goto tr851 + case 13: + goto tr533 case 32: - goto tr808 + goto tr850 case 34: goto tr89 case 44: - goto tr811 - case 92: - goto st170 - case 114: - goto st182 - } - if 10 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr397 - } - goto st40 -tr72: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st183 - st183: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof183 - } - st_case_183: -//line plugins/parsers/influx/machine.go:21440 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 + goto tr852 case 46: - goto st184 - case 48: - goto st589 + goto st562 + case 69: + goto st153 case 92: - goto st133 - } - switch { - case ( m.data)[( m.p)] > 12: - if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st592 - } - case ( m.data)[( m.p)] >= 9: - goto tr4 - } - goto st2 -tr73: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st184 - st184: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof184 - } - st_case_184: -//line plugins/parsers/influx/machine.go:21479 - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 92: - goto st133 + goto st155 + case 101: + goto st153 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st588 + goto st587 } case ( m.data)[( m.p)] >= 9: - goto tr4 + goto tr850 } - goto st2 + goto st53 st588: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof588 @@ -21506,93 +22570,24 @@ tr73: st_case_588: switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr737 case 11: - goto tr794 + goto tr883 case 13: - goto tr383 + goto tr739 case 32: - goto tr791 - case 44: - goto tr795 - case 69: - goto st185 - case 92: - goto st133 - case 101: - goto st185 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st588 - } - case ( m.data)[( m.p)] >= 9: - goto tr791 - } - goto st2 - st185: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof185 - } - st_case_185: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 + goto tr882 case 34: - goto st186 + goto tr89 case 44: - goto tr7 + goto tr884 case 92: - goto st133 + goto st155 } - switch { - case ( m.data)[( m.p)] < 43: - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 - } - case ( m.data)[( m.p)] > 45: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st574 - } - default: - goto st186 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr882 } - goto st2 - st186: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof186 - } - st_case_186: - switch ( m.data)[( m.p)] { - case 10: 
- goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 92: - goto st133 - } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st574 - } - case ( m.data)[( m.p)] >= 9: - goto tr4 - } - goto st2 + goto st53 st589: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof589 @@ -21600,69 +22595,84 @@ tr73: st_case_589: switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr532 case 11: - goto tr794 + goto tr851 case 13: - goto tr383 + goto tr533 case 32: - goto tr791 + goto tr850 + case 34: + goto tr89 case 44: - goto tr795 + goto tr852 case 46: - goto st588 + goto st562 case 69: - goto st185 + goto st153 case 92: - goto st133 + goto st155 case 101: - goto st185 + goto st153 case 105: - goto st591 + goto st588 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st590 + goto st589 } case ( m.data)[( m.p)] >= 9: - goto tr791 + goto tr850 } - goto st2 + goto st53 +tr213: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st590 st590: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof590 } st_case_590: +//line plugins/parsers/influx/machine.go:22641 switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr532 case 11: - goto tr794 + goto tr851 case 13: - goto tr383 + goto tr533 case 32: - goto tr791 + goto tr850 + case 34: + goto tr89 case 44: - goto tr795 + goto tr852 case 46: - goto st588 + goto st562 case 69: - goto st185 + goto st153 case 92: - goto st133 + goto st155 case 101: - goto st185 + goto st153 + case 105: + goto st588 + case 117: + goto st591 } switch { case ( m.data)[( m.p)] > 12: if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st590 + goto st587 } case ( m.data)[( m.p)] >= 9: - goto tr791 + goto tr850 } - goto st2 + goto st53 st591: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof591 @@ -21670,47 +22680,60 @@ tr73: st_case_591: switch ( m.data)[( m.p)] { case 10: - goto tr389 + goto tr743 case 11: - goto tr819 + goto tr887 case 13: - goto tr389 + goto tr745 case 32: - goto tr800 + goto tr886 + case 34: + goto tr89 case 44: - goto tr820 + goto tr888 case 92: - goto st133 + goto st155 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr800 + goto tr886 } - goto st2 + goto st53 +tr214: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st592 st592: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof592 } st_case_592: +//line plugins/parsers/influx/machine.go:22713 switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr532 case 11: - goto tr794 + goto tr851 case 13: - goto tr383 + goto tr533 case 32: - goto tr791 + goto tr850 + case 34: + goto tr89 case 44: - goto tr795 + goto tr852 case 46: - goto st588 + goto st562 case 69: - goto st185 + goto st153 case 92: - goto st133 + goto st155 case 101: - goto st185 + goto st153 case 105: + goto st588 + case 117: goto st591 } switch { @@ -21719,11 +22742,11 @@ tr73: goto st592 } case ( m.data)[( m.p)] >= 9: - goto tr791 + goto tr850 } - goto st2 -tr74: -//line plugins/parsers/influx/machine.go.rl:18 + goto st53 +tr215: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -21733,40 +22756,112 @@ tr74: goto _test_eof593 } st_case_593: -//line plugins/parsers/influx/machine.go:21737 +//line plugins/parsers/influx/machine.go:22760 switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr891 case 11: - goto tr794 + goto tr892 case 13: - goto tr383 + goto tr751 case 32: - goto tr791 + goto tr890 + case 34: + goto tr89 case 
44: - goto tr795 - case 46: - goto st588 - case 69: - goto st185 + goto tr893 + case 65: + goto st156 case 92: - goto st133 - case 101: - goto st185 - case 105: - goto st591 - case 117: + goto st155 + case 97: + goto st159 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr890 + } + goto st53 + st156: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof156 + } + st_case_156: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 76: + goto st157 + case 92: + goto st155 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 + st157: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof157 + } + st_case_157: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 83: + goto st158 + case 92: + goto st155 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 + st158: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof158 + } + st_case_158: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 69: goto st594 + case 92: + goto st155 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st590 - } - case ( m.data)[( m.p)] >= 9: - goto tr791 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 } - goto st2 + goto st53 st594: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof594 @@ -21774,24 +22869,107 @@ tr74: st_case_594: switch ( m.data)[( m.p)] { case 10: - goto tr393 + goto tr891 case 11: - goto tr822 + goto tr892 case 13: - goto tr393 + goto tr751 case 32: - goto tr805 + goto tr890 + case 34: + goto tr89 case 44: - goto tr823 + goto tr893 case 92: - goto st133 + goto st155 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr805 + goto tr890 } - goto st2 -tr75: -//line plugins/parsers/influx/machine.go.rl:18 + goto st53 + st159: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof159 + } + st_case_159: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 92: + goto st155 + case 108: + goto st160 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 + st160: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof160 + } + st_case_160: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 92: + goto st155 + case 115: + goto st161 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 + st161: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof161 + } + st_case_161: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 92: + goto st155 + case 101: + goto st594 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 +tr216: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -21801,42 +22979,87 @@ tr75: goto _test_eof595 } st_case_595: -//line plugins/parsers/influx/machine.go:21805 +//line 
plugins/parsers/influx/machine.go:22983 switch ( m.data)[( m.p)] { case 10: - goto tr383 + goto tr891 case 11: - goto tr794 + goto tr892 case 13: - goto tr383 + goto tr751 case 32: - goto tr791 + goto tr890 + case 34: + goto tr89 case 44: - goto tr795 - case 46: - goto st588 - case 69: - goto st185 + goto tr893 + case 82: + goto st162 case 92: - goto st133 - case 101: - goto st185 - case 105: - goto st591 + goto st155 + case 114: + goto st163 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr890 + } + goto st53 + st162: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof162 + } + st_case_162: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 85: + goto st158 + case 92: + goto st155 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 + } + goto st53 + st163: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof163 + } + st_case_163: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr179 + case 13: + goto st6 + case 32: + goto tr178 + case 34: + goto tr89 + case 44: + goto tr180 + case 92: + goto st155 case 117: - goto st594 + goto st161 } - switch { - case ( m.data)[( m.p)] > 12: - if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { - goto st595 - } - case ( m.data)[( m.p)] >= 9: - goto tr791 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr178 } - goto st2 -tr76: -//line plugins/parsers/influx/machine.go.rl:18 + goto st53 +tr217: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -21846,204 +23069,289 @@ tr76: goto _test_eof596 } st_case_596: -//line plugins/parsers/influx/machine.go:21850 +//line plugins/parsers/influx/machine.go:23073 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr891 case 11: - goto tr825 + goto tr892 case 13: - goto tr397 + goto tr751 case 32: - goto tr810 + goto tr890 + case 34: + goto tr89 case 44: - goto tr826 - case 65: - goto st187 + goto tr893 case 92: - goto st133 + goto st155 case 97: - goto st190 + goto st159 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr810 + goto tr890 } - goto st2 - st187: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof187 - } - st_case_187: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 76: - goto st188 - case 92: - goto st133 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 - } - goto st2 - st188: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof188 - } - st_case_188: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 83: - goto st189 - case 92: - goto st133 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 - } - goto st2 - st189: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof189 - } - st_case_189: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 69: - goto st597 - case 92: - goto st133 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 - } - goto st2 + goto st53 +tr218: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st597 st597: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof597 } st_case_597: +//line plugins/parsers/influx/machine.go:23107 switch ( m.data)[( m.p)] { case 10: - goto tr397 + goto tr891 case 
11: - goto tr825 + goto tr892 case 13: - goto tr397 + goto tr751 case 32: - goto tr810 + goto tr890 + case 34: + goto tr89 case 44: - goto tr826 + goto tr893 case 92: - goto st133 + goto st155 + case 114: + goto st163 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr810 + goto tr890 } - goto st2 - st190: + goto st53 + st164: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof190 + goto _test_eof164 } - st_case_190: + st_case_164: switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr28 case 11: - goto tr6 + goto tr337 case 13: - goto tr5 + goto st6 case 32: - goto tr4 + goto st164 + case 34: + goto tr116 + case 35: + goto st6 case 44: - goto tr7 + goto st6 case 92: - goto st133 - case 108: - goto st191 + goto tr338 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 + goto st164 } - goto st2 - st191: + goto tr335 +tr337: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st165 + st165: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof191 + goto _test_eof165 } - st_case_191: + st_case_165: +//line plugins/parsers/influx/machine.go:23168 switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr28 case 11: - goto tr6 + goto tr340 case 13: - goto tr5 + goto st6 case 32: - goto tr4 + goto tr339 + case 34: + goto tr83 + case 35: + goto st53 case 44: - goto tr7 + goto tr180 case 92: - goto st133 - case 115: - goto st192 + goto tr338 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 + goto tr339 } - goto st2 - st192: + goto tr335 +tr339: + ( m.cs) = 166 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st166: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof192 + goto _test_eof166 } - st_case_192: + st_case_166: +//line plugins/parsers/influx/machine.go:23209 switch ( m.data)[( m.p)] { case 10: - goto tr5 + goto tr28 case 11: - goto tr6 + goto tr342 case 13: - goto tr5 + goto st6 case 32: - goto tr4 + goto st166 + case 34: + goto tr122 + case 35: + goto tr158 case 44: - goto tr7 + goto st6 + case 61: + goto tr335 case 92: - goto st133 - case 101: - goto st597 + goto tr184 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 + goto st166 } - goto st2 -tr77: -//line plugins/parsers/influx/machine.go.rl:18 + goto tr182 +tr342: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st167 +tr343: + ( m.cs) = 167 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st167: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof167 + } + st_case_167: +//line plugins/parsers/influx/machine.go:23262 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr343 + case 13: + goto st6 + case 32: + goto tr339 + case 34: + goto tr122 + case 44: + goto tr180 + case 61: + goto tr344 + case 92: + goto tr184 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr339 + } + goto tr182 +tr340: + ( m.cs) = 168 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st168: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof168 + } + st_case_168: +//line 
plugins/parsers/influx/machine.go:23307 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr343 + case 13: + goto st6 + case 32: + goto tr339 + case 34: + goto tr122 + case 44: + goto tr180 + case 61: + goto tr335 + case 92: + goto tr184 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr339 + } + goto tr182 +tr538: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st169 + st169: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof169 + } + st_case_169: +//line plugins/parsers/influx/machine.go:23341 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st598 + } + goto st6 +tr539: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p @@ -22053,143 +23361,1791 @@ tr77: goto _test_eof598 } st_case_598: -//line plugins/parsers/influx/machine.go:22057 +//line plugins/parsers/influx/machine.go:23365 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 11: - goto tr825 + goto tr674 case 13: - goto tr397 + goto tr676 case 32: - goto tr810 - case 44: - goto tr826 - case 82: - goto st193 + goto tr673 + case 34: + goto tr29 case 92: - goto st133 - case 114: - goto st194 + goto st73 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr810 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st599 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st2 - st193: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof193 - } - st_case_193: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 85: - goto st189 - case 92: - goto st133 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 - } - goto st2 - st194: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof194 - } - st_case_194: - switch ( m.data)[( m.p)] { - case 10: - goto tr5 - case 11: - goto tr6 - case 13: - goto tr5 - case 32: - goto tr4 - case 44: - goto tr7 - case 92: - goto st133 - case 117: - goto st192 - } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr4 - } - goto st2 -tr78: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st599 + goto st6 st599: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof599 } st_case_599: -//line plugins/parsers/influx/machine.go:22141 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 11: - goto tr825 + goto tr674 case 13: - goto tr397 + goto tr676 case 32: - goto tr810 - case 44: - goto tr826 + goto tr673 + case 34: + goto tr29 case 92: - goto st133 - case 97: - goto st190 + goto st73 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr810 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st600 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 } - goto st2 -tr79: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st600 + goto st6 st600: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof600 } st_case_600: -//line plugins/parsers/influx/machine.go:22173 switch ( m.data)[( m.p)] { case 10: - goto tr397 - case 11: - goto tr825 + goto tr674 case 13: - goto tr397 + goto tr676 case 32: - goto tr810 - case 44: - goto tr826 + goto tr673 + case 34: + goto tr29 case 92: - goto st133 - case 114: - goto st194 + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st601 + 
} + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st601: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof601 + } + st_case_601: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st602 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st602: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof602 + } + st_case_602: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st603 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st603: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof603 + } + st_case_603: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st604 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st604: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof604 + } + st_case_604: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st605 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st605: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof605 + } + st_case_605: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st606 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st606: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof606 + } + st_case_606: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st607 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st607: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof607 + } + st_case_607: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st608 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st608: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof608 + } + st_case_608: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st609 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st609: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof609 + } + st_case_609: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 
92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st610 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st610: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof610 + } + st_case_610: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st611 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st611: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof611 + } + st_case_611: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st612 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st612: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof612 + } + st_case_612: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st613 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st613: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof613 + } + st_case_613: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st614 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st614: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof614 + } + st_case_614: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st615 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st615: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof615 + } + st_case_615: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st616 + } + case ( m.data)[( m.p)] >= 9: + goto tr673 + } + goto st6 + st616: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof616 + } + st_case_616: + switch ( m.data)[( m.p)] { + case 10: + goto tr674 + case 13: + goto tr676 + case 32: + goto tr673 + case 34: + goto tr29 + case 92: + goto st73 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr810 + goto tr673 } - goto st2 + goto st6 +tr917: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st170 +tr534: + ( m.cs) = 170 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr924: + ( m.cs) = 170 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr926: + ( m.cs) = 170 +//line 
plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr929: + ( m.cs) = 170 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st170: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof170 + } + st_case_170: +//line plugins/parsers/influx/machine.go:23913 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr347 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr346 +tr346: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st171 + st171: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof171 + } + st_case_171: +//line plugins/parsers/influx/machine.go:23945 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr349 + case 92: + goto st183 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st171 +tr349: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st172 + st172: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof172 + } + st_case_172: +//line plugins/parsers/influx/machine.go:23977 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr351 + case 45: + goto tr165 + case 46: + goto tr166 + case 48: + goto tr167 + case 70: + goto tr352 + case 84: + goto tr353 + case 92: + goto st73 + case 102: + goto tr354 + case 116: + goto tr355 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr168 + } + goto st6 +tr351: + ( m.cs) = 617 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st617: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof617 + } + st_case_617: +//line plugins/parsers/influx/machine.go:24022 + switch ( m.data)[( m.p)] { + case 10: + goto tr665 + case 13: + goto tr667 + case 32: + goto tr916 + case 34: + goto tr25 + case 44: + goto tr917 + case 92: + goto tr26 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr916 + } + goto tr23 +tr167: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st618 + st618: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof618 + } + st_case_618: +//line plugins/parsers/influx/machine.go:24052 + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 13: + goto tr533 + case 32: + goto tr531 + case 34: + goto tr29 + case 44: + goto tr534 + case 46: + goto st325 + case 69: + goto st173 + case 92: + goto st73 + case 101: + goto st173 + case 105: + goto st623 + case 117: + goto st624 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st619 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 + } + goto st6 + st619: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof619 + } + st_case_619: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 13: + goto tr533 + case 32: + goto tr531 + case 34: + goto tr29 + case 44: + goto tr534 + case 46: + goto st325 + case 69: + goto st173 + case 92: + goto st73 + case 101: + goto st173 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= 
( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st619 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 + } + goto st6 + st173: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof173 + } + st_case_173: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr356 + case 43: + goto st174 + case 45: + goto st174 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st622 + } + goto st6 +tr356: + ( m.cs) = 620 +//line plugins/parsers/influx/machine.go.rl:148 + + err = m.handler.AddString(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st620: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof620 + } + st_case_620: +//line plugins/parsers/influx/machine.go:24159 + switch ( m.data)[( m.p)] { + case 10: + goto tr101 + case 13: + goto st32 + case 32: + goto st271 + case 44: + goto st35 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st621 + } + case ( m.data)[( m.p)] >= 9: + goto st271 + } + goto tr103 + st621: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof621 + } + st_case_621: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 13: + goto tr732 + case 32: + goto tr921 + case 44: + goto tr922 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st621 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 + } + goto tr103 + st174: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof174 + } + st_case_174: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st622 + } + goto st6 + st622: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof622 + } + st_case_622: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 13: + goto tr533 + case 32: + goto tr531 + case 34: + goto tr29 + case 44: + goto tr534 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st622 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 + } + goto st6 + st623: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof623 + } + st_case_623: + switch ( m.data)[( m.p)] { + case 10: + goto tr737 + case 13: + goto tr739 + case 32: + goto tr923 + case 34: + goto tr29 + case 44: + goto tr924 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr923 + } + goto st6 + st624: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof624 + } + st_case_624: + switch ( m.data)[( m.p)] { + case 10: + goto tr743 + case 13: + goto tr745 + case 32: + goto tr925 + case 34: + goto tr29 + case 44: + goto tr926 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr925 + } + goto st6 +tr168: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st625 + st625: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof625 + } + st_case_625: +//line plugins/parsers/influx/machine.go:24305 + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 13: + goto tr533 + case 32: + goto tr531 + case 34: + goto tr29 + case 44: + goto tr534 + case 46: + goto st325 + case 69: + goto st173 + case 92: + goto st73 + case 101: + goto st173 + case 105: + goto st623 + case 117: + goto st624 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st625 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 + } + 
goto st6 +tr352: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st626 + st626: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof626 + } + st_case_626: +//line plugins/parsers/influx/machine.go:24350 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 65: + goto st175 + case 92: + goto st73 + case 97: + goto st178 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 + st175: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof175 + } + st_case_175: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 76: + goto st176 + case 92: + goto st73 + } + goto st6 + st176: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof176 + } + st_case_176: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 83: + goto st177 + case 92: + goto st73 + } + goto st6 + st177: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof177 + } + st_case_177: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 69: + goto st627 + case 92: + goto st73 + } + goto st6 + st627: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof627 + } + st_case_627: + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 + st178: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof178 + } + st_case_178: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 108: + goto st179 + } + goto st6 + st179: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof179 + } + st_case_179: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 115: + goto st180 + } + goto st6 + st180: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof180 + } + st_case_180: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 101: + goto st627 + } + goto st6 +tr353: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st628 + st628: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof628 + } + st_case_628: +//line plugins/parsers/influx/machine.go:24503 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 82: + goto st181 + case 92: + goto st73 + case 114: + goto st182 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 + st181: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof181 + } + st_case_181: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 85: + goto st177 + case 92: + goto st73 + } + goto st6 + st182: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof182 + } + st_case_182: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 117: + goto st180 + } + goto st6 +tr354: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st629 + st629: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof629 + } + st_case_629: +//line plugins/parsers/influx/machine.go:24569 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 92: + goto st73 
+ case 97: + goto st178 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 +tr355: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st630 + st630: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof630 + } + st_case_630: +//line plugins/parsers/influx/machine.go:24601 + switch ( m.data)[( m.p)] { + case 10: + goto tr749 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 92: + goto st73 + case 114: + goto st182 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 +tr347: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st183 + st183: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof183 + } + st_case_183: +//line plugins/parsers/influx/machine.go:24633 + switch ( m.data)[( m.p)] { + case 34: + goto st171 + case 92: + goto st171 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st3 + st631: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof631 + } + st_case_631: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 13: + goto tr533 + case 32: + goto tr531 + case 34: + goto tr29 + case 44: + goto tr534 + case 46: + goto st325 + case 69: + goto st173 + case 92: + goto st73 + case 101: + goto st173 + case 105: + goto st623 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st619 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 + } + goto st6 + st632: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof632 + } + st_case_632: + switch ( m.data)[( m.p)] { + case 10: + goto tr532 + case 13: + goto tr533 + case 32: + goto tr531 + case 34: + goto tr29 + case 44: + goto tr534 + case 46: + goto st325 + case 69: + goto st173 + case 92: + goto st73 + case 101: + goto st173 + case 105: + goto st623 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st632 + } + case ( m.data)[( m.p)] >= 9: + goto tr531 + } + goto st6 +tr169: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st633 + st633: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof633 + } + st_case_633: +//line plugins/parsers/influx/machine.go:24732 + switch ( m.data)[( m.p)] { + case 10: + goto tr891 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 65: + goto st184 + case 92: + goto st73 + case 97: + goto st187 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 + st184: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof184 + } + st_case_184: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 76: + goto st185 + case 92: + goto st73 + } + goto st6 + st185: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof185 + } + st_case_185: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 83: + goto st186 + case 92: + goto st73 + } + goto st6 + st186: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof186 + } + st_case_186: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 69: + goto st634 + case 92: + goto st73 + } + goto st6 + st634: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof634 + } + st_case_634: + switch ( m.data)[( m.p)] { + case 10: + goto tr891 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + 
goto tr929 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 + st187: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof187 + } + st_case_187: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 108: + goto st188 + } + goto st6 + st188: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof188 + } + st_case_188: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 115: + goto st189 + } + goto st6 + st189: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof189 + } + st_case_189: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 101: + goto st634 + } + goto st6 +tr170: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st635 + st635: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof635 + } + st_case_635: +//line plugins/parsers/influx/machine.go:24885 + switch ( m.data)[( m.p)] { + case 10: + goto tr891 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 82: + goto st190 + case 92: + goto st73 + case 114: + goto st191 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 + st190: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof190 + } + st_case_190: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 85: + goto st186 + case 92: + goto st73 + } + goto st6 + st191: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof191 + } + st_case_191: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 117: + goto st189 + } + goto st6 +tr171: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st636 + st636: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof636 + } + st_case_636: +//line plugins/parsers/influx/machine.go:24951 + switch ( m.data)[( m.p)] { + case 10: + goto tr891 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 92: + goto st73 + case 97: + goto st187 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 +tr172: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st637 + st637: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof637 + } + st_case_637: +//line plugins/parsers/influx/machine.go:24983 + switch ( m.data)[( m.p)] { + case 10: + goto tr891 + case 13: + goto tr751 + case 32: + goto tr928 + case 34: + goto tr29 + case 44: + goto tr929 + case 92: + goto st73 + case 114: + goto st191 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr928 + } + goto st6 +tr160: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st192 + st192: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof192 + } + st_case_192: +//line plugins/parsers/influx/machine.go:25015 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr160 + case 13: + goto st6 + case 32: + goto st48 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto tr163 + case 92: + goto tr161 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st48 + } + goto tr158 +tr138: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st193 + st193: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof193 + } + st_case_193: +//line plugins/parsers/influx/machine.go:25049 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 
11: + goto tr59 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 46: + goto st194 + case 48: + goto st639 + case 61: + goto tr45 + case 92: + goto st21 + } + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st642 + } + case ( m.data)[( m.p)] >= 9: + goto tr58 + } + goto st15 +tr139: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st194 + st194: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof194 + } + st_case_194: +//line plugins/parsers/influx/machine.go:25090 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr59 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr45 + case 92: + goto st21 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st638 + } + case ( m.data)[( m.p)] >= 9: + goto tr58 + } + goto st15 + st638: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof638 + } + st_case_638: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 + case 44: + goto tr733 + case 61: + goto tr130 + case 69: + goto st195 + case 92: + goto st21 + case 101: + goto st195 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st638 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 + } + goto st15 st195: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof195 @@ -22197,90 +25153,357 @@ tr79: st_case_195: switch ( m.data)[( m.p)] { case 10: - goto tr338 + goto tr45 + case 11: + goto tr59 case 13: - goto tr338 + goto tr45 + case 32: + goto tr58 + case 34: + goto st196 + case 44: + goto tr60 + case 61: + goto tr45 + case 92: + goto st21 } - goto st195 -tr338: -//line plugins/parsers/influx/machine.go.rl:68 - - {goto st196 } - - goto st601 - st601: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof601 + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st479 + } + default: + goto st196 } - st_case_601: -//line plugins/parsers/influx/machine.go:22217 - goto st0 + goto st15 st196: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof196 } st_case_196: switch ( m.data)[( m.p)] { + case 10: + goto tr45 case 11: - goto tr341 + goto tr59 + case 13: + goto tr45 case 32: - goto st196 - case 35: - goto st197 + goto tr58 case 44: - goto st0 + goto tr60 + case 61: + goto tr45 case 92: - goto st198 + goto st21 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st196 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st479 + } + case ( m.data)[( m.p)] >= 9: + goto tr58 } - goto tr339 -tr339: -//line plugins/parsers/influx/machine.go.rl:63 - - ( m.p)-- - - {goto st1 } - - goto st602 - st602: + goto st15 + st639: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof602 + goto _test_eof639 } - st_case_602: -//line plugins/parsers/influx/machine.go:22253 - goto st0 -tr341: -//line plugins/parsers/influx/machine.go.rl:63 - - ( m.p)-- - - {goto st1 } - - goto st603 - st603: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof603 - } - st_case_603: -//line plugins/parsers/influx/machine.go:22268 + st_case_639: switch ( m.data)[( m.p)] { + case 10: + goto tr730 case 11: - goto tr341 + goto tr731 + case 13: + goto tr732 case 32: - goto st196 - case 35: - goto st197 + 
goto tr729 case 44: - goto st0 + goto tr733 + case 46: + goto st638 + case 61: + goto tr130 + case 69: + goto st195 case 92: - goto st198 + goto st21 + case 101: + goto st195 + case 105: + goto st641 } - if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st196 + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st640 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 } - goto tr339 + goto st15 + st640: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof640 + } + st_case_640: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 + case 44: + goto tr733 + case 46: + goto st638 + case 61: + goto tr130 + case 69: + goto st195 + case 92: + goto st21 + case 101: + goto st195 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st640 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 + } + goto st15 + st641: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof641 + } + st_case_641: + switch ( m.data)[( m.p)] { + case 10: + goto tr942 + case 11: + goto tr943 + case 13: + goto tr944 + case 32: + goto tr941 + case 44: + goto tr945 + case 61: + goto tr130 + case 92: + goto st21 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr941 + } + goto st15 + st642: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof642 + } + st_case_642: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 + case 44: + goto tr733 + case 46: + goto st638 + case 61: + goto tr130 + case 69: + goto st195 + case 92: + goto st21 + case 101: + goto st195 + case 105: + goto st641 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st642 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 + } + goto st15 +tr140: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st643 + st643: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof643 + } + st_case_643: +//line plugins/parsers/influx/machine.go:25364 + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 + case 44: + goto tr733 + case 46: + goto st638 + case 61: + goto tr130 + case 69: + goto st195 + case 92: + goto st21 + case 101: + goto st195 + case 105: + goto st641 + case 117: + goto st644 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st640 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 + } + goto st15 + st644: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof644 + } + st_case_644: + switch ( m.data)[( m.p)] { + case 10: + goto tr948 + case 11: + goto tr949 + case 13: + goto tr950 + case 32: + goto tr947 + case 44: + goto tr951 + case 61: + goto tr130 + case 92: + goto st21 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr947 + } + goto st15 +tr141: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st645 + st645: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof645 + } + st_case_645: +//line plugins/parsers/influx/machine.go:25436 + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr731 + case 13: + goto tr732 + case 32: + goto tr729 + case 44: + goto tr733 + case 46: + goto st638 + case 61: + goto tr130 + case 69: + goto st195 + case 92: + goto st21 + case 101: + goto st195 + case 105: + goto st641 + case 117: + goto st644 + } + switch 
{ + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st645 + } + case ( m.data)[( m.p)] >= 9: + goto tr729 + } + goto st15 +tr142: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st646 + st646: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof646 + } + st_case_646: +//line plugins/parsers/influx/machine.go:25483 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr955 + case 13: + goto tr956 + case 32: + goto tr953 + case 44: + goto tr957 + case 61: + goto tr130 + case 65: + goto st197 + case 92: + goto st21 + case 97: + goto st200 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr953 + } + goto st15 st197: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof197 @@ -22288,348 +25511,4524 @@ tr341: st_case_197: switch ( m.data)[( m.p)] { case 10: - goto st196 + goto tr45 + case 11: + goto tr59 case 13: - goto st196 + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr45 + case 76: + goto st198 + case 92: + goto st21 } - goto st197 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st15 st198: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof198 } st_case_198: - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st0 - } - case ( m.data)[( m.p)] >= 9: - goto st0 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr59 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr45 + case 83: + goto st199 + case 92: + goto st21 } - goto tr339 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st15 st199: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof199 } st_case_199: - switch ( m.data)[( m.p)] { - case 32: - goto st0 - case 35: - goto st0 - case 44: - goto st0 - case 92: - goto tr346 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto st0 - } - case ( m.data)[( m.p)] >= 9: - goto st0 - } - goto tr345 -tr345: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st604 -tr833: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st604 - st604: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof604 - } - st_case_604: -//line plugins/parsers/influx/machine.go:22352 switch ( m.data)[( m.p)] { case 10: - goto tr832 + goto tr45 case 11: - goto tr833 + goto tr59 case 13: - goto tr832 + goto tr45 case 32: - goto tr831 + goto tr58 case 44: - goto tr834 + goto tr60 + case 61: + goto tr45 + case 69: + goto st647 case 92: - goto st205 + goto st21 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr831 + goto tr58 } - goto st604 -tr831: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st605 -tr838: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st605 - st605: + goto st15 + st647: if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof605 + goto _test_eof647 } - st_case_605: -//line plugins/parsers/influx/machine.go:22388 + st_case_647: switch ( m.data)[( m.p)] { case 10: - goto tr837 + goto tr954 + case 11: + goto tr955 case 13: - goto tr837 + goto tr956 case 32: - goto st605 + goto tr953 + case 44: + goto tr957 + case 61: + goto tr130 + case 92: + goto st21 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto st605 + goto tr953 } - goto st0 -tr837: - m.cs = 606 -//line 
plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr832: - m.cs = 606 -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again -tr839: - m.cs = 606 -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; goto _out } - - goto _again - st606: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof606 - } - st_case_606: -//line plugins/parsers/influx/machine.go:22441 - goto st0 -tr834: -//line plugins/parsers/influx/machine.go.rl:72 - - m.handler.SetMeasurement(m.text()) - - goto st200 -tr841: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st200 + goto st15 st200: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof200 } st_case_200: -//line plugins/parsers/influx/machine.go:22460 switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr59 + case 13: + goto tr45 case 32: - goto tr52 + goto tr58 case 44: - goto tr52 + goto tr60 case 61: - goto tr52 + goto tr45 case 92: - goto tr348 + goto st21 + case 108: + goto st201 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 } - goto tr347 -tr347: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st201 + goto st15 st201: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof201 } st_case_201: -//line plugins/parsers/influx/machine.go:22491 switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr59 + case 13: + goto tr45 case 32: - goto tr52 + goto tr58 case 44: - goto tr52 + goto tr60 case 61: - goto tr350 + goto tr45 case 92: - goto st204 + goto st21 + case 115: + goto st202 } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 } - goto st201 -tr350: -//line plugins/parsers/influx/machine.go.rl:76 - - key = m.text() - - goto st202 + goto st15 st202: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof202 } st_case_202: -//line plugins/parsers/influx/machine.go:22522 - switch ( m.data)[( m.p)] { - case 32: - goto tr52 - case 44: - goto tr52 - case 61: - goto tr52 - case 92: - goto tr353 - } - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 - } - goto tr352 -tr352: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st607 -tr840: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - - goto st607 - st607: - if ( m.p)++; ( m.p) == ( m.pe) { - goto _test_eof607 - } - st_case_607: -//line plugins/parsers/influx/machine.go:22559 switch ( m.data)[( m.p)] { case 10: - goto tr839 + goto tr45 case 11: - goto tr840 + goto tr59 case 13: - goto tr839 + goto tr45 case 32: - goto tr838 + goto tr58 case 44: - goto tr841 + goto tr60 case 61: - goto tr52 + goto tr45 case 92: - goto st203 + goto st21 + case 101: + goto st647 } if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { - goto tr838 + goto tr58 } - goto st607 -tr353: -//line 
plugins/parsers/influx/machine.go.rl:18 + goto st15 +tr143: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p - goto st203 + goto st648 + st648: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof648 + } + st_case_648: +//line plugins/parsers/influx/machine.go:25706 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr955 + case 13: + goto tr956 + case 32: + goto tr953 + case 44: + goto tr957 + case 61: + goto tr130 + case 82: + goto st203 + case 92: + goto st21 + case 114: + goto st204 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr953 + } + goto st15 st203: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof203 } st_case_203: -//line plugins/parsers/influx/machine.go:22591 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr59 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr45 + case 85: + goto st199 + case 92: + goto st21 } - goto st607 -tr348: -//line plugins/parsers/influx/machine.go.rl:18 - - m.pb = m.p - - goto st204 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st15 st204: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof204 } st_case_204: -//line plugins/parsers/influx/machine.go:22612 - switch { - case ( m.data)[( m.p)] > 10: - if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { - goto tr52 - } - case ( m.data)[( m.p)] >= 9: - goto tr52 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr59 + case 13: + goto tr45 + case 32: + goto tr58 + case 44: + goto tr60 + case 61: + goto tr45 + case 92: + goto st21 + case 117: + goto st202 } - goto st201 -tr346: -//line plugins/parsers/influx/machine.go.rl:18 + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr58 + } + goto st15 +tr144: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st649 + st649: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof649 + } + st_case_649: +//line plugins/parsers/influx/machine.go:25796 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr955 + case 13: + goto tr956 + case 32: + goto tr953 + case 44: + goto tr957 + case 61: + goto tr130 + case 92: + goto st21 + case 97: + goto st200 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr953 + } + goto st15 +tr145: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st650 + st650: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof650 + } + st_case_650: +//line plugins/parsers/influx/machine.go:25830 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr955 + case 13: + goto tr956 + case 32: + goto tr953 + case 44: + goto tr957 + case 61: + goto tr130 + case 92: + goto st21 + case 114: + goto st204 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr953 + } + goto st15 +tr121: +//line plugins/parsers/influx/machine.go.rl:28 m.pb = m.p goto st205 +tr380: + ( m.cs) = 205 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again st205: if ( m.p)++; ( m.p) == ( m.pe) { goto _test_eof205 } st_case_205: -//line plugins/parsers/influx/machine.go:22633 +//line plugins/parsers/influx/machine.go:25881 + switch ( m.data)[( m.p)] { + case 
10: + goto tr28 + case 11: + goto tr380 + case 13: + goto st6 + case 32: + goto tr117 + case 34: + goto tr122 + case 44: + goto tr90 + case 61: + goto tr381 + case 92: + goto tr123 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr117 + } + goto tr119 +tr118: + ( m.cs) = 206 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st206: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof206 + } + st_case_206: +//line plugins/parsers/influx/machine.go:25926 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr380 + case 13: + goto st6 + case 32: + goto tr117 + case 34: + goto tr122 + case 44: + goto tr90 + case 61: + goto tr80 + case 92: + goto tr123 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr117 + } + goto tr119 +tr497: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st207 + st207: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof207 + } + st_case_207: +//line plugins/parsers/influx/machine.go:25960 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st651 + } + goto st6 +tr498: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st651 + st651: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof651 + } + st_case_651: +//line plugins/parsers/influx/machine.go:25984 + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st652 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st652: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof652 + } + st_case_652: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st653 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st653: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof653 + } + st_case_653: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st654 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st654: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof654 + } + st_case_654: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st655 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st655: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof655 + } + st_case_655: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st656 + } + case ( m.data)[( m.p)] >= 9: + goto 
tr599 + } + goto st6 + st656: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof656 + } + st_case_656: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st657 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st657: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof657 + } + st_case_657: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st658 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st658: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof658 + } + st_case_658: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st659 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st659: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof659 + } + st_case_659: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st660 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st660: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof660 + } + st_case_660: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st661 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st661: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof661 + } + st_case_661: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st662 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st662: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof662 + } + st_case_662: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st663 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st663: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof663 + } + st_case_663: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st664 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st664: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof664 + } + st_case_664: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( 
m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st665 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st665: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof665 + } + st_case_665: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st666 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st666: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof666 + } + st_case_666: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st667 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st667: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof667 + } + st_case_667: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st668 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st668: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof668 + } + st_case_668: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st669 + } + case ( m.data)[( m.p)] >= 9: + goto tr599 + } + goto st6 + st669: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof669 + } + st_case_669: + switch ( m.data)[( m.p)] { + case 10: + goto tr600 + case 13: + goto tr602 + case 32: + goto tr599 + case 34: + goto tr29 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr599 + } + goto st6 +tr494: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st208 +tr981: + ( m.cs) = 208 +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr986: + ( m.cs) = 208 +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr989: + ( m.cs) = 208 +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr992: + ( m.cs) = 208 +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st208: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof208 + } + st_case_208: +//line plugins/parsers/influx/machine.go:26532 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr384 + case 44: + goto st6 + case 61: + goto st6 + case 92: + goto tr385 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto tr383 +tr383: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st209 + st209: + if ( 
m.p)++; ( m.p) == ( m.pe) { + goto _test_eof209 + } + st_case_209: +//line plugins/parsers/influx/machine.go:26564 + switch ( m.data)[( m.p)] { + case 9: + goto st6 + case 10: + goto tr28 + case 32: + goto st6 + case 34: + goto tr98 + case 44: + goto st6 + case 61: + goto tr387 + case 92: + goto st223 + } + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto st6 + } + goto st209 +tr387: +//line plugins/parsers/influx/machine.go.rl:108 + + m.key = m.text() + + goto st210 + st210: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof210 + } + st_case_210: +//line plugins/parsers/influx/machine.go:26596 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr351 + case 45: + goto tr389 + case 46: + goto tr390 + case 48: + goto tr391 + case 70: + goto tr110 + case 84: + goto tr111 + case 92: + goto st73 + case 102: + goto tr112 + case 116: + goto tr113 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto tr392 + } + goto st6 +tr389: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st211 + st211: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof211 + } + st_case_211: +//line plugins/parsers/influx/machine.go:26634 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 46: + goto st212 + case 48: + goto st672 + case 92: + goto st73 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st675 + } + goto st6 +tr390: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st212 + st212: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof212 + } + st_case_212: +//line plugins/parsers/influx/machine.go:26662 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st670 + } + goto st6 + st670: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof670 + } + st_case_670: + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 69: + goto st213 + case 92: + goto st73 + case 101: + goto st213 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st670 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st213: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof213 + } + st_case_213: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr356 + case 43: + goto st214 + case 45: + goto st214 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st671 + } + goto st6 + st214: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof214 + } + st_case_214: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st671 + } + goto st6 + st671: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof671 + } + st_case_671: + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st671 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st672: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof672 + } + st_case_672: + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + 
case 44: + goto tr981 + case 46: + goto st670 + case 69: + goto st213 + case 92: + goto st73 + case 101: + goto st213 + case 105: + goto st674 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st673 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st673: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof673 + } + st_case_673: + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 46: + goto st670 + case 69: + goto st213 + case 92: + goto st73 + case 101: + goto st213 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st673 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st674: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof674 + } + st_case_674: + switch ( m.data)[( m.p)] { + case 10: + goto tr791 + case 13: + goto tr793 + case 32: + goto tr985 + case 34: + goto tr29 + case 44: + goto tr986 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr985 + } + goto st6 + st675: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof675 + } + st_case_675: + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 46: + goto st670 + case 69: + goto st213 + case 92: + goto st73 + case 101: + goto st213 + case 105: + goto st674 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st675 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 +tr391: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st676 + st676: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof676 + } + st_case_676: +//line plugins/parsers/influx/machine.go:26913 + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 46: + goto st670 + case 69: + goto st213 + case 92: + goto st73 + case 101: + goto st213 + case 105: + goto st674 + case 117: + goto st677 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st673 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st677: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof677 + } + st_case_677: + switch ( m.data)[( m.p)] { + case 10: + goto tr797 + case 13: + goto tr799 + case 32: + goto tr988 + case 34: + goto tr29 + case 44: + goto tr989 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr988 + } + goto st6 +tr392: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st678 + st678: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof678 + } + st_case_678: +//line plugins/parsers/influx/machine.go:26981 + switch ( m.data)[( m.p)] { + case 10: + goto tr758 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 46: + goto st670 + case 69: + goto st213 + case 92: + goto st73 + case 101: + goto st213 + case 105: + goto st674 + case 117: + goto st677 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st678 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 +tr110: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st679 + st679: + if ( m.p)++; ( m.p) == ( 
m.pe) { + goto _test_eof679 + } + st_case_679: +//line plugins/parsers/influx/machine.go:27026 + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 13: + goto tr805 + case 32: + goto tr991 + case 34: + goto tr29 + case 44: + goto tr992 + case 65: + goto st215 + case 92: + goto st73 + case 97: + goto st218 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 + } + goto st6 + st215: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof215 + } + st_case_215: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 76: + goto st216 + case 92: + goto st73 + } + goto st6 + st216: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof216 + } + st_case_216: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 83: + goto st217 + case 92: + goto st73 + } + goto st6 + st217: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof217 + } + st_case_217: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 69: + goto st680 + case 92: + goto st73 + } + goto st6 + st680: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof680 + } + st_case_680: + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 13: + goto tr805 + case 32: + goto tr991 + case 34: + goto tr29 + case 44: + goto tr992 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 + } + goto st6 + st218: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof218 + } + st_case_218: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 108: + goto st219 + } + goto st6 + st219: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof219 + } + st_case_219: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 115: + goto st220 + } + goto st6 + st220: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof220 + } + st_case_220: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 101: + goto st680 + } + goto st6 +tr111: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st681 + st681: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof681 + } + st_case_681: +//line plugins/parsers/influx/machine.go:27179 + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 13: + goto tr805 + case 32: + goto tr991 + case 34: + goto tr29 + case 44: + goto tr992 + case 82: + goto st221 + case 92: + goto st73 + case 114: + goto st222 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 + } + goto st6 + st221: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof221 + } + st_case_221: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 85: + goto st217 + case 92: + goto st73 + } + goto st6 + st222: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof222 + } + st_case_222: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + case 117: + goto st220 + } + goto st6 +tr112: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st682 + st682: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof682 + } + st_case_682: +//line plugins/parsers/influx/machine.go:27245 + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 13: + goto tr805 + case 32: + goto tr991 + case 34: + goto tr29 + case 44: + goto tr992 + case 92: + goto st73 + case 97: + goto st218 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 + } + goto st6 +tr113: +//line 
plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st683 + st683: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof683 + } + st_case_683: +//line plugins/parsers/influx/machine.go:27277 + switch ( m.data)[( m.p)] { + case 10: + goto tr803 + case 13: + goto tr805 + case 32: + goto tr991 + case 34: + goto tr29 + case 44: + goto tr992 + case 92: + goto st73 + case 114: + goto st222 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr991 + } + goto st6 +tr385: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st223 + st223: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof223 + } + st_case_223: +//line plugins/parsers/influx/machine.go:27309 + switch ( m.data)[( m.p)] { + case 34: + goto st209 + case 92: + goto st209 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr8 + } + case ( m.data)[( m.p)] >= 9: + goto tr8 + } + goto st3 +tr106: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st224 + st224: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof224 + } + st_case_224: +//line plugins/parsers/influx/machine.go:27336 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 46: + goto st225 + case 48: + goto st686 + case 92: + goto st73 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st689 + } + goto st6 +tr107: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st225 + st225: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof225 + } + st_case_225: +//line plugins/parsers/influx/machine.go:27364 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st684 + } + goto st6 + st684: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof684 + } + st_case_684: + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 69: + goto st226 + case 92: + goto st73 + case 101: + goto st226 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st684 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st226: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof226 + } + st_case_226: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr356 + case 43: + goto st227 + case 45: + goto st227 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st685 + } + goto st6 + st227: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof227 + } + st_case_227: + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 34: + goto tr29 + case 92: + goto st73 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st685 + } + goto st6 + st685: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof685 + } + st_case_685: + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 92: + goto st73 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st685 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st686: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof686 + } + st_case_686: + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + 
case 46: + goto st684 + case 69: + goto st226 + case 92: + goto st73 + case 101: + goto st226 + case 105: + goto st688 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st687 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st687: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof687 + } + st_case_687: + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 46: + goto st684 + case 69: + goto st226 + case 92: + goto st73 + case 101: + goto st226 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st687 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st688: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof688 + } + st_case_688: + switch ( m.data)[( m.p)] { + case 10: + goto tr817 + case 13: + goto tr793 + case 32: + goto tr985 + case 34: + goto tr29 + case 44: + goto tr986 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr985 + } + goto st6 + st689: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof689 + } + st_case_689: + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 46: + goto st684 + case 69: + goto st226 + case 92: + goto st73 + case 101: + goto st226 + case 105: + goto st688 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st689 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 +tr108: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st690 + st690: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof690 + } + st_case_690: +//line plugins/parsers/influx/machine.go:27615 + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 46: + goto st684 + case 69: + goto st226 + case 92: + goto st73 + case 101: + goto st226 + case 105: + goto st688 + case 117: + goto st691 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st687 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 + st691: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof691 + } + st_case_691: + switch ( m.data)[( m.p)] { + case 10: + goto tr822 + case 13: + goto tr799 + case 32: + goto tr988 + case 34: + goto tr29 + case 44: + goto tr989 + case 92: + goto st73 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr988 + } + goto st6 +tr109: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st692 + st692: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof692 + } + st_case_692: +//line plugins/parsers/influx/machine.go:27683 + switch ( m.data)[( m.p)] { + case 10: + goto tr636 + case 13: + goto tr638 + case 32: + goto tr980 + case 34: + goto tr29 + case 44: + goto tr981 + case 46: + goto st684 + case 69: + goto st226 + case 92: + goto st73 + case 101: + goto st226 + case 105: + goto st688 + case 117: + goto st691 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st692 + } + case ( m.data)[( m.p)] >= 9: + goto tr980 + } + goto st6 +tr94: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st228 + st228: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof228 
+ } + st_case_228: +//line plugins/parsers/influx/machine.go:27728 + switch ( m.data)[( m.p)] { + case 10: + goto tr28 + case 11: + goto tr94 + case 13: + goto st6 + case 32: + goto st30 + case 34: + goto tr95 + case 44: + goto st6 + case 61: + goto tr99 + case 92: + goto tr96 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st30 + } + goto tr92 +tr72: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st229 + st229: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof229 + } + st_case_229: +//line plugins/parsers/influx/machine.go:27762 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 46: + goto st230 + case 48: + goto st694 + case 92: + goto st94 + } + switch { + case ( m.data)[( m.p)] > 12: + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st697 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st1 +tr73: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st230 + st230: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof230 + } + st_case_230: +//line plugins/parsers/influx/machine.go:27801 + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st94 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st693 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st1 + st693: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof693 + } + st_case_693: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr812 + case 13: + goto tr732 + case 32: + goto tr811 + case 44: + goto tr813 + case 69: + goto st231 + case 92: + goto st94 + case 101: + goto st231 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st693 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 + } + goto st1 + st231: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof231 + } + st_case_231: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 34: + goto st232 + case 44: + goto tr4 + case 92: + goto st94 + } + switch { + case ( m.data)[( m.p)] < 43: + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + case ( m.data)[( m.p)] > 45: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st530 + } + default: + goto st232 + } + goto st1 + st232: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof232 + } + st_case_232: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st94 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st530 + } + case ( m.data)[( m.p)] >= 9: + goto tr1 + } + goto st1 + st694: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof694 + } + st_case_694: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr812 + case 13: + goto tr732 + case 32: + goto tr811 + case 44: + goto tr813 + case 46: + goto st693 + case 69: + goto st231 + case 92: + goto st94 + case 101: + goto st231 + case 105: + goto st696 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st695 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 + } + goto st1 + st695: + if ( m.p)++; ( m.p) == ( m.pe) { + 
goto _test_eof695 + } + st_case_695: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr812 + case 13: + goto tr732 + case 32: + goto tr811 + case 44: + goto tr813 + case 46: + goto st693 + case 69: + goto st231 + case 92: + goto st94 + case 101: + goto st231 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st695 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 + } + goto st1 + st696: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof696 + } + st_case_696: + switch ( m.data)[( m.p)] { + case 10: + goto tr942 + case 11: + goto tr1006 + case 13: + goto tr944 + case 32: + goto tr1005 + case 44: + goto tr1007 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1005 + } + goto st1 + st697: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof697 + } + st_case_697: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr812 + case 13: + goto tr732 + case 32: + goto tr811 + case 44: + goto tr813 + case 46: + goto st693 + case 69: + goto st231 + case 92: + goto st94 + case 101: + goto st231 + case 105: + goto st696 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st697 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 + } + goto st1 +tr74: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st698 + st698: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof698 + } + st_case_698: +//line plugins/parsers/influx/machine.go:28059 + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr812 + case 13: + goto tr732 + case 32: + goto tr811 + case 44: + goto tr813 + case 46: + goto st693 + case 69: + goto st231 + case 92: + goto st94 + case 101: + goto st231 + case 105: + goto st696 + case 117: + goto st699 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st695 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 + } + goto st1 + st699: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof699 + } + st_case_699: + switch ( m.data)[( m.p)] { + case 10: + goto tr948 + case 11: + goto tr1010 + case 13: + goto tr950 + case 32: + goto tr1009 + case 44: + goto tr1011 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1009 + } + goto st1 +tr75: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st700 + st700: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof700 + } + st_case_700: +//line plugins/parsers/influx/machine.go:28127 + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 11: + goto tr812 + case 13: + goto tr732 + case 32: + goto tr811 + case 44: + goto tr813 + case 46: + goto st693 + case 69: + goto st231 + case 92: + goto st94 + case 101: + goto st231 + case 105: + goto st696 + case 117: + goto st699 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st700 + } + case ( m.data)[( m.p)] >= 9: + goto tr811 + } + goto st1 +tr76: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st701 + st701: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof701 + } + st_case_701: +//line plugins/parsers/influx/machine.go:28172 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr1014 + case 13: + goto tr956 + case 32: + goto tr1013 + case 44: + goto tr1015 + case 65: + goto st233 + case 92: + goto st94 + case 97: + goto st236 + } + if 9 <= ( m.data)[( m.p)] && ( 
m.data)[( m.p)] <= 12 { + goto tr1013 + } + goto st1 + st233: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof233 + } + st_case_233: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 76: + goto st234 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st234: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof234 + } + st_case_234: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 83: + goto st235 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st235: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof235 + } + st_case_235: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 69: + goto st702 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st702: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof702 + } + st_case_702: + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr1014 + case 13: + goto tr956 + case 32: + goto tr1013 + case 44: + goto tr1015 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1013 + } + goto st1 + st236: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof236 + } + st_case_236: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st94 + case 108: + goto st237 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st237: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof237 + } + st_case_237: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st94 + case 115: + goto st238 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st238: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof238 + } + st_case_238: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st94 + case 101: + goto st702 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 +tr77: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st703 + st703: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof703 + } + st_case_703: +//line plugins/parsers/influx/machine.go:28379 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr1014 + case 13: + goto tr956 + case 32: + goto tr1013 + case 44: + goto tr1015 + case 82: + goto st239 + case 92: + goto st94 + case 114: + goto st240 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1013 + } + goto st1 + st239: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof239 + } + st_case_239: + switch ( m.data)[( m.p)] { + case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 85: + goto st235 + case 92: + goto st94 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 + st240: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof240 + } + st_case_240: + switch ( m.data)[( m.p)] { + 
case 10: + goto tr45 + case 11: + goto tr3 + case 13: + goto tr45 + case 32: + goto tr1 + case 44: + goto tr4 + case 92: + goto st94 + case 117: + goto st238 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1 + } + goto st1 +tr78: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st704 + st704: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof704 + } + st_case_704: +//line plugins/parsers/influx/machine.go:28463 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr1014 + case 13: + goto tr956 + case 32: + goto tr1013 + case 44: + goto tr1015 + case 92: + goto st94 + case 97: + goto st236 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1013 + } + goto st1 +tr79: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st705 + st705: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof705 + } + st_case_705: +//line plugins/parsers/influx/machine.go:28495 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 11: + goto tr1014 + case 13: + goto tr956 + case 32: + goto tr1013 + case 44: + goto tr1015 + case 92: + goto st94 + case 114: + goto st240 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1013 + } + goto st1 +tr42: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st241 +tr422: + ( m.cs) = 241 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st241: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof241 + } + st_case_241: +//line plugins/parsers/influx/machine.go:28544 + switch ( m.data)[( m.p)] { + case 10: + goto tr421 + case 11: + goto tr422 + case 13: + goto tr421 + case 32: + goto tr36 + case 44: + goto tr4 + case 61: + goto tr423 + case 92: + goto tr43 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr36 + } + goto tr39 +tr38: + ( m.cs) = 242 +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st242: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof242 + } + st_case_242: +//line plugins/parsers/influx/machine.go:28587 + switch ( m.data)[( m.p)] { + case 10: + goto tr421 + case 11: + goto tr422 + case 13: + goto tr421 + case 32: + goto tr36 + case 44: + goto tr4 + case 61: + goto tr31 + case 92: + goto tr43 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr36 + } + goto tr39 +tr462: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st243 + st243: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof243 + } + st_case_243: +//line plugins/parsers/influx/machine.go:28619 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st706 + } + goto tr424 +tr463: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st706 + st706: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof706 + } + st_case_706: +//line plugins/parsers/influx/machine.go:28635 + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st707 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st707: + if ( m.p)++; ( m.p) == 
( m.pe) { + goto _test_eof707 + } + st_case_707: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st708 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st708: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof708 + } + st_case_708: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st709 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st709: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof709 + } + st_case_709: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st710 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st710: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof710 + } + st_case_710: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st711 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st711: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof711 + } + st_case_711: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st712 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st712: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof712 + } + st_case_712: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st713 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st713: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof713 + } + st_case_713: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st714 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st714: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof714 + } + st_case_714: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st715 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st715: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof715 + } + st_case_715: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st716 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st716: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof716 + } + st_case_716: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto 
st717 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st717: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof717 + } + st_case_717: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st718 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st718: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof718 + } + st_case_718: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st719 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st719: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof719 + } + st_case_719: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st720 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st720: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof720 + } + st_case_720: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st721 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st721: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof721 + } + st_case_721: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st722 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st722: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof722 + } + st_case_722: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st723 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st723: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof723 + } + st_case_723: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st724 + } + case ( m.data)[( m.p)] >= 9: + goto tr467 + } + goto tr424 + st724: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof724 + } + st_case_724: + switch ( m.data)[( m.p)] { + case 10: + goto tr468 + case 13: + goto tr470 + case 32: + goto tr467 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr467 + } + goto tr424 +tr15: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st244 + st244: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof244 + } + st_case_244: +//line plugins/parsers/influx/machine.go:29055 + switch ( m.data)[( m.p)] { + case 46: + goto st245 + case 48: + goto st726 + } + if 49 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st729 + } + goto tr8 +tr16: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st245 + st245: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof245 + } + st_case_245: +//line plugins/parsers/influx/machine.go:29077 + if 48 <= ( m.data)[( m.p)] && ( m.data)[( 
m.p)] <= 57 { + goto st725 + } + goto tr8 + st725: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof725 + } + st_case_725: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 13: + goto tr732 + case 32: + goto tr921 + case 44: + goto tr922 + case 69: + goto st246 + case 101: + goto st246 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st725 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 + } + goto tr103 + st246: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof246 + } + st_case_246: + switch ( m.data)[( m.p)] { + case 34: + goto st247 + case 43: + goto st247 + case 45: + goto st247 + } + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st621 + } + goto tr8 + st247: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof247 + } + st_case_247: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st621 + } + goto tr8 + st726: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof726 + } + st_case_726: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 13: + goto tr732 + case 32: + goto tr921 + case 44: + goto tr922 + case 46: + goto st725 + case 69: + goto st246 + case 101: + goto st246 + case 105: + goto st728 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st727 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 + } + goto tr103 + st727: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof727 + } + st_case_727: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 13: + goto tr732 + case 32: + goto tr921 + case 44: + goto tr922 + case 46: + goto st725 + case 69: + goto st246 + case 101: + goto st246 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st727 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 + } + goto tr103 + st728: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof728 + } + st_case_728: + switch ( m.data)[( m.p)] { + case 10: + goto tr942 + case 13: + goto tr944 + case 32: + goto tr1041 + case 44: + goto tr1042 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1041 + } + goto tr103 + st729: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof729 + } + st_case_729: + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 13: + goto tr732 + case 32: + goto tr921 + case 44: + goto tr922 + case 46: + goto st725 + case 69: + goto st246 + case 101: + goto st246 + case 105: + goto st728 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st729 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 + } + goto tr103 +tr17: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st730 + st730: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof730 + } + st_case_730: +//line plugins/parsers/influx/machine.go:29260 + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 13: + goto tr732 + case 32: + goto tr921 + case 44: + goto tr922 + case 46: + goto st725 + case 69: + goto st246 + case 101: + goto st246 + case 105: + goto st728 + case 117: + goto st731 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st727 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 + } + goto tr103 + st731: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof731 + } + st_case_731: + switch ( m.data)[( m.p)] { + case 10: + goto tr948 + case 13: + goto tr950 + case 32: + goto tr1044 + case 44: + goto tr1045 + } + if 9 <= ( m.data)[( m.p)] 
&& ( m.data)[( m.p)] <= 12 { + goto tr1044 + } + goto tr103 +tr18: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st732 + st732: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof732 + } + st_case_732: +//line plugins/parsers/influx/machine.go:29320 + switch ( m.data)[( m.p)] { + case 10: + goto tr730 + case 13: + goto tr732 + case 32: + goto tr921 + case 44: + goto tr922 + case 46: + goto st725 + case 69: + goto st246 + case 101: + goto st246 + case 105: + goto st728 + case 117: + goto st731 + } + switch { + case ( m.data)[( m.p)] > 12: + if 48 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 57 { + goto st732 + } + case ( m.data)[( m.p)] >= 9: + goto tr921 + } + goto tr103 +tr19: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st733 + st733: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof733 + } + st_case_733: +//line plugins/parsers/influx/machine.go:29361 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 13: + goto tr956 + case 32: + goto tr1047 + case 44: + goto tr1048 + case 65: + goto st248 + case 97: + goto st251 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1047 + } + goto tr103 + st248: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof248 + } + st_case_248: + if ( m.data)[( m.p)] == 76 { + goto st249 + } + goto tr8 + st249: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof249 + } + st_case_249: + if ( m.data)[( m.p)] == 83 { + goto st250 + } + goto tr8 + st250: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof250 + } + st_case_250: + if ( m.data)[( m.p)] == 69 { + goto st734 + } + goto tr8 + st734: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof734 + } + st_case_734: + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 13: + goto tr956 + case 32: + goto tr1047 + case 44: + goto tr1048 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1047 + } + goto tr103 + st251: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof251 + } + st_case_251: + if ( m.data)[( m.p)] == 108 { + goto st252 + } + goto tr8 + st252: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof252 + } + st_case_252: + if ( m.data)[( m.p)] == 115 { + goto st253 + } + goto tr8 + st253: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof253 + } + st_case_253: + if ( m.data)[( m.p)] == 101 { + goto st734 + } + goto tr8 +tr20: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st735 + st735: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof735 + } + st_case_735: +//line plugins/parsers/influx/machine.go:29464 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 13: + goto tr956 + case 32: + goto tr1047 + case 44: + goto tr1048 + case 82: + goto st254 + case 114: + goto st255 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1047 + } + goto tr103 + st254: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof254 + } + st_case_254: + if ( m.data)[( m.p)] == 85 { + goto st250 + } + goto tr8 + st255: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof255 + } + st_case_255: + if ( m.data)[( m.p)] == 117 { + goto st253 + } + goto tr8 +tr21: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st736 + st736: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof736 + } + st_case_736: +//line plugins/parsers/influx/machine.go:29512 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 13: + goto tr956 + case 32: + goto tr1047 + case 44: + goto tr1048 + case 97: + goto st251 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto 
tr1047 + } + goto tr103 +tr22: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st737 + st737: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof737 + } + st_case_737: +//line plugins/parsers/influx/machine.go:29540 + switch ( m.data)[( m.p)] { + case 10: + goto tr954 + case 13: + goto tr956 + case 32: + goto tr1047 + case 44: + goto tr1048 + case 114: + goto st255 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto tr1047 + } + goto tr103 +tr9: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st256 + st256: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof256 + } + st_case_256: +//line plugins/parsers/influx/machine.go:29568 + switch ( m.data)[( m.p)] { + case 10: + goto tr8 + case 11: + goto tr9 + case 13: + goto tr8 + case 32: + goto st2 + case 44: + goto tr8 + case 61: + goto tr12 + case 92: + goto tr10 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st2 + } + goto tr6 + st257: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof257 + } + st_case_257: + if ( m.data)[( m.p)] == 10 { + goto tr438 + } + goto st257 +tr438: +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + +//line plugins/parsers/influx/machine.go.rl:78 + + {goto st739 } + + goto st738 + st738: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof738 + } + st_case_738: +//line plugins/parsers/influx/machine.go:29615 + goto st0 + st260: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof260 + } + st_case_260: + switch ( m.data)[( m.p)] { + case 32: + goto tr33 + case 35: + goto tr33 + case 44: + goto tr33 + case 92: + goto tr442 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr33 + } + case ( m.data)[( m.p)] >= 9: + goto tr33 + } + goto tr441 +tr441: +//line plugins/parsers/influx/machine.go.rl:82 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st740 + st740: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof740 + } + st_case_740: +//line plugins/parsers/influx/machine.go:29656 + switch ( m.data)[( m.p)] { + case 9: + goto tr2 + case 10: + goto tr1056 + case 12: + goto tr2 + case 13: + goto tr1057 + case 32: + goto tr2 + case 44: + goto tr1058 + case 92: + goto st268 + } + goto st740 +tr443: +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st741 +tr1056: + ( m.cs) = 741 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again +tr1060: + ( m.cs) = 741 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto _again + st741: +//line plugins/parsers/influx/machine.go.rl:172 + + m.finishMetric = true + ( m.cs) = 739; + {( m.p)++; goto _out } + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof741 + } + st_case_741: +//line plugins/parsers/influx/machine.go:29731 + goto st0 +tr1057: + ( m.cs) = 261 +//line 
plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1061: + ( m.cs) = 261 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st261: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof261 + } + st_case_261: +//line plugins/parsers/influx/machine.go:29764 + if ( m.data)[( m.p)] == 10 { + goto tr443 + } + goto st0 +tr1058: + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again +tr1062: + ( m.cs) = 262 +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; goto _out } + } + + goto _again + st262: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof262 + } + st_case_262: +//line plugins/parsers/influx/machine.go:29800 + switch ( m.data)[( m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr2 + case 92: + goto tr445 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto tr444 +tr444: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st263 + st263: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof263 + } + st_case_263: +//line plugins/parsers/influx/machine.go:29831 + switch ( m.data)[( m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr447 + case 92: + goto st266 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto st263 +tr447: +//line plugins/parsers/influx/machine.go.rl:95 + + m.key = m.text() + + goto st264 + st264: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof264 + } + st_case_264: +//line plugins/parsers/influx/machine.go:29862 + switch ( m.data)[( m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr2 + case 92: + goto tr450 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto tr449 +tr449: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st742 + st742: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof742 + } + st_case_742: +//line plugins/parsers/influx/machine.go:29893 + switch ( m.data)[( m.p)] { + case 9: + goto tr2 + case 10: + goto tr1060 + case 12: + goto tr2 + case 13: + goto tr1061 + case 32: + goto tr2 + case 44: + goto tr1062 + case 61: + goto tr2 + case 92: + goto st265 + } + goto st742 +tr450: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st265 + st265: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof265 + } + st_case_265: +//line plugins/parsers/influx/machine.go:29924 + if ( m.data)[( m.p)] == 92 { + goto st743 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto st742 + st743: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof743 + } + st_case_743: +//line plugins/parsers/influx/machine.go:29945 + switch ( 
m.data)[( m.p)] { + case 9: + goto tr2 + case 10: + goto tr1060 + case 12: + goto tr2 + case 13: + goto tr1061 + case 32: + goto tr2 + case 44: + goto tr1062 + case 61: + goto tr2 + case 92: + goto st265 + } + goto st742 +tr445: +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st266 + st266: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof266 + } + st_case_266: +//line plugins/parsers/influx/machine.go:29976 + if ( m.data)[( m.p)] == 92 { + goto st267 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto st263 + st267: +//line plugins/parsers/influx/machine.go.rl:248 + ( m.p)-- + + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof267 + } + st_case_267: +//line plugins/parsers/influx/machine.go:29997 + switch ( m.data)[( m.p)] { + case 32: + goto tr2 + case 44: + goto tr2 + case 61: + goto tr447 + case 92: + goto st266 + } + switch { + case ( m.data)[( m.p)] > 10: + if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { + goto tr2 + } + case ( m.data)[( m.p)] >= 9: + goto tr2 + } + goto st263 +tr442: +//line plugins/parsers/influx/machine.go.rl:82 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:28 + + m.pb = m.p + + goto st268 + st268: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof268 + } + st_case_268: +//line plugins/parsers/influx/machine.go:30032 switch { case ( m.data)[( m.p)] > 10: if 12 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 13 { @@ -22638,907 +30037,1698 @@ tr346: case ( m.data)[( m.p)] >= 9: goto st0 } - goto st604 + goto st740 +tr439: +//line plugins/parsers/influx/machine.go.rl:166 + + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line + + goto st739 + st739: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof739 + } + st_case_739: +//line plugins/parsers/influx/machine.go:30055 + switch ( m.data)[( m.p)] { + case 10: + goto tr439 + case 13: + goto st258 + case 32: + goto st739 + case 35: + goto st259 + } + if 9 <= ( m.data)[( m.p)] && ( m.data)[( m.p)] <= 12 { + goto st739 + } + goto tr1053 + st258: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof258 + } + st_case_258: + if ( m.data)[( m.p)] == 10 { + goto tr439 + } + goto st0 + st259: + if ( m.p)++; ( m.p) == ( m.pe) { + goto _test_eof259 + } + st_case_259: + if ( m.data)[( m.p)] == 10 { + goto tr439 + } + goto st259 st_out: - _test_eof1: m.cs = 1; goto _test_eof - _test_eof2: m.cs = 2; goto _test_eof - _test_eof3: m.cs = 3; goto _test_eof - _test_eof4: m.cs = 4; goto _test_eof - _test_eof5: m.cs = 5; goto _test_eof - _test_eof6: m.cs = 6; goto _test_eof - _test_eof7: m.cs = 7; goto _test_eof - _test_eof206: m.cs = 206; goto _test_eof - _test_eof207: m.cs = 207; goto _test_eof - _test_eof208: m.cs = 208; goto _test_eof - _test_eof8: m.cs = 8; goto _test_eof - _test_eof209: m.cs = 209; goto _test_eof - _test_eof210: m.cs = 210; goto _test_eof - _test_eof211: m.cs = 211; goto _test_eof - _test_eof212: m.cs = 212; goto _test_eof - _test_eof213: m.cs = 213; goto _test_eof - _test_eof214: m.cs = 214; goto _test_eof - _test_eof215: m.cs = 215; goto _test_eof - _test_eof216: m.cs = 216; goto _test_eof - _test_eof217: m.cs = 217; goto _test_eof - _test_eof218: m.cs = 218; goto _test_eof - _test_eof219: m.cs = 219; goto _test_eof - _test_eof220: m.cs = 220; goto _test_eof - _test_eof221: m.cs = 221; goto _test_eof - _test_eof222: m.cs = 222; goto _test_eof - _test_eof223: m.cs = 223; goto _test_eof - _test_eof224: m.cs = 
224; goto _test_eof - _test_eof225: m.cs = 225; goto _test_eof - _test_eof226: m.cs = 226; goto _test_eof - _test_eof227: m.cs = 227; goto _test_eof - _test_eof228: m.cs = 228; goto _test_eof - _test_eof9: m.cs = 9; goto _test_eof - _test_eof10: m.cs = 10; goto _test_eof - _test_eof11: m.cs = 11; goto _test_eof - _test_eof12: m.cs = 12; goto _test_eof - _test_eof13: m.cs = 13; goto _test_eof - _test_eof229: m.cs = 229; goto _test_eof - _test_eof14: m.cs = 14; goto _test_eof - _test_eof15: m.cs = 15; goto _test_eof - _test_eof230: m.cs = 230; goto _test_eof - _test_eof231: m.cs = 231; goto _test_eof - _test_eof232: m.cs = 232; goto _test_eof - _test_eof233: m.cs = 233; goto _test_eof - _test_eof234: m.cs = 234; goto _test_eof - _test_eof235: m.cs = 235; goto _test_eof - _test_eof236: m.cs = 236; goto _test_eof - _test_eof237: m.cs = 237; goto _test_eof - _test_eof238: m.cs = 238; goto _test_eof - _test_eof16: m.cs = 16; goto _test_eof - _test_eof17: m.cs = 17; goto _test_eof - _test_eof18: m.cs = 18; goto _test_eof - _test_eof239: m.cs = 239; goto _test_eof - _test_eof19: m.cs = 19; goto _test_eof - _test_eof20: m.cs = 20; goto _test_eof - _test_eof21: m.cs = 21; goto _test_eof - _test_eof240: m.cs = 240; goto _test_eof - _test_eof22: m.cs = 22; goto _test_eof - _test_eof23: m.cs = 23; goto _test_eof - _test_eof241: m.cs = 241; goto _test_eof - _test_eof242: m.cs = 242; goto _test_eof - _test_eof24: m.cs = 24; goto _test_eof - _test_eof25: m.cs = 25; goto _test_eof - _test_eof26: m.cs = 26; goto _test_eof - _test_eof27: m.cs = 27; goto _test_eof - _test_eof28: m.cs = 28; goto _test_eof - _test_eof29: m.cs = 29; goto _test_eof - _test_eof30: m.cs = 30; goto _test_eof - _test_eof31: m.cs = 31; goto _test_eof - _test_eof32: m.cs = 32; goto _test_eof - _test_eof33: m.cs = 33; goto _test_eof - _test_eof34: m.cs = 34; goto _test_eof - _test_eof35: m.cs = 35; goto _test_eof - _test_eof36: m.cs = 36; goto _test_eof - _test_eof37: m.cs = 37; goto _test_eof - _test_eof38: m.cs = 38; goto _test_eof - _test_eof39: m.cs = 39; goto _test_eof - _test_eof40: m.cs = 40; goto _test_eof - _test_eof41: m.cs = 41; goto _test_eof - _test_eof42: m.cs = 42; goto _test_eof - _test_eof243: m.cs = 243; goto _test_eof - _test_eof244: m.cs = 244; goto _test_eof - _test_eof43: m.cs = 43; goto _test_eof - _test_eof245: m.cs = 245; goto _test_eof - _test_eof246: m.cs = 246; goto _test_eof - _test_eof247: m.cs = 247; goto _test_eof - _test_eof248: m.cs = 248; goto _test_eof - _test_eof249: m.cs = 249; goto _test_eof - _test_eof250: m.cs = 250; goto _test_eof - _test_eof251: m.cs = 251; goto _test_eof - _test_eof252: m.cs = 252; goto _test_eof - _test_eof253: m.cs = 253; goto _test_eof - _test_eof254: m.cs = 254; goto _test_eof - _test_eof255: m.cs = 255; goto _test_eof - _test_eof256: m.cs = 256; goto _test_eof - _test_eof257: m.cs = 257; goto _test_eof - _test_eof258: m.cs = 258; goto _test_eof - _test_eof259: m.cs = 259; goto _test_eof - _test_eof260: m.cs = 260; goto _test_eof - _test_eof261: m.cs = 261; goto _test_eof - _test_eof262: m.cs = 262; goto _test_eof - _test_eof263: m.cs = 263; goto _test_eof - _test_eof264: m.cs = 264; goto _test_eof - _test_eof44: m.cs = 44; goto _test_eof - _test_eof265: m.cs = 265; goto _test_eof - _test_eof266: m.cs = 266; goto _test_eof - _test_eof45: m.cs = 45; goto _test_eof - _test_eof267: m.cs = 267; goto _test_eof - _test_eof268: m.cs = 268; goto _test_eof - _test_eof269: m.cs = 269; goto _test_eof - _test_eof270: m.cs = 270; goto _test_eof - _test_eof271: m.cs = 271; goto 
_test_eof - _test_eof272: m.cs = 272; goto _test_eof - _test_eof273: m.cs = 273; goto _test_eof - _test_eof274: m.cs = 274; goto _test_eof - _test_eof275: m.cs = 275; goto _test_eof - _test_eof276: m.cs = 276; goto _test_eof - _test_eof277: m.cs = 277; goto _test_eof - _test_eof278: m.cs = 278; goto _test_eof - _test_eof279: m.cs = 279; goto _test_eof - _test_eof280: m.cs = 280; goto _test_eof - _test_eof281: m.cs = 281; goto _test_eof - _test_eof282: m.cs = 282; goto _test_eof - _test_eof283: m.cs = 283; goto _test_eof - _test_eof284: m.cs = 284; goto _test_eof - _test_eof285: m.cs = 285; goto _test_eof - _test_eof286: m.cs = 286; goto _test_eof - _test_eof46: m.cs = 46; goto _test_eof - _test_eof47: m.cs = 47; goto _test_eof - _test_eof48: m.cs = 48; goto _test_eof - _test_eof287: m.cs = 287; goto _test_eof - _test_eof49: m.cs = 49; goto _test_eof - _test_eof50: m.cs = 50; goto _test_eof - _test_eof51: m.cs = 51; goto _test_eof - _test_eof52: m.cs = 52; goto _test_eof - _test_eof53: m.cs = 53; goto _test_eof - _test_eof288: m.cs = 288; goto _test_eof - _test_eof54: m.cs = 54; goto _test_eof - _test_eof289: m.cs = 289; goto _test_eof - _test_eof55: m.cs = 55; goto _test_eof - _test_eof290: m.cs = 290; goto _test_eof - _test_eof291: m.cs = 291; goto _test_eof - _test_eof292: m.cs = 292; goto _test_eof - _test_eof293: m.cs = 293; goto _test_eof - _test_eof294: m.cs = 294; goto _test_eof - _test_eof295: m.cs = 295; goto _test_eof - _test_eof296: m.cs = 296; goto _test_eof - _test_eof297: m.cs = 297; goto _test_eof - _test_eof298: m.cs = 298; goto _test_eof - _test_eof56: m.cs = 56; goto _test_eof - _test_eof57: m.cs = 57; goto _test_eof - _test_eof58: m.cs = 58; goto _test_eof - _test_eof299: m.cs = 299; goto _test_eof - _test_eof59: m.cs = 59; goto _test_eof - _test_eof60: m.cs = 60; goto _test_eof - _test_eof61: m.cs = 61; goto _test_eof - _test_eof300: m.cs = 300; goto _test_eof - _test_eof62: m.cs = 62; goto _test_eof - _test_eof63: m.cs = 63; goto _test_eof - _test_eof301: m.cs = 301; goto _test_eof - _test_eof302: m.cs = 302; goto _test_eof - _test_eof64: m.cs = 64; goto _test_eof - _test_eof65: m.cs = 65; goto _test_eof - _test_eof66: m.cs = 66; goto _test_eof - _test_eof303: m.cs = 303; goto _test_eof - _test_eof67: m.cs = 67; goto _test_eof - _test_eof68: m.cs = 68; goto _test_eof - _test_eof304: m.cs = 304; goto _test_eof - _test_eof305: m.cs = 305; goto _test_eof - _test_eof306: m.cs = 306; goto _test_eof - _test_eof307: m.cs = 307; goto _test_eof - _test_eof308: m.cs = 308; goto _test_eof - _test_eof309: m.cs = 309; goto _test_eof - _test_eof310: m.cs = 310; goto _test_eof - _test_eof311: m.cs = 311; goto _test_eof - _test_eof312: m.cs = 312; goto _test_eof - _test_eof69: m.cs = 69; goto _test_eof - _test_eof70: m.cs = 70; goto _test_eof - _test_eof71: m.cs = 71; goto _test_eof - _test_eof313: m.cs = 313; goto _test_eof - _test_eof72: m.cs = 72; goto _test_eof - _test_eof73: m.cs = 73; goto _test_eof - _test_eof74: m.cs = 74; goto _test_eof - _test_eof314: m.cs = 314; goto _test_eof - _test_eof75: m.cs = 75; goto _test_eof - _test_eof76: m.cs = 76; goto _test_eof - _test_eof315: m.cs = 315; goto _test_eof - _test_eof316: m.cs = 316; goto _test_eof - _test_eof77: m.cs = 77; goto _test_eof - _test_eof78: m.cs = 78; goto _test_eof - _test_eof79: m.cs = 79; goto _test_eof - _test_eof80: m.cs = 80; goto _test_eof - _test_eof81: m.cs = 81; goto _test_eof - _test_eof82: m.cs = 82; goto _test_eof - _test_eof317: m.cs = 317; goto _test_eof - _test_eof318: m.cs = 318; goto _test_eof - 
_test_eof319: m.cs = 319; goto _test_eof - _test_eof320: m.cs = 320; goto _test_eof - _test_eof83: m.cs = 83; goto _test_eof - _test_eof321: m.cs = 321; goto _test_eof - _test_eof322: m.cs = 322; goto _test_eof - _test_eof323: m.cs = 323; goto _test_eof - _test_eof324: m.cs = 324; goto _test_eof - _test_eof84: m.cs = 84; goto _test_eof - _test_eof325: m.cs = 325; goto _test_eof - _test_eof326: m.cs = 326; goto _test_eof - _test_eof327: m.cs = 327; goto _test_eof - _test_eof328: m.cs = 328; goto _test_eof - _test_eof329: m.cs = 329; goto _test_eof - _test_eof330: m.cs = 330; goto _test_eof - _test_eof331: m.cs = 331; goto _test_eof - _test_eof332: m.cs = 332; goto _test_eof - _test_eof333: m.cs = 333; goto _test_eof - _test_eof334: m.cs = 334; goto _test_eof - _test_eof335: m.cs = 335; goto _test_eof - _test_eof336: m.cs = 336; goto _test_eof - _test_eof337: m.cs = 337; goto _test_eof - _test_eof338: m.cs = 338; goto _test_eof - _test_eof339: m.cs = 339; goto _test_eof - _test_eof340: m.cs = 340; goto _test_eof - _test_eof341: m.cs = 341; goto _test_eof - _test_eof342: m.cs = 342; goto _test_eof - _test_eof85: m.cs = 85; goto _test_eof - _test_eof86: m.cs = 86; goto _test_eof - _test_eof87: m.cs = 87; goto _test_eof - _test_eof88: m.cs = 88; goto _test_eof - _test_eof89: m.cs = 89; goto _test_eof - _test_eof90: m.cs = 90; goto _test_eof - _test_eof91: m.cs = 91; goto _test_eof - _test_eof92: m.cs = 92; goto _test_eof - _test_eof93: m.cs = 93; goto _test_eof - _test_eof94: m.cs = 94; goto _test_eof - _test_eof95: m.cs = 95; goto _test_eof - _test_eof96: m.cs = 96; goto _test_eof - _test_eof97: m.cs = 97; goto _test_eof - _test_eof343: m.cs = 343; goto _test_eof - _test_eof344: m.cs = 344; goto _test_eof - _test_eof98: m.cs = 98; goto _test_eof - _test_eof345: m.cs = 345; goto _test_eof - _test_eof346: m.cs = 346; goto _test_eof - _test_eof347: m.cs = 347; goto _test_eof - _test_eof348: m.cs = 348; goto _test_eof - _test_eof349: m.cs = 349; goto _test_eof - _test_eof350: m.cs = 350; goto _test_eof - _test_eof351: m.cs = 351; goto _test_eof - _test_eof352: m.cs = 352; goto _test_eof - _test_eof353: m.cs = 353; goto _test_eof - _test_eof354: m.cs = 354; goto _test_eof - _test_eof355: m.cs = 355; goto _test_eof - _test_eof356: m.cs = 356; goto _test_eof - _test_eof357: m.cs = 357; goto _test_eof - _test_eof358: m.cs = 358; goto _test_eof - _test_eof359: m.cs = 359; goto _test_eof - _test_eof360: m.cs = 360; goto _test_eof - _test_eof361: m.cs = 361; goto _test_eof - _test_eof362: m.cs = 362; goto _test_eof - _test_eof363: m.cs = 363; goto _test_eof - _test_eof364: m.cs = 364; goto _test_eof - _test_eof99: m.cs = 99; goto _test_eof - _test_eof100: m.cs = 100; goto _test_eof - _test_eof365: m.cs = 365; goto _test_eof - _test_eof366: m.cs = 366; goto _test_eof - _test_eof101: m.cs = 101; goto _test_eof - _test_eof367: m.cs = 367; goto _test_eof - _test_eof368: m.cs = 368; goto _test_eof - _test_eof369: m.cs = 369; goto _test_eof - _test_eof370: m.cs = 370; goto _test_eof - _test_eof371: m.cs = 371; goto _test_eof - _test_eof372: m.cs = 372; goto _test_eof - _test_eof373: m.cs = 373; goto _test_eof - _test_eof374: m.cs = 374; goto _test_eof - _test_eof375: m.cs = 375; goto _test_eof - _test_eof376: m.cs = 376; goto _test_eof - _test_eof377: m.cs = 377; goto _test_eof - _test_eof378: m.cs = 378; goto _test_eof - _test_eof379: m.cs = 379; goto _test_eof - _test_eof380: m.cs = 380; goto _test_eof - _test_eof381: m.cs = 381; goto _test_eof - _test_eof382: m.cs = 382; goto _test_eof - _test_eof383: m.cs 
= 383; goto _test_eof - _test_eof384: m.cs = 384; goto _test_eof - _test_eof385: m.cs = 385; goto _test_eof - _test_eof386: m.cs = 386; goto _test_eof - _test_eof102: m.cs = 102; goto _test_eof - _test_eof387: m.cs = 387; goto _test_eof - _test_eof388: m.cs = 388; goto _test_eof - _test_eof103: m.cs = 103; goto _test_eof - _test_eof104: m.cs = 104; goto _test_eof - _test_eof105: m.cs = 105; goto _test_eof - _test_eof106: m.cs = 106; goto _test_eof - _test_eof107: m.cs = 107; goto _test_eof - _test_eof389: m.cs = 389; goto _test_eof - _test_eof108: m.cs = 108; goto _test_eof - _test_eof109: m.cs = 109; goto _test_eof - _test_eof390: m.cs = 390; goto _test_eof - _test_eof391: m.cs = 391; goto _test_eof - _test_eof392: m.cs = 392; goto _test_eof - _test_eof393: m.cs = 393; goto _test_eof - _test_eof394: m.cs = 394; goto _test_eof - _test_eof395: m.cs = 395; goto _test_eof - _test_eof396: m.cs = 396; goto _test_eof - _test_eof397: m.cs = 397; goto _test_eof - _test_eof398: m.cs = 398; goto _test_eof - _test_eof110: m.cs = 110; goto _test_eof - _test_eof111: m.cs = 111; goto _test_eof - _test_eof112: m.cs = 112; goto _test_eof - _test_eof399: m.cs = 399; goto _test_eof - _test_eof113: m.cs = 113; goto _test_eof - _test_eof114: m.cs = 114; goto _test_eof - _test_eof115: m.cs = 115; goto _test_eof - _test_eof400: m.cs = 400; goto _test_eof - _test_eof116: m.cs = 116; goto _test_eof - _test_eof117: m.cs = 117; goto _test_eof - _test_eof401: m.cs = 401; goto _test_eof - _test_eof402: m.cs = 402; goto _test_eof - _test_eof118: m.cs = 118; goto _test_eof - _test_eof119: m.cs = 119; goto _test_eof - _test_eof120: m.cs = 120; goto _test_eof - _test_eof121: m.cs = 121; goto _test_eof - _test_eof122: m.cs = 122; goto _test_eof - _test_eof123: m.cs = 123; goto _test_eof - _test_eof124: m.cs = 124; goto _test_eof - _test_eof125: m.cs = 125; goto _test_eof - _test_eof126: m.cs = 126; goto _test_eof - _test_eof127: m.cs = 127; goto _test_eof - _test_eof128: m.cs = 128; goto _test_eof - _test_eof129: m.cs = 129; goto _test_eof - _test_eof403: m.cs = 403; goto _test_eof - _test_eof404: m.cs = 404; goto _test_eof - _test_eof405: m.cs = 405; goto _test_eof - _test_eof130: m.cs = 130; goto _test_eof - _test_eof406: m.cs = 406; goto _test_eof - _test_eof407: m.cs = 407; goto _test_eof - _test_eof408: m.cs = 408; goto _test_eof - _test_eof409: m.cs = 409; goto _test_eof - _test_eof410: m.cs = 410; goto _test_eof - _test_eof411: m.cs = 411; goto _test_eof - _test_eof412: m.cs = 412; goto _test_eof - _test_eof413: m.cs = 413; goto _test_eof - _test_eof414: m.cs = 414; goto _test_eof - _test_eof415: m.cs = 415; goto _test_eof - _test_eof416: m.cs = 416; goto _test_eof - _test_eof417: m.cs = 417; goto _test_eof - _test_eof418: m.cs = 418; goto _test_eof - _test_eof419: m.cs = 419; goto _test_eof - _test_eof420: m.cs = 420; goto _test_eof - _test_eof421: m.cs = 421; goto _test_eof - _test_eof422: m.cs = 422; goto _test_eof - _test_eof423: m.cs = 423; goto _test_eof - _test_eof424: m.cs = 424; goto _test_eof - _test_eof425: m.cs = 425; goto _test_eof - _test_eof426: m.cs = 426; goto _test_eof - _test_eof427: m.cs = 427; goto _test_eof - _test_eof131: m.cs = 131; goto _test_eof - _test_eof428: m.cs = 428; goto _test_eof - _test_eof429: m.cs = 429; goto _test_eof - _test_eof430: m.cs = 430; goto _test_eof - _test_eof431: m.cs = 431; goto _test_eof - _test_eof132: m.cs = 132; goto _test_eof - _test_eof432: m.cs = 432; goto _test_eof - _test_eof433: m.cs = 433; goto _test_eof - _test_eof434: m.cs = 434; goto _test_eof - 
_test_eof435: m.cs = 435; goto _test_eof - _test_eof436: m.cs = 436; goto _test_eof - _test_eof437: m.cs = 437; goto _test_eof - _test_eof438: m.cs = 438; goto _test_eof - _test_eof439: m.cs = 439; goto _test_eof - _test_eof440: m.cs = 440; goto _test_eof - _test_eof441: m.cs = 441; goto _test_eof - _test_eof442: m.cs = 442; goto _test_eof - _test_eof443: m.cs = 443; goto _test_eof - _test_eof444: m.cs = 444; goto _test_eof - _test_eof445: m.cs = 445; goto _test_eof - _test_eof446: m.cs = 446; goto _test_eof - _test_eof447: m.cs = 447; goto _test_eof - _test_eof448: m.cs = 448; goto _test_eof - _test_eof449: m.cs = 449; goto _test_eof - _test_eof450: m.cs = 450; goto _test_eof - _test_eof451: m.cs = 451; goto _test_eof - _test_eof133: m.cs = 133; goto _test_eof - _test_eof134: m.cs = 134; goto _test_eof - _test_eof135: m.cs = 135; goto _test_eof - _test_eof452: m.cs = 452; goto _test_eof - _test_eof453: m.cs = 453; goto _test_eof - _test_eof136: m.cs = 136; goto _test_eof - _test_eof454: m.cs = 454; goto _test_eof - _test_eof455: m.cs = 455; goto _test_eof - _test_eof456: m.cs = 456; goto _test_eof - _test_eof457: m.cs = 457; goto _test_eof - _test_eof458: m.cs = 458; goto _test_eof - _test_eof459: m.cs = 459; goto _test_eof - _test_eof460: m.cs = 460; goto _test_eof - _test_eof461: m.cs = 461; goto _test_eof - _test_eof462: m.cs = 462; goto _test_eof - _test_eof463: m.cs = 463; goto _test_eof - _test_eof464: m.cs = 464; goto _test_eof - _test_eof465: m.cs = 465; goto _test_eof - _test_eof466: m.cs = 466; goto _test_eof - _test_eof467: m.cs = 467; goto _test_eof - _test_eof468: m.cs = 468; goto _test_eof - _test_eof469: m.cs = 469; goto _test_eof - _test_eof470: m.cs = 470; goto _test_eof - _test_eof471: m.cs = 471; goto _test_eof - _test_eof472: m.cs = 472; goto _test_eof - _test_eof473: m.cs = 473; goto _test_eof - _test_eof137: m.cs = 137; goto _test_eof - _test_eof474: m.cs = 474; goto _test_eof - _test_eof475: m.cs = 475; goto _test_eof - _test_eof476: m.cs = 476; goto _test_eof - _test_eof138: m.cs = 138; goto _test_eof - _test_eof477: m.cs = 477; goto _test_eof - _test_eof478: m.cs = 478; goto _test_eof - _test_eof479: m.cs = 479; goto _test_eof - _test_eof480: m.cs = 480; goto _test_eof - _test_eof481: m.cs = 481; goto _test_eof - _test_eof482: m.cs = 482; goto _test_eof - _test_eof483: m.cs = 483; goto _test_eof - _test_eof484: m.cs = 484; goto _test_eof - _test_eof485: m.cs = 485; goto _test_eof - _test_eof486: m.cs = 486; goto _test_eof - _test_eof487: m.cs = 487; goto _test_eof - _test_eof488: m.cs = 488; goto _test_eof - _test_eof489: m.cs = 489; goto _test_eof - _test_eof490: m.cs = 490; goto _test_eof - _test_eof491: m.cs = 491; goto _test_eof - _test_eof492: m.cs = 492; goto _test_eof - _test_eof493: m.cs = 493; goto _test_eof - _test_eof494: m.cs = 494; goto _test_eof - _test_eof495: m.cs = 495; goto _test_eof - _test_eof496: m.cs = 496; goto _test_eof - _test_eof497: m.cs = 497; goto _test_eof - _test_eof498: m.cs = 498; goto _test_eof - _test_eof139: m.cs = 139; goto _test_eof - _test_eof499: m.cs = 499; goto _test_eof - _test_eof500: m.cs = 500; goto _test_eof - _test_eof501: m.cs = 501; goto _test_eof - _test_eof502: m.cs = 502; goto _test_eof - _test_eof503: m.cs = 503; goto _test_eof - _test_eof504: m.cs = 504; goto _test_eof - _test_eof505: m.cs = 505; goto _test_eof - _test_eof506: m.cs = 506; goto _test_eof - _test_eof507: m.cs = 507; goto _test_eof - _test_eof508: m.cs = 508; goto _test_eof - _test_eof509: m.cs = 509; goto _test_eof - _test_eof510: m.cs = 510; 
goto _test_eof - _test_eof511: m.cs = 511; goto _test_eof - _test_eof512: m.cs = 512; goto _test_eof - _test_eof513: m.cs = 513; goto _test_eof - _test_eof514: m.cs = 514; goto _test_eof - _test_eof515: m.cs = 515; goto _test_eof - _test_eof516: m.cs = 516; goto _test_eof - _test_eof517: m.cs = 517; goto _test_eof - _test_eof518: m.cs = 518; goto _test_eof - _test_eof519: m.cs = 519; goto _test_eof - _test_eof520: m.cs = 520; goto _test_eof - _test_eof140: m.cs = 140; goto _test_eof - _test_eof141: m.cs = 141; goto _test_eof - _test_eof142: m.cs = 142; goto _test_eof - _test_eof143: m.cs = 143; goto _test_eof - _test_eof144: m.cs = 144; goto _test_eof - _test_eof521: m.cs = 521; goto _test_eof - _test_eof145: m.cs = 145; goto _test_eof - _test_eof522: m.cs = 522; goto _test_eof - _test_eof146: m.cs = 146; goto _test_eof - _test_eof523: m.cs = 523; goto _test_eof - _test_eof524: m.cs = 524; goto _test_eof - _test_eof525: m.cs = 525; goto _test_eof - _test_eof526: m.cs = 526; goto _test_eof - _test_eof527: m.cs = 527; goto _test_eof - _test_eof528: m.cs = 528; goto _test_eof - _test_eof529: m.cs = 529; goto _test_eof - _test_eof530: m.cs = 530; goto _test_eof - _test_eof531: m.cs = 531; goto _test_eof - _test_eof147: m.cs = 147; goto _test_eof - _test_eof148: m.cs = 148; goto _test_eof - _test_eof149: m.cs = 149; goto _test_eof - _test_eof532: m.cs = 532; goto _test_eof - _test_eof150: m.cs = 150; goto _test_eof - _test_eof151: m.cs = 151; goto _test_eof - _test_eof152: m.cs = 152; goto _test_eof - _test_eof533: m.cs = 533; goto _test_eof - _test_eof153: m.cs = 153; goto _test_eof - _test_eof154: m.cs = 154; goto _test_eof - _test_eof534: m.cs = 534; goto _test_eof - _test_eof535: m.cs = 535; goto _test_eof - _test_eof155: m.cs = 155; goto _test_eof - _test_eof156: m.cs = 156; goto _test_eof - _test_eof157: m.cs = 157; goto _test_eof - _test_eof536: m.cs = 536; goto _test_eof - _test_eof537: m.cs = 537; goto _test_eof - _test_eof538: m.cs = 538; goto _test_eof - _test_eof158: m.cs = 158; goto _test_eof - _test_eof539: m.cs = 539; goto _test_eof - _test_eof540: m.cs = 540; goto _test_eof - _test_eof541: m.cs = 541; goto _test_eof - _test_eof542: m.cs = 542; goto _test_eof - _test_eof543: m.cs = 543; goto _test_eof - _test_eof544: m.cs = 544; goto _test_eof - _test_eof545: m.cs = 545; goto _test_eof - _test_eof546: m.cs = 546; goto _test_eof - _test_eof547: m.cs = 547; goto _test_eof - _test_eof548: m.cs = 548; goto _test_eof - _test_eof549: m.cs = 549; goto _test_eof - _test_eof550: m.cs = 550; goto _test_eof - _test_eof551: m.cs = 551; goto _test_eof - _test_eof552: m.cs = 552; goto _test_eof - _test_eof553: m.cs = 553; goto _test_eof - _test_eof554: m.cs = 554; goto _test_eof - _test_eof555: m.cs = 555; goto _test_eof - _test_eof556: m.cs = 556; goto _test_eof - _test_eof557: m.cs = 557; goto _test_eof - _test_eof558: m.cs = 558; goto _test_eof - _test_eof159: m.cs = 159; goto _test_eof - _test_eof160: m.cs = 160; goto _test_eof - _test_eof559: m.cs = 559; goto _test_eof - _test_eof560: m.cs = 560; goto _test_eof - _test_eof561: m.cs = 561; goto _test_eof - _test_eof562: m.cs = 562; goto _test_eof - _test_eof563: m.cs = 563; goto _test_eof - _test_eof564: m.cs = 564; goto _test_eof - _test_eof565: m.cs = 565; goto _test_eof - _test_eof566: m.cs = 566; goto _test_eof - _test_eof567: m.cs = 567; goto _test_eof - _test_eof161: m.cs = 161; goto _test_eof - _test_eof162: m.cs = 162; goto _test_eof - _test_eof163: m.cs = 163; goto _test_eof - _test_eof568: m.cs = 568; goto _test_eof - 
_test_eof164: m.cs = 164; goto _test_eof - _test_eof165: m.cs = 165; goto _test_eof - _test_eof166: m.cs = 166; goto _test_eof - _test_eof569: m.cs = 569; goto _test_eof - _test_eof167: m.cs = 167; goto _test_eof - _test_eof168: m.cs = 168; goto _test_eof - _test_eof570: m.cs = 570; goto _test_eof - _test_eof571: m.cs = 571; goto _test_eof - _test_eof169: m.cs = 169; goto _test_eof - _test_eof170: m.cs = 170; goto _test_eof - _test_eof171: m.cs = 171; goto _test_eof - _test_eof172: m.cs = 172; goto _test_eof - _test_eof572: m.cs = 572; goto _test_eof - _test_eof173: m.cs = 173; goto _test_eof - _test_eof573: m.cs = 573; goto _test_eof - _test_eof574: m.cs = 574; goto _test_eof - _test_eof174: m.cs = 174; goto _test_eof - _test_eof575: m.cs = 575; goto _test_eof - _test_eof576: m.cs = 576; goto _test_eof - _test_eof577: m.cs = 577; goto _test_eof - _test_eof578: m.cs = 578; goto _test_eof - _test_eof579: m.cs = 579; goto _test_eof - _test_eof580: m.cs = 580; goto _test_eof - _test_eof581: m.cs = 581; goto _test_eof - _test_eof582: m.cs = 582; goto _test_eof - _test_eof583: m.cs = 583; goto _test_eof - _test_eof175: m.cs = 175; goto _test_eof - _test_eof176: m.cs = 176; goto _test_eof - _test_eof177: m.cs = 177; goto _test_eof - _test_eof584: m.cs = 584; goto _test_eof - _test_eof178: m.cs = 178; goto _test_eof - _test_eof179: m.cs = 179; goto _test_eof - _test_eof180: m.cs = 180; goto _test_eof - _test_eof585: m.cs = 585; goto _test_eof - _test_eof181: m.cs = 181; goto _test_eof - _test_eof182: m.cs = 182; goto _test_eof - _test_eof586: m.cs = 586; goto _test_eof - _test_eof587: m.cs = 587; goto _test_eof - _test_eof183: m.cs = 183; goto _test_eof - _test_eof184: m.cs = 184; goto _test_eof - _test_eof588: m.cs = 588; goto _test_eof - _test_eof185: m.cs = 185; goto _test_eof - _test_eof186: m.cs = 186; goto _test_eof - _test_eof589: m.cs = 589; goto _test_eof - _test_eof590: m.cs = 590; goto _test_eof - _test_eof591: m.cs = 591; goto _test_eof - _test_eof592: m.cs = 592; goto _test_eof - _test_eof593: m.cs = 593; goto _test_eof - _test_eof594: m.cs = 594; goto _test_eof - _test_eof595: m.cs = 595; goto _test_eof - _test_eof596: m.cs = 596; goto _test_eof - _test_eof187: m.cs = 187; goto _test_eof - _test_eof188: m.cs = 188; goto _test_eof - _test_eof189: m.cs = 189; goto _test_eof - _test_eof597: m.cs = 597; goto _test_eof - _test_eof190: m.cs = 190; goto _test_eof - _test_eof191: m.cs = 191; goto _test_eof - _test_eof192: m.cs = 192; goto _test_eof - _test_eof598: m.cs = 598; goto _test_eof - _test_eof193: m.cs = 193; goto _test_eof - _test_eof194: m.cs = 194; goto _test_eof - _test_eof599: m.cs = 599; goto _test_eof - _test_eof600: m.cs = 600; goto _test_eof - _test_eof195: m.cs = 195; goto _test_eof - _test_eof601: m.cs = 601; goto _test_eof - _test_eof196: m.cs = 196; goto _test_eof - _test_eof602: m.cs = 602; goto _test_eof - _test_eof603: m.cs = 603; goto _test_eof - _test_eof197: m.cs = 197; goto _test_eof - _test_eof198: m.cs = 198; goto _test_eof - _test_eof199: m.cs = 199; goto _test_eof - _test_eof604: m.cs = 604; goto _test_eof - _test_eof605: m.cs = 605; goto _test_eof - _test_eof606: m.cs = 606; goto _test_eof - _test_eof200: m.cs = 200; goto _test_eof - _test_eof201: m.cs = 201; goto _test_eof - _test_eof202: m.cs = 202; goto _test_eof - _test_eof607: m.cs = 607; goto _test_eof - _test_eof203: m.cs = 203; goto _test_eof - _test_eof204: m.cs = 204; goto _test_eof - _test_eof205: m.cs = 205; goto _test_eof + _test_eof269: ( m.cs) = 269; goto _test_eof + _test_eof1: ( m.cs) = 1; 
goto _test_eof + _test_eof2: ( m.cs) = 2; goto _test_eof + _test_eof3: ( m.cs) = 3; goto _test_eof + _test_eof4: ( m.cs) = 4; goto _test_eof + _test_eof5: ( m.cs) = 5; goto _test_eof + _test_eof6: ( m.cs) = 6; goto _test_eof + _test_eof270: ( m.cs) = 270; goto _test_eof + _test_eof271: ( m.cs) = 271; goto _test_eof + _test_eof272: ( m.cs) = 272; goto _test_eof + _test_eof7: ( m.cs) = 7; goto _test_eof + _test_eof8: ( m.cs) = 8; goto _test_eof + _test_eof9: ( m.cs) = 9; goto _test_eof + _test_eof10: ( m.cs) = 10; goto _test_eof + _test_eof11: ( m.cs) = 11; goto _test_eof + _test_eof12: ( m.cs) = 12; goto _test_eof + _test_eof13: ( m.cs) = 13; goto _test_eof + _test_eof14: ( m.cs) = 14; goto _test_eof + _test_eof15: ( m.cs) = 15; goto _test_eof + _test_eof16: ( m.cs) = 16; goto _test_eof + _test_eof17: ( m.cs) = 17; goto _test_eof + _test_eof18: ( m.cs) = 18; goto _test_eof + _test_eof19: ( m.cs) = 19; goto _test_eof + _test_eof20: ( m.cs) = 20; goto _test_eof + _test_eof21: ( m.cs) = 21; goto _test_eof + _test_eof22: ( m.cs) = 22; goto _test_eof + _test_eof23: ( m.cs) = 23; goto _test_eof + _test_eof24: ( m.cs) = 24; goto _test_eof + _test_eof25: ( m.cs) = 25; goto _test_eof + _test_eof26: ( m.cs) = 26; goto _test_eof + _test_eof27: ( m.cs) = 27; goto _test_eof + _test_eof28: ( m.cs) = 28; goto _test_eof + _test_eof29: ( m.cs) = 29; goto _test_eof + _test_eof30: ( m.cs) = 30; goto _test_eof + _test_eof31: ( m.cs) = 31; goto _test_eof + _test_eof273: ( m.cs) = 273; goto _test_eof + _test_eof274: ( m.cs) = 274; goto _test_eof + _test_eof32: ( m.cs) = 32; goto _test_eof + _test_eof33: ( m.cs) = 33; goto _test_eof + _test_eof275: ( m.cs) = 275; goto _test_eof + _test_eof276: ( m.cs) = 276; goto _test_eof + _test_eof277: ( m.cs) = 277; goto _test_eof + _test_eof34: ( m.cs) = 34; goto _test_eof + _test_eof278: ( m.cs) = 278; goto _test_eof + _test_eof279: ( m.cs) = 279; goto _test_eof + _test_eof280: ( m.cs) = 280; goto _test_eof + _test_eof281: ( m.cs) = 281; goto _test_eof + _test_eof282: ( m.cs) = 282; goto _test_eof + _test_eof283: ( m.cs) = 283; goto _test_eof + _test_eof284: ( m.cs) = 284; goto _test_eof + _test_eof285: ( m.cs) = 285; goto _test_eof + _test_eof286: ( m.cs) = 286; goto _test_eof + _test_eof287: ( m.cs) = 287; goto _test_eof + _test_eof288: ( m.cs) = 288; goto _test_eof + _test_eof289: ( m.cs) = 289; goto _test_eof + _test_eof290: ( m.cs) = 290; goto _test_eof + _test_eof291: ( m.cs) = 291; goto _test_eof + _test_eof292: ( m.cs) = 292; goto _test_eof + _test_eof293: ( m.cs) = 293; goto _test_eof + _test_eof294: ( m.cs) = 294; goto _test_eof + _test_eof295: ( m.cs) = 295; goto _test_eof + _test_eof35: ( m.cs) = 35; goto _test_eof + _test_eof36: ( m.cs) = 36; goto _test_eof + _test_eof296: ( m.cs) = 296; goto _test_eof + _test_eof297: ( m.cs) = 297; goto _test_eof + _test_eof298: ( m.cs) = 298; goto _test_eof + _test_eof37: ( m.cs) = 37; goto _test_eof + _test_eof38: ( m.cs) = 38; goto _test_eof + _test_eof39: ( m.cs) = 39; goto _test_eof + _test_eof40: ( m.cs) = 40; goto _test_eof + _test_eof41: ( m.cs) = 41; goto _test_eof + _test_eof299: ( m.cs) = 299; goto _test_eof + _test_eof300: ( m.cs) = 300; goto _test_eof + _test_eof301: ( m.cs) = 301; goto _test_eof + _test_eof302: ( m.cs) = 302; goto _test_eof + _test_eof42: ( m.cs) = 42; goto _test_eof + _test_eof303: ( m.cs) = 303; goto _test_eof + _test_eof304: ( m.cs) = 304; goto _test_eof + _test_eof305: ( m.cs) = 305; goto _test_eof + _test_eof306: ( m.cs) = 306; goto _test_eof + _test_eof307: ( m.cs) = 307; goto _test_eof + 
_test_eof308: ( m.cs) = 308; goto _test_eof + _test_eof309: ( m.cs) = 309; goto _test_eof + _test_eof310: ( m.cs) = 310; goto _test_eof + _test_eof311: ( m.cs) = 311; goto _test_eof + _test_eof312: ( m.cs) = 312; goto _test_eof + _test_eof313: ( m.cs) = 313; goto _test_eof + _test_eof314: ( m.cs) = 314; goto _test_eof + _test_eof315: ( m.cs) = 315; goto _test_eof + _test_eof316: ( m.cs) = 316; goto _test_eof + _test_eof317: ( m.cs) = 317; goto _test_eof + _test_eof318: ( m.cs) = 318; goto _test_eof + _test_eof319: ( m.cs) = 319; goto _test_eof + _test_eof320: ( m.cs) = 320; goto _test_eof + _test_eof321: ( m.cs) = 321; goto _test_eof + _test_eof322: ( m.cs) = 322; goto _test_eof + _test_eof323: ( m.cs) = 323; goto _test_eof + _test_eof324: ( m.cs) = 324; goto _test_eof + _test_eof43: ( m.cs) = 43; goto _test_eof + _test_eof44: ( m.cs) = 44; goto _test_eof + _test_eof45: ( m.cs) = 45; goto _test_eof + _test_eof46: ( m.cs) = 46; goto _test_eof + _test_eof47: ( m.cs) = 47; goto _test_eof + _test_eof48: ( m.cs) = 48; goto _test_eof + _test_eof49: ( m.cs) = 49; goto _test_eof + _test_eof50: ( m.cs) = 50; goto _test_eof + _test_eof51: ( m.cs) = 51; goto _test_eof + _test_eof52: ( m.cs) = 52; goto _test_eof + _test_eof325: ( m.cs) = 325; goto _test_eof + _test_eof326: ( m.cs) = 326; goto _test_eof + _test_eof327: ( m.cs) = 327; goto _test_eof + _test_eof53: ( m.cs) = 53; goto _test_eof + _test_eof54: ( m.cs) = 54; goto _test_eof + _test_eof55: ( m.cs) = 55; goto _test_eof + _test_eof56: ( m.cs) = 56; goto _test_eof + _test_eof57: ( m.cs) = 57; goto _test_eof + _test_eof58: ( m.cs) = 58; goto _test_eof + _test_eof328: ( m.cs) = 328; goto _test_eof + _test_eof329: ( m.cs) = 329; goto _test_eof + _test_eof59: ( m.cs) = 59; goto _test_eof + _test_eof330: ( m.cs) = 330; goto _test_eof + _test_eof331: ( m.cs) = 331; goto _test_eof + _test_eof332: ( m.cs) = 332; goto _test_eof + _test_eof333: ( m.cs) = 333; goto _test_eof + _test_eof334: ( m.cs) = 334; goto _test_eof + _test_eof335: ( m.cs) = 335; goto _test_eof + _test_eof336: ( m.cs) = 336; goto _test_eof + _test_eof337: ( m.cs) = 337; goto _test_eof + _test_eof338: ( m.cs) = 338; goto _test_eof + _test_eof339: ( m.cs) = 339; goto _test_eof + _test_eof340: ( m.cs) = 340; goto _test_eof + _test_eof341: ( m.cs) = 341; goto _test_eof + _test_eof342: ( m.cs) = 342; goto _test_eof + _test_eof343: ( m.cs) = 343; goto _test_eof + _test_eof344: ( m.cs) = 344; goto _test_eof + _test_eof345: ( m.cs) = 345; goto _test_eof + _test_eof346: ( m.cs) = 346; goto _test_eof + _test_eof347: ( m.cs) = 347; goto _test_eof + _test_eof348: ( m.cs) = 348; goto _test_eof + _test_eof349: ( m.cs) = 349; goto _test_eof + _test_eof60: ( m.cs) = 60; goto _test_eof + _test_eof350: ( m.cs) = 350; goto _test_eof + _test_eof351: ( m.cs) = 351; goto _test_eof + _test_eof352: ( m.cs) = 352; goto _test_eof + _test_eof61: ( m.cs) = 61; goto _test_eof + _test_eof353: ( m.cs) = 353; goto _test_eof + _test_eof354: ( m.cs) = 354; goto _test_eof + _test_eof355: ( m.cs) = 355; goto _test_eof + _test_eof356: ( m.cs) = 356; goto _test_eof + _test_eof357: ( m.cs) = 357; goto _test_eof + _test_eof358: ( m.cs) = 358; goto _test_eof + _test_eof359: ( m.cs) = 359; goto _test_eof + _test_eof360: ( m.cs) = 360; goto _test_eof + _test_eof361: ( m.cs) = 361; goto _test_eof + _test_eof362: ( m.cs) = 362; goto _test_eof + _test_eof363: ( m.cs) = 363; goto _test_eof + _test_eof364: ( m.cs) = 364; goto _test_eof + _test_eof365: ( m.cs) = 365; goto _test_eof + _test_eof366: ( m.cs) = 366; goto _test_eof + 
_test_eof367: ( m.cs) = 367; goto _test_eof + _test_eof368: ( m.cs) = 368; goto _test_eof + _test_eof369: ( m.cs) = 369; goto _test_eof + _test_eof370: ( m.cs) = 370; goto _test_eof + _test_eof371: ( m.cs) = 371; goto _test_eof + _test_eof372: ( m.cs) = 372; goto _test_eof + _test_eof62: ( m.cs) = 62; goto _test_eof + _test_eof63: ( m.cs) = 63; goto _test_eof + _test_eof64: ( m.cs) = 64; goto _test_eof + _test_eof65: ( m.cs) = 65; goto _test_eof + _test_eof66: ( m.cs) = 66; goto _test_eof + _test_eof373: ( m.cs) = 373; goto _test_eof + _test_eof67: ( m.cs) = 67; goto _test_eof + _test_eof68: ( m.cs) = 68; goto _test_eof + _test_eof69: ( m.cs) = 69; goto _test_eof + _test_eof70: ( m.cs) = 70; goto _test_eof + _test_eof71: ( m.cs) = 71; goto _test_eof + _test_eof374: ( m.cs) = 374; goto _test_eof + _test_eof375: ( m.cs) = 375; goto _test_eof + _test_eof376: ( m.cs) = 376; goto _test_eof + _test_eof72: ( m.cs) = 72; goto _test_eof + _test_eof73: ( m.cs) = 73; goto _test_eof + _test_eof74: ( m.cs) = 74; goto _test_eof + _test_eof377: ( m.cs) = 377; goto _test_eof + _test_eof378: ( m.cs) = 378; goto _test_eof + _test_eof379: ( m.cs) = 379; goto _test_eof + _test_eof75: ( m.cs) = 75; goto _test_eof + _test_eof380: ( m.cs) = 380; goto _test_eof + _test_eof381: ( m.cs) = 381; goto _test_eof + _test_eof382: ( m.cs) = 382; goto _test_eof + _test_eof383: ( m.cs) = 383; goto _test_eof + _test_eof384: ( m.cs) = 384; goto _test_eof + _test_eof385: ( m.cs) = 385; goto _test_eof + _test_eof386: ( m.cs) = 386; goto _test_eof + _test_eof387: ( m.cs) = 387; goto _test_eof + _test_eof388: ( m.cs) = 388; goto _test_eof + _test_eof389: ( m.cs) = 389; goto _test_eof + _test_eof390: ( m.cs) = 390; goto _test_eof + _test_eof391: ( m.cs) = 391; goto _test_eof + _test_eof392: ( m.cs) = 392; goto _test_eof + _test_eof393: ( m.cs) = 393; goto _test_eof + _test_eof394: ( m.cs) = 394; goto _test_eof + _test_eof395: ( m.cs) = 395; goto _test_eof + _test_eof396: ( m.cs) = 396; goto _test_eof + _test_eof397: ( m.cs) = 397; goto _test_eof + _test_eof398: ( m.cs) = 398; goto _test_eof + _test_eof399: ( m.cs) = 399; goto _test_eof + _test_eof76: ( m.cs) = 76; goto _test_eof + _test_eof77: ( m.cs) = 77; goto _test_eof + _test_eof78: ( m.cs) = 78; goto _test_eof + _test_eof79: ( m.cs) = 79; goto _test_eof + _test_eof80: ( m.cs) = 80; goto _test_eof + _test_eof81: ( m.cs) = 81; goto _test_eof + _test_eof82: ( m.cs) = 82; goto _test_eof + _test_eof83: ( m.cs) = 83; goto _test_eof + _test_eof84: ( m.cs) = 84; goto _test_eof + _test_eof85: ( m.cs) = 85; goto _test_eof + _test_eof86: ( m.cs) = 86; goto _test_eof + _test_eof87: ( m.cs) = 87; goto _test_eof + _test_eof88: ( m.cs) = 88; goto _test_eof + _test_eof89: ( m.cs) = 89; goto _test_eof + _test_eof400: ( m.cs) = 400; goto _test_eof + _test_eof401: ( m.cs) = 401; goto _test_eof + _test_eof402: ( m.cs) = 402; goto _test_eof + _test_eof403: ( m.cs) = 403; goto _test_eof + _test_eof90: ( m.cs) = 90; goto _test_eof + _test_eof91: ( m.cs) = 91; goto _test_eof + _test_eof92: ( m.cs) = 92; goto _test_eof + _test_eof93: ( m.cs) = 93; goto _test_eof + _test_eof404: ( m.cs) = 404; goto _test_eof + _test_eof405: ( m.cs) = 405; goto _test_eof + _test_eof94: ( m.cs) = 94; goto _test_eof + _test_eof95: ( m.cs) = 95; goto _test_eof + _test_eof406: ( m.cs) = 406; goto _test_eof + _test_eof96: ( m.cs) = 96; goto _test_eof + _test_eof97: ( m.cs) = 97; goto _test_eof + _test_eof407: ( m.cs) = 407; goto _test_eof + _test_eof408: ( m.cs) = 408; goto _test_eof + _test_eof98: ( m.cs) = 98; goto 
_test_eof + _test_eof409: ( m.cs) = 409; goto _test_eof + _test_eof410: ( m.cs) = 410; goto _test_eof + _test_eof99: ( m.cs) = 99; goto _test_eof + _test_eof100: ( m.cs) = 100; goto _test_eof + _test_eof411: ( m.cs) = 411; goto _test_eof + _test_eof412: ( m.cs) = 412; goto _test_eof + _test_eof413: ( m.cs) = 413; goto _test_eof + _test_eof414: ( m.cs) = 414; goto _test_eof + _test_eof415: ( m.cs) = 415; goto _test_eof + _test_eof416: ( m.cs) = 416; goto _test_eof + _test_eof417: ( m.cs) = 417; goto _test_eof + _test_eof418: ( m.cs) = 418; goto _test_eof + _test_eof419: ( m.cs) = 419; goto _test_eof + _test_eof420: ( m.cs) = 420; goto _test_eof + _test_eof421: ( m.cs) = 421; goto _test_eof + _test_eof422: ( m.cs) = 422; goto _test_eof + _test_eof423: ( m.cs) = 423; goto _test_eof + _test_eof424: ( m.cs) = 424; goto _test_eof + _test_eof425: ( m.cs) = 425; goto _test_eof + _test_eof426: ( m.cs) = 426; goto _test_eof + _test_eof427: ( m.cs) = 427; goto _test_eof + _test_eof428: ( m.cs) = 428; goto _test_eof + _test_eof101: ( m.cs) = 101; goto _test_eof + _test_eof429: ( m.cs) = 429; goto _test_eof + _test_eof430: ( m.cs) = 430; goto _test_eof + _test_eof431: ( m.cs) = 431; goto _test_eof + _test_eof102: ( m.cs) = 102; goto _test_eof + _test_eof103: ( m.cs) = 103; goto _test_eof + _test_eof432: ( m.cs) = 432; goto _test_eof + _test_eof433: ( m.cs) = 433; goto _test_eof + _test_eof434: ( m.cs) = 434; goto _test_eof + _test_eof104: ( m.cs) = 104; goto _test_eof + _test_eof435: ( m.cs) = 435; goto _test_eof + _test_eof436: ( m.cs) = 436; goto _test_eof + _test_eof437: ( m.cs) = 437; goto _test_eof + _test_eof438: ( m.cs) = 438; goto _test_eof + _test_eof439: ( m.cs) = 439; goto _test_eof + _test_eof440: ( m.cs) = 440; goto _test_eof + _test_eof441: ( m.cs) = 441; goto _test_eof + _test_eof442: ( m.cs) = 442; goto _test_eof + _test_eof443: ( m.cs) = 443; goto _test_eof + _test_eof444: ( m.cs) = 444; goto _test_eof + _test_eof445: ( m.cs) = 445; goto _test_eof + _test_eof446: ( m.cs) = 446; goto _test_eof + _test_eof447: ( m.cs) = 447; goto _test_eof + _test_eof448: ( m.cs) = 448; goto _test_eof + _test_eof449: ( m.cs) = 449; goto _test_eof + _test_eof450: ( m.cs) = 450; goto _test_eof + _test_eof451: ( m.cs) = 451; goto _test_eof + _test_eof452: ( m.cs) = 452; goto _test_eof + _test_eof453: ( m.cs) = 453; goto _test_eof + _test_eof454: ( m.cs) = 454; goto _test_eof + _test_eof105: ( m.cs) = 105; goto _test_eof + _test_eof455: ( m.cs) = 455; goto _test_eof + _test_eof456: ( m.cs) = 456; goto _test_eof + _test_eof457: ( m.cs) = 457; goto _test_eof + _test_eof458: ( m.cs) = 458; goto _test_eof + _test_eof459: ( m.cs) = 459; goto _test_eof + _test_eof460: ( m.cs) = 460; goto _test_eof + _test_eof461: ( m.cs) = 461; goto _test_eof + _test_eof462: ( m.cs) = 462; goto _test_eof + _test_eof463: ( m.cs) = 463; goto _test_eof + _test_eof464: ( m.cs) = 464; goto _test_eof + _test_eof465: ( m.cs) = 465; goto _test_eof + _test_eof466: ( m.cs) = 466; goto _test_eof + _test_eof467: ( m.cs) = 467; goto _test_eof + _test_eof468: ( m.cs) = 468; goto _test_eof + _test_eof469: ( m.cs) = 469; goto _test_eof + _test_eof470: ( m.cs) = 470; goto _test_eof + _test_eof471: ( m.cs) = 471; goto _test_eof + _test_eof472: ( m.cs) = 472; goto _test_eof + _test_eof473: ( m.cs) = 473; goto _test_eof + _test_eof474: ( m.cs) = 474; goto _test_eof + _test_eof475: ( m.cs) = 475; goto _test_eof + _test_eof476: ( m.cs) = 476; goto _test_eof + _test_eof106: ( m.cs) = 106; goto _test_eof + _test_eof107: ( m.cs) = 107; goto _test_eof + 
_test_eof108: ( m.cs) = 108; goto _test_eof + _test_eof109: ( m.cs) = 109; goto _test_eof + _test_eof110: ( m.cs) = 110; goto _test_eof + _test_eof477: ( m.cs) = 477; goto _test_eof + _test_eof111: ( m.cs) = 111; goto _test_eof + _test_eof478: ( m.cs) = 478; goto _test_eof + _test_eof479: ( m.cs) = 479; goto _test_eof + _test_eof112: ( m.cs) = 112; goto _test_eof + _test_eof480: ( m.cs) = 480; goto _test_eof + _test_eof481: ( m.cs) = 481; goto _test_eof + _test_eof482: ( m.cs) = 482; goto _test_eof + _test_eof483: ( m.cs) = 483; goto _test_eof + _test_eof484: ( m.cs) = 484; goto _test_eof + _test_eof485: ( m.cs) = 485; goto _test_eof + _test_eof486: ( m.cs) = 486; goto _test_eof + _test_eof487: ( m.cs) = 487; goto _test_eof + _test_eof488: ( m.cs) = 488; goto _test_eof + _test_eof113: ( m.cs) = 113; goto _test_eof + _test_eof114: ( m.cs) = 114; goto _test_eof + _test_eof115: ( m.cs) = 115; goto _test_eof + _test_eof489: ( m.cs) = 489; goto _test_eof + _test_eof116: ( m.cs) = 116; goto _test_eof + _test_eof117: ( m.cs) = 117; goto _test_eof + _test_eof118: ( m.cs) = 118; goto _test_eof + _test_eof490: ( m.cs) = 490; goto _test_eof + _test_eof119: ( m.cs) = 119; goto _test_eof + _test_eof120: ( m.cs) = 120; goto _test_eof + _test_eof491: ( m.cs) = 491; goto _test_eof + _test_eof492: ( m.cs) = 492; goto _test_eof + _test_eof121: ( m.cs) = 121; goto _test_eof + _test_eof122: ( m.cs) = 122; goto _test_eof + _test_eof123: ( m.cs) = 123; goto _test_eof + _test_eof124: ( m.cs) = 124; goto _test_eof + _test_eof493: ( m.cs) = 493; goto _test_eof + _test_eof494: ( m.cs) = 494; goto _test_eof + _test_eof495: ( m.cs) = 495; goto _test_eof + _test_eof125: ( m.cs) = 125; goto _test_eof + _test_eof496: ( m.cs) = 496; goto _test_eof + _test_eof497: ( m.cs) = 497; goto _test_eof + _test_eof498: ( m.cs) = 498; goto _test_eof + _test_eof499: ( m.cs) = 499; goto _test_eof + _test_eof500: ( m.cs) = 500; goto _test_eof + _test_eof501: ( m.cs) = 501; goto _test_eof + _test_eof502: ( m.cs) = 502; goto _test_eof + _test_eof503: ( m.cs) = 503; goto _test_eof + _test_eof504: ( m.cs) = 504; goto _test_eof + _test_eof505: ( m.cs) = 505; goto _test_eof + _test_eof506: ( m.cs) = 506; goto _test_eof + _test_eof507: ( m.cs) = 507; goto _test_eof + _test_eof508: ( m.cs) = 508; goto _test_eof + _test_eof509: ( m.cs) = 509; goto _test_eof + _test_eof510: ( m.cs) = 510; goto _test_eof + _test_eof511: ( m.cs) = 511; goto _test_eof + _test_eof512: ( m.cs) = 512; goto _test_eof + _test_eof513: ( m.cs) = 513; goto _test_eof + _test_eof514: ( m.cs) = 514; goto _test_eof + _test_eof515: ( m.cs) = 515; goto _test_eof + _test_eof126: ( m.cs) = 126; goto _test_eof + _test_eof127: ( m.cs) = 127; goto _test_eof + _test_eof516: ( m.cs) = 516; goto _test_eof + _test_eof517: ( m.cs) = 517; goto _test_eof + _test_eof518: ( m.cs) = 518; goto _test_eof + _test_eof519: ( m.cs) = 519; goto _test_eof + _test_eof520: ( m.cs) = 520; goto _test_eof + _test_eof521: ( m.cs) = 521; goto _test_eof + _test_eof522: ( m.cs) = 522; goto _test_eof + _test_eof523: ( m.cs) = 523; goto _test_eof + _test_eof524: ( m.cs) = 524; goto _test_eof + _test_eof128: ( m.cs) = 128; goto _test_eof + _test_eof129: ( m.cs) = 129; goto _test_eof + _test_eof130: ( m.cs) = 130; goto _test_eof + _test_eof525: ( m.cs) = 525; goto _test_eof + _test_eof131: ( m.cs) = 131; goto _test_eof + _test_eof132: ( m.cs) = 132; goto _test_eof + _test_eof133: ( m.cs) = 133; goto _test_eof + _test_eof526: ( m.cs) = 526; goto _test_eof + _test_eof134: ( m.cs) = 134; goto _test_eof + 
_test_eof135: ( m.cs) = 135; goto _test_eof + _test_eof527: ( m.cs) = 527; goto _test_eof + _test_eof528: ( m.cs) = 528; goto _test_eof + _test_eof136: ( m.cs) = 136; goto _test_eof + _test_eof137: ( m.cs) = 137; goto _test_eof + _test_eof138: ( m.cs) = 138; goto _test_eof + _test_eof529: ( m.cs) = 529; goto _test_eof + _test_eof530: ( m.cs) = 530; goto _test_eof + _test_eof139: ( m.cs) = 139; goto _test_eof + _test_eof531: ( m.cs) = 531; goto _test_eof + _test_eof140: ( m.cs) = 140; goto _test_eof + _test_eof532: ( m.cs) = 532; goto _test_eof + _test_eof533: ( m.cs) = 533; goto _test_eof + _test_eof534: ( m.cs) = 534; goto _test_eof + _test_eof535: ( m.cs) = 535; goto _test_eof + _test_eof536: ( m.cs) = 536; goto _test_eof + _test_eof537: ( m.cs) = 537; goto _test_eof + _test_eof538: ( m.cs) = 538; goto _test_eof + _test_eof539: ( m.cs) = 539; goto _test_eof + _test_eof141: ( m.cs) = 141; goto _test_eof + _test_eof142: ( m.cs) = 142; goto _test_eof + _test_eof143: ( m.cs) = 143; goto _test_eof + _test_eof540: ( m.cs) = 540; goto _test_eof + _test_eof144: ( m.cs) = 144; goto _test_eof + _test_eof145: ( m.cs) = 145; goto _test_eof + _test_eof146: ( m.cs) = 146; goto _test_eof + _test_eof541: ( m.cs) = 541; goto _test_eof + _test_eof147: ( m.cs) = 147; goto _test_eof + _test_eof148: ( m.cs) = 148; goto _test_eof + _test_eof542: ( m.cs) = 542; goto _test_eof + _test_eof543: ( m.cs) = 543; goto _test_eof + _test_eof544: ( m.cs) = 544; goto _test_eof + _test_eof545: ( m.cs) = 545; goto _test_eof + _test_eof546: ( m.cs) = 546; goto _test_eof + _test_eof547: ( m.cs) = 547; goto _test_eof + _test_eof548: ( m.cs) = 548; goto _test_eof + _test_eof549: ( m.cs) = 549; goto _test_eof + _test_eof550: ( m.cs) = 550; goto _test_eof + _test_eof551: ( m.cs) = 551; goto _test_eof + _test_eof552: ( m.cs) = 552; goto _test_eof + _test_eof553: ( m.cs) = 553; goto _test_eof + _test_eof554: ( m.cs) = 554; goto _test_eof + _test_eof555: ( m.cs) = 555; goto _test_eof + _test_eof556: ( m.cs) = 556; goto _test_eof + _test_eof557: ( m.cs) = 557; goto _test_eof + _test_eof558: ( m.cs) = 558; goto _test_eof + _test_eof559: ( m.cs) = 559; goto _test_eof + _test_eof560: ( m.cs) = 560; goto _test_eof + _test_eof561: ( m.cs) = 561; goto _test_eof + _test_eof149: ( m.cs) = 149; goto _test_eof + _test_eof150: ( m.cs) = 150; goto _test_eof + _test_eof562: ( m.cs) = 562; goto _test_eof + _test_eof563: ( m.cs) = 563; goto _test_eof + _test_eof564: ( m.cs) = 564; goto _test_eof + _test_eof151: ( m.cs) = 151; goto _test_eof + _test_eof565: ( m.cs) = 565; goto _test_eof + _test_eof566: ( m.cs) = 566; goto _test_eof + _test_eof152: ( m.cs) = 152; goto _test_eof + _test_eof567: ( m.cs) = 567; goto _test_eof + _test_eof568: ( m.cs) = 568; goto _test_eof + _test_eof569: ( m.cs) = 569; goto _test_eof + _test_eof570: ( m.cs) = 570; goto _test_eof + _test_eof571: ( m.cs) = 571; goto _test_eof + _test_eof572: ( m.cs) = 572; goto _test_eof + _test_eof573: ( m.cs) = 573; goto _test_eof + _test_eof574: ( m.cs) = 574; goto _test_eof + _test_eof575: ( m.cs) = 575; goto _test_eof + _test_eof576: ( m.cs) = 576; goto _test_eof + _test_eof577: ( m.cs) = 577; goto _test_eof + _test_eof578: ( m.cs) = 578; goto _test_eof + _test_eof579: ( m.cs) = 579; goto _test_eof + _test_eof580: ( m.cs) = 580; goto _test_eof + _test_eof581: ( m.cs) = 581; goto _test_eof + _test_eof582: ( m.cs) = 582; goto _test_eof + _test_eof583: ( m.cs) = 583; goto _test_eof + _test_eof584: ( m.cs) = 584; goto _test_eof + _test_eof153: ( m.cs) = 153; goto _test_eof + 
_test_eof154: ( m.cs) = 154; goto _test_eof + _test_eof585: ( m.cs) = 585; goto _test_eof + _test_eof155: ( m.cs) = 155; goto _test_eof + _test_eof586: ( m.cs) = 586; goto _test_eof + _test_eof587: ( m.cs) = 587; goto _test_eof + _test_eof588: ( m.cs) = 588; goto _test_eof + _test_eof589: ( m.cs) = 589; goto _test_eof + _test_eof590: ( m.cs) = 590; goto _test_eof + _test_eof591: ( m.cs) = 591; goto _test_eof + _test_eof592: ( m.cs) = 592; goto _test_eof + _test_eof593: ( m.cs) = 593; goto _test_eof + _test_eof156: ( m.cs) = 156; goto _test_eof + _test_eof157: ( m.cs) = 157; goto _test_eof + _test_eof158: ( m.cs) = 158; goto _test_eof + _test_eof594: ( m.cs) = 594; goto _test_eof + _test_eof159: ( m.cs) = 159; goto _test_eof + _test_eof160: ( m.cs) = 160; goto _test_eof + _test_eof161: ( m.cs) = 161; goto _test_eof + _test_eof595: ( m.cs) = 595; goto _test_eof + _test_eof162: ( m.cs) = 162; goto _test_eof + _test_eof163: ( m.cs) = 163; goto _test_eof + _test_eof596: ( m.cs) = 596; goto _test_eof + _test_eof597: ( m.cs) = 597; goto _test_eof + _test_eof164: ( m.cs) = 164; goto _test_eof + _test_eof165: ( m.cs) = 165; goto _test_eof + _test_eof166: ( m.cs) = 166; goto _test_eof + _test_eof167: ( m.cs) = 167; goto _test_eof + _test_eof168: ( m.cs) = 168; goto _test_eof + _test_eof169: ( m.cs) = 169; goto _test_eof + _test_eof598: ( m.cs) = 598; goto _test_eof + _test_eof599: ( m.cs) = 599; goto _test_eof + _test_eof600: ( m.cs) = 600; goto _test_eof + _test_eof601: ( m.cs) = 601; goto _test_eof + _test_eof602: ( m.cs) = 602; goto _test_eof + _test_eof603: ( m.cs) = 603; goto _test_eof + _test_eof604: ( m.cs) = 604; goto _test_eof + _test_eof605: ( m.cs) = 605; goto _test_eof + _test_eof606: ( m.cs) = 606; goto _test_eof + _test_eof607: ( m.cs) = 607; goto _test_eof + _test_eof608: ( m.cs) = 608; goto _test_eof + _test_eof609: ( m.cs) = 609; goto _test_eof + _test_eof610: ( m.cs) = 610; goto _test_eof + _test_eof611: ( m.cs) = 611; goto _test_eof + _test_eof612: ( m.cs) = 612; goto _test_eof + _test_eof613: ( m.cs) = 613; goto _test_eof + _test_eof614: ( m.cs) = 614; goto _test_eof + _test_eof615: ( m.cs) = 615; goto _test_eof + _test_eof616: ( m.cs) = 616; goto _test_eof + _test_eof170: ( m.cs) = 170; goto _test_eof + _test_eof171: ( m.cs) = 171; goto _test_eof + _test_eof172: ( m.cs) = 172; goto _test_eof + _test_eof617: ( m.cs) = 617; goto _test_eof + _test_eof618: ( m.cs) = 618; goto _test_eof + _test_eof619: ( m.cs) = 619; goto _test_eof + _test_eof173: ( m.cs) = 173; goto _test_eof + _test_eof620: ( m.cs) = 620; goto _test_eof + _test_eof621: ( m.cs) = 621; goto _test_eof + _test_eof174: ( m.cs) = 174; goto _test_eof + _test_eof622: ( m.cs) = 622; goto _test_eof + _test_eof623: ( m.cs) = 623; goto _test_eof + _test_eof624: ( m.cs) = 624; goto _test_eof + _test_eof625: ( m.cs) = 625; goto _test_eof + _test_eof626: ( m.cs) = 626; goto _test_eof + _test_eof175: ( m.cs) = 175; goto _test_eof + _test_eof176: ( m.cs) = 176; goto _test_eof + _test_eof177: ( m.cs) = 177; goto _test_eof + _test_eof627: ( m.cs) = 627; goto _test_eof + _test_eof178: ( m.cs) = 178; goto _test_eof + _test_eof179: ( m.cs) = 179; goto _test_eof + _test_eof180: ( m.cs) = 180; goto _test_eof + _test_eof628: ( m.cs) = 628; goto _test_eof + _test_eof181: ( m.cs) = 181; goto _test_eof + _test_eof182: ( m.cs) = 182; goto _test_eof + _test_eof629: ( m.cs) = 629; goto _test_eof + _test_eof630: ( m.cs) = 630; goto _test_eof + _test_eof183: ( m.cs) = 183; goto _test_eof + _test_eof631: ( m.cs) = 631; goto _test_eof + 
_test_eof632: ( m.cs) = 632; goto _test_eof + _test_eof633: ( m.cs) = 633; goto _test_eof + _test_eof184: ( m.cs) = 184; goto _test_eof + _test_eof185: ( m.cs) = 185; goto _test_eof + _test_eof186: ( m.cs) = 186; goto _test_eof + _test_eof634: ( m.cs) = 634; goto _test_eof + _test_eof187: ( m.cs) = 187; goto _test_eof + _test_eof188: ( m.cs) = 188; goto _test_eof + _test_eof189: ( m.cs) = 189; goto _test_eof + _test_eof635: ( m.cs) = 635; goto _test_eof + _test_eof190: ( m.cs) = 190; goto _test_eof + _test_eof191: ( m.cs) = 191; goto _test_eof + _test_eof636: ( m.cs) = 636; goto _test_eof + _test_eof637: ( m.cs) = 637; goto _test_eof + _test_eof192: ( m.cs) = 192; goto _test_eof + _test_eof193: ( m.cs) = 193; goto _test_eof + _test_eof194: ( m.cs) = 194; goto _test_eof + _test_eof638: ( m.cs) = 638; goto _test_eof + _test_eof195: ( m.cs) = 195; goto _test_eof + _test_eof196: ( m.cs) = 196; goto _test_eof + _test_eof639: ( m.cs) = 639; goto _test_eof + _test_eof640: ( m.cs) = 640; goto _test_eof + _test_eof641: ( m.cs) = 641; goto _test_eof + _test_eof642: ( m.cs) = 642; goto _test_eof + _test_eof643: ( m.cs) = 643; goto _test_eof + _test_eof644: ( m.cs) = 644; goto _test_eof + _test_eof645: ( m.cs) = 645; goto _test_eof + _test_eof646: ( m.cs) = 646; goto _test_eof + _test_eof197: ( m.cs) = 197; goto _test_eof + _test_eof198: ( m.cs) = 198; goto _test_eof + _test_eof199: ( m.cs) = 199; goto _test_eof + _test_eof647: ( m.cs) = 647; goto _test_eof + _test_eof200: ( m.cs) = 200; goto _test_eof + _test_eof201: ( m.cs) = 201; goto _test_eof + _test_eof202: ( m.cs) = 202; goto _test_eof + _test_eof648: ( m.cs) = 648; goto _test_eof + _test_eof203: ( m.cs) = 203; goto _test_eof + _test_eof204: ( m.cs) = 204; goto _test_eof + _test_eof649: ( m.cs) = 649; goto _test_eof + _test_eof650: ( m.cs) = 650; goto _test_eof + _test_eof205: ( m.cs) = 205; goto _test_eof + _test_eof206: ( m.cs) = 206; goto _test_eof + _test_eof207: ( m.cs) = 207; goto _test_eof + _test_eof651: ( m.cs) = 651; goto _test_eof + _test_eof652: ( m.cs) = 652; goto _test_eof + _test_eof653: ( m.cs) = 653; goto _test_eof + _test_eof654: ( m.cs) = 654; goto _test_eof + _test_eof655: ( m.cs) = 655; goto _test_eof + _test_eof656: ( m.cs) = 656; goto _test_eof + _test_eof657: ( m.cs) = 657; goto _test_eof + _test_eof658: ( m.cs) = 658; goto _test_eof + _test_eof659: ( m.cs) = 659; goto _test_eof + _test_eof660: ( m.cs) = 660; goto _test_eof + _test_eof661: ( m.cs) = 661; goto _test_eof + _test_eof662: ( m.cs) = 662; goto _test_eof + _test_eof663: ( m.cs) = 663; goto _test_eof + _test_eof664: ( m.cs) = 664; goto _test_eof + _test_eof665: ( m.cs) = 665; goto _test_eof + _test_eof666: ( m.cs) = 666; goto _test_eof + _test_eof667: ( m.cs) = 667; goto _test_eof + _test_eof668: ( m.cs) = 668; goto _test_eof + _test_eof669: ( m.cs) = 669; goto _test_eof + _test_eof208: ( m.cs) = 208; goto _test_eof + _test_eof209: ( m.cs) = 209; goto _test_eof + _test_eof210: ( m.cs) = 210; goto _test_eof + _test_eof211: ( m.cs) = 211; goto _test_eof + _test_eof212: ( m.cs) = 212; goto _test_eof + _test_eof670: ( m.cs) = 670; goto _test_eof + _test_eof213: ( m.cs) = 213; goto _test_eof + _test_eof214: ( m.cs) = 214; goto _test_eof + _test_eof671: ( m.cs) = 671; goto _test_eof + _test_eof672: ( m.cs) = 672; goto _test_eof + _test_eof673: ( m.cs) = 673; goto _test_eof + _test_eof674: ( m.cs) = 674; goto _test_eof + _test_eof675: ( m.cs) = 675; goto _test_eof + _test_eof676: ( m.cs) = 676; goto _test_eof + _test_eof677: ( m.cs) = 677; goto _test_eof + 
_test_eof678: ( m.cs) = 678; goto _test_eof + _test_eof679: ( m.cs) = 679; goto _test_eof + _test_eof215: ( m.cs) = 215; goto _test_eof + _test_eof216: ( m.cs) = 216; goto _test_eof + _test_eof217: ( m.cs) = 217; goto _test_eof + _test_eof680: ( m.cs) = 680; goto _test_eof + _test_eof218: ( m.cs) = 218; goto _test_eof + _test_eof219: ( m.cs) = 219; goto _test_eof + _test_eof220: ( m.cs) = 220; goto _test_eof + _test_eof681: ( m.cs) = 681; goto _test_eof + _test_eof221: ( m.cs) = 221; goto _test_eof + _test_eof222: ( m.cs) = 222; goto _test_eof + _test_eof682: ( m.cs) = 682; goto _test_eof + _test_eof683: ( m.cs) = 683; goto _test_eof + _test_eof223: ( m.cs) = 223; goto _test_eof + _test_eof224: ( m.cs) = 224; goto _test_eof + _test_eof225: ( m.cs) = 225; goto _test_eof + _test_eof684: ( m.cs) = 684; goto _test_eof + _test_eof226: ( m.cs) = 226; goto _test_eof + _test_eof227: ( m.cs) = 227; goto _test_eof + _test_eof685: ( m.cs) = 685; goto _test_eof + _test_eof686: ( m.cs) = 686; goto _test_eof + _test_eof687: ( m.cs) = 687; goto _test_eof + _test_eof688: ( m.cs) = 688; goto _test_eof + _test_eof689: ( m.cs) = 689; goto _test_eof + _test_eof690: ( m.cs) = 690; goto _test_eof + _test_eof691: ( m.cs) = 691; goto _test_eof + _test_eof692: ( m.cs) = 692; goto _test_eof + _test_eof228: ( m.cs) = 228; goto _test_eof + _test_eof229: ( m.cs) = 229; goto _test_eof + _test_eof230: ( m.cs) = 230; goto _test_eof + _test_eof693: ( m.cs) = 693; goto _test_eof + _test_eof231: ( m.cs) = 231; goto _test_eof + _test_eof232: ( m.cs) = 232; goto _test_eof + _test_eof694: ( m.cs) = 694; goto _test_eof + _test_eof695: ( m.cs) = 695; goto _test_eof + _test_eof696: ( m.cs) = 696; goto _test_eof + _test_eof697: ( m.cs) = 697; goto _test_eof + _test_eof698: ( m.cs) = 698; goto _test_eof + _test_eof699: ( m.cs) = 699; goto _test_eof + _test_eof700: ( m.cs) = 700; goto _test_eof + _test_eof701: ( m.cs) = 701; goto _test_eof + _test_eof233: ( m.cs) = 233; goto _test_eof + _test_eof234: ( m.cs) = 234; goto _test_eof + _test_eof235: ( m.cs) = 235; goto _test_eof + _test_eof702: ( m.cs) = 702; goto _test_eof + _test_eof236: ( m.cs) = 236; goto _test_eof + _test_eof237: ( m.cs) = 237; goto _test_eof + _test_eof238: ( m.cs) = 238; goto _test_eof + _test_eof703: ( m.cs) = 703; goto _test_eof + _test_eof239: ( m.cs) = 239; goto _test_eof + _test_eof240: ( m.cs) = 240; goto _test_eof + _test_eof704: ( m.cs) = 704; goto _test_eof + _test_eof705: ( m.cs) = 705; goto _test_eof + _test_eof241: ( m.cs) = 241; goto _test_eof + _test_eof242: ( m.cs) = 242; goto _test_eof + _test_eof243: ( m.cs) = 243; goto _test_eof + _test_eof706: ( m.cs) = 706; goto _test_eof + _test_eof707: ( m.cs) = 707; goto _test_eof + _test_eof708: ( m.cs) = 708; goto _test_eof + _test_eof709: ( m.cs) = 709; goto _test_eof + _test_eof710: ( m.cs) = 710; goto _test_eof + _test_eof711: ( m.cs) = 711; goto _test_eof + _test_eof712: ( m.cs) = 712; goto _test_eof + _test_eof713: ( m.cs) = 713; goto _test_eof + _test_eof714: ( m.cs) = 714; goto _test_eof + _test_eof715: ( m.cs) = 715; goto _test_eof + _test_eof716: ( m.cs) = 716; goto _test_eof + _test_eof717: ( m.cs) = 717; goto _test_eof + _test_eof718: ( m.cs) = 718; goto _test_eof + _test_eof719: ( m.cs) = 719; goto _test_eof + _test_eof720: ( m.cs) = 720; goto _test_eof + _test_eof721: ( m.cs) = 721; goto _test_eof + _test_eof722: ( m.cs) = 722; goto _test_eof + _test_eof723: ( m.cs) = 723; goto _test_eof + _test_eof724: ( m.cs) = 724; goto _test_eof + _test_eof244: ( m.cs) = 244; goto _test_eof + 
_test_eof245: ( m.cs) = 245; goto _test_eof + _test_eof725: ( m.cs) = 725; goto _test_eof + _test_eof246: ( m.cs) = 246; goto _test_eof + _test_eof247: ( m.cs) = 247; goto _test_eof + _test_eof726: ( m.cs) = 726; goto _test_eof + _test_eof727: ( m.cs) = 727; goto _test_eof + _test_eof728: ( m.cs) = 728; goto _test_eof + _test_eof729: ( m.cs) = 729; goto _test_eof + _test_eof730: ( m.cs) = 730; goto _test_eof + _test_eof731: ( m.cs) = 731; goto _test_eof + _test_eof732: ( m.cs) = 732; goto _test_eof + _test_eof733: ( m.cs) = 733; goto _test_eof + _test_eof248: ( m.cs) = 248; goto _test_eof + _test_eof249: ( m.cs) = 249; goto _test_eof + _test_eof250: ( m.cs) = 250; goto _test_eof + _test_eof734: ( m.cs) = 734; goto _test_eof + _test_eof251: ( m.cs) = 251; goto _test_eof + _test_eof252: ( m.cs) = 252; goto _test_eof + _test_eof253: ( m.cs) = 253; goto _test_eof + _test_eof735: ( m.cs) = 735; goto _test_eof + _test_eof254: ( m.cs) = 254; goto _test_eof + _test_eof255: ( m.cs) = 255; goto _test_eof + _test_eof736: ( m.cs) = 736; goto _test_eof + _test_eof737: ( m.cs) = 737; goto _test_eof + _test_eof256: ( m.cs) = 256; goto _test_eof + _test_eof257: ( m.cs) = 257; goto _test_eof + _test_eof738: ( m.cs) = 738; goto _test_eof + _test_eof260: ( m.cs) = 260; goto _test_eof + _test_eof740: ( m.cs) = 740; goto _test_eof + _test_eof741: ( m.cs) = 741; goto _test_eof + _test_eof261: ( m.cs) = 261; goto _test_eof + _test_eof262: ( m.cs) = 262; goto _test_eof + _test_eof263: ( m.cs) = 263; goto _test_eof + _test_eof264: ( m.cs) = 264; goto _test_eof + _test_eof742: ( m.cs) = 742; goto _test_eof + _test_eof265: ( m.cs) = 265; goto _test_eof + _test_eof743: ( m.cs) = 743; goto _test_eof + _test_eof266: ( m.cs) = 266; goto _test_eof + _test_eof267: ( m.cs) = 267; goto _test_eof + _test_eof268: ( m.cs) = 268; goto _test_eof + _test_eof739: ( m.cs) = 739; goto _test_eof + _test_eof258: ( m.cs) = 258; goto _test_eof + _test_eof259: ( m.cs) = 259; goto _test_eof _test_eof: {} if ( m.p) == ( m.eof) { - switch m.cs { - case 206, 207, 208, 210, 243, 244, 246, 265, 266, 268, 287, 289, 317, 318, 319, 320, 322, 323, 324, 343, 344, 346, 365, 366, 368, 387, 388, 403, 404, 405, 407, 426, 427, 429, 430, 431, 450, 451, 452, 453, 455, 474, 475, 476, 478, 497, 498, 500, 501, 502, 522, 537, 538, 540, 573, 602, 603, 605, 606: -//line plugins/parsers/influx/machine.go.rl:22 + switch ( m.cs) { + case 7, 260: +//line plugins/parsers/influx/machine.go.rl:32 - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 1, 133: -//line plugins/parsers/influx/machine.go.rl:56 - - m.err = ErrParse + err = ErrNameParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } - case 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 38, 39, 40, 41, 42, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 90, 91, 92, 93, 94, 129, 132, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194: -//line plugins/parsers/influx/machine.go.rl:35 + case 2, 3, 4, 5, 6, 27, 30, 31, 34, 35, 36, 48, 49, 50, 51, 52, 72, 73, 75, 92, 102, 104, 140, 152, 155, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 
244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256: +//line plugins/parsers/influx/machine.go.rl:39 - m.err = ErrFieldParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 + case 12, 13, 14, 21, 23, 24, 262, 263, 264, 265, 266, 267: +//line plugins/parsers/influx/machine.go.rl:46 - m.err = ErrParse + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } - case 28, 29, 30, 36, 37, 200, 201, 202, 203, 204: -//line plugins/parsers/influx/machine.go.rl:42 + case 243: +//line plugins/parsers/influx/machine.go.rl:53 - m.err = ErrTagParse + err = ErrTimestampParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 + case 740: +//line plugins/parsers/influx/machine.go.rl:86 - m.err = ErrParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 742, 743: +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + + case 270, 271, 272, 273, 274, 276, 277, 296, 297, 298, 300, 301, 304, 305, 326, 327, 328, 329, 331, 375, 376, 378, 379, 401, 402, 407, 408, 410, 430, 431, 433, 434, 456, 457, 617, 620: +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 9, 37, 39, 164, 166: +//line plugins/parsers/influx/machine.go.rl:32 + + err = ErrNameParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 33, 74, 103, 169, 207: +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:53 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 19, 43, 44, 45, 57, 58, 60, 62, 67, 69, 70, 76, 77, 78, 83, 85, 87, 88, 96, 97, 99, 100, 101, 106, 107, 108, 121, 122, 136, 137: +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 59: +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:53 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 269: +//line plugins/parsers/influx/machine.go.rl:82 + + m.beginMetric = true + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 1: +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 299, 302, 306, 374, 398, 399, 
403, 404, 405, 529, 563, 564, 566: +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 15, 22: +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + + case 350, 351, 352, 354, 373, 429, 453, 454, 458, 478, 494, 495, 497: +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 623, 674, 688, 728: +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 624, 677, 691, 731: +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 325, 618, 619, 621, 622, 625, 631, 632, 670, 671, 672, 673, 675, 676, 678, 684, 685, 686, 687, 689, 690, 692, 725, 726, 727, 729, 730, 732: +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 626, 627, 628, 629, 630, 633, 634, 635, 636, 637, 679, 680, 681, 682, 683, 733, 734, 735, 736, 737: +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 275, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 330, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 377, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 409, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 432, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 598, 599, 600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724: +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true case 8: -//line plugins/parsers/influx/machine.go.rl:49 +//line plugins/parsers/influx/machine.go.rl:32 - m.err = ErrTimestampParse + err = ErrNameParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; 
goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 +//line plugins/parsers/influx/machine.go.rl:86 - m.err = ErrParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } - case 604: -//line plugins/parsers/influx/machine.go.rl:72 + case 98: +//line plugins/parsers/influx/machine.go.rl:46 - m.handler.SetMeasurement(m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 607: -//line plugins/parsers/influx/machine.go.rl:80 - - m.handler.AddTag(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 233, 293, 307, 393, 526, 562, 578, 591: -//line plugins/parsers/influx/machine.go.rl:88 - - m.handler.AddInt(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 236, 296, 310, 396, 529, 565, 581, 594: -//line plugins/parsers/influx/machine.go.rl:92 - - m.handler.AddUint(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 229, 230, 231, 232, 234, 235, 237, 288, 290, 291, 292, 294, 295, 297, 303, 304, 305, 306, 308, 309, 311, 389, 390, 391, 392, 394, 395, 397, 521, 523, 524, 525, 527, 528, 530, 536, 559, 560, 561, 563, 564, 566, 572, 574, 575, 576, 577, 579, 580, 582, 588, 589, 590, 592, 593, 595: -//line plugins/parsers/influx/machine.go.rl:96 - - m.handler.AddFloat(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 238, 239, 240, 241, 242, 298, 299, 300, 301, 302, 312, 313, 314, 315, 316, 398, 399, 400, 401, 402, 531, 532, 533, 534, 535, 567, 568, 569, 570, 571, 583, 584, 585, 586, 587, 596, 597, 598, 599, 600: -//line plugins/parsers/influx/machine.go.rl:100 - - m.handler.AddBool(key, m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 209, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 245, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 267, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 321, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 345, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 367, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 406, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 428, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 454, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 477, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 499, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 539, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558: -//line 
plugins/parsers/influx/machine.go.rl:108 - - m.handler.SetTimestamp(m.text()) - -//line plugins/parsers/influx/machine.go.rl:22 - - yield = true - m.cs = 196; - {( m.p)++; m.cs = 0; goto _out } - - case 43, 45, 83, 130, 131, 138: -//line plugins/parsers/influx/machine.go.rl:35 - - m.err = ErrFieldParse + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:49 +//line plugins/parsers/influx/machine.go.rl:39 - m.err = ErrTimestampParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 +//line plugins/parsers/influx/machine.go.rl:53 - m.err = ErrParse + err = ErrTimestampParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } - case 31, 32, 33, 34, 35, 85, 86, 87, 88, 89, 95, 96, 97, 99, 100, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 134, 135, 137, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169: -//line plugins/parsers/influx/machine.go.rl:42 + case 10, 11, 25, 26, 28, 29, 40, 41, 53, 54, 55, 56, 71, 90, 91, 93, 95, 138, 139, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 153, 154, 156, 157, 158, 159, 160, 161, 162, 163, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240: +//line plugins/parsers/influx/machine.go.rl:86 - m.err = ErrTagParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:35 +//line plugins/parsers/influx/machine.go.rl:39 - m.err = ErrFieldParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 + case 534, 588, 696: +//line plugins/parsers/influx/machine.go.rl:86 - m.err = ErrParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 537, 591, 699: +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 406, 530, 531, 532, 533, 535, 536, 538, 562, 585, 586, 587, 589, 590, 592, 693, 694, 695, 697, 698, 700: +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + 
+//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 539, 540, 541, 542, 543, 593, 594, 595, 596, 597, 701, 702, 703, 704, 705: +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 303, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 400, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 565, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584: +//line plugins/parsers/influx/machine.go.rl:86 + + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 16, 17, 18, 20, 46, 47, 63, 64, 65, 66, 68, 79, 80, 81, 82, 84, 86, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 123, 124, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204: +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } - case 101: -//line plugins/parsers/influx/machine.go.rl:42 +//line plugins/parsers/influx/machine.go.rl:39 - m.err = ErrTagParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:49 + case 483, 519, 641: +//line plugins/parsers/influx/machine.go.rl:99 - m.err = ErrTimestampParse + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:112 + + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 486, 522, 644: +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:121 + + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 477, 479, 480, 481, 482, 484, 485, 487, 493, 516, 517, 518, 520, 521, 523, 638, 639, 640, 642, 643, 645: +//line 
plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:130 + + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 488, 489, 490, 491, 492, 524, 525, 526, 527, 528, 646, 647, 648, 649, 650: +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:139 + + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 353, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 455, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 496, 498, 499, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515: +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:157 + + err = m.handler.SetTimestamp(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:178 + + m.finishMetric = true + + case 38, 165, 167, 168, 205, 206, 241, 242: +//line plugins/parsers/influx/machine.go.rl:32 + + err = ErrNameParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 +//line plugins/parsers/influx/machine.go.rl:86 - m.err = ErrParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } - case 98, 136, 139, 158: -//line plugins/parsers/influx/machine.go.rl:42 +//line plugins/parsers/influx/machine.go.rl:39 - m.err = ErrTagParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:35 + case 42, 89, 151: +//line plugins/parsers/influx/machine.go.rl:86 - m.err = ErrFieldParse + err = m.handler.SetMeasurement(m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:49 +//line plugins/parsers/influx/machine.go.rl:39 - m.err = ErrTimestampParse + err = ErrFieldParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go.rl:56 +//line plugins/parsers/influx/machine.go.rl:53 - m.err = ErrParse + err = ErrTimestampParse ( m.p)-- - m.cs = 195; - {( m.p)++; m.cs = 0; 
goto _out } + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } -//line plugins/parsers/influx/machine.go:23507 + case 61, 105, 125: +//line plugins/parsers/influx/machine.go.rl:99 + + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + } + +//line plugins/parsers/influx/machine.go.rl:46 + + err = ErrTagParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:39 + + err = ErrFieldParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go.rl:53 + + err = ErrTimestampParse + ( m.p)-- + + ( m.cs) = 257; + {( m.p)++; ( m.cs) = 0; goto _out } + +//line plugins/parsers/influx/machine.go:31580 } } _out: {} } -//line plugins/parsers/influx/machine.go.rl:308 +//line plugins/parsers/influx/machine.go.rl:415 - // Even if there was an error, return true. On the next call to this - // function we will attempt to scan to the next line of input and recover. - if m.err != nil { - return true + if err != nil { + return err } - // Don't check the error state in the case that we just yielded, because - // the yield indicates we just completed parsing a line. - if !yield && m.cs == LineProtocol_error { - m.err = ErrParse - return true + // This would indicate an error in the machine that was reported with a + // more specific error. We return a generic error but this should + // possibly be a panic. + if m.cs == 0 { + m.cs = LineProtocol_en_discard_line + return ErrParse } - return true + // If we haven't found a metric line yet and we reached the EOF, report it + // now. This happens when the data ends with a comment or whitespace. + // + // Otherwise we have successfully parsed a metric line, so if we are at + // the EOF we will report it the next call. + if !m.beginMetric && m.p == m.pe && m.pe == m.eof { + return EOF + } + + return nil } -// Err returns the error that occurred on the last call to ParseLine. If the -// result is nil, then the line was parsed successfully. -func (m *machine) Err() error { - return m.err -} - -// Position returns the current position into the input. +// Position returns the current byte offset into the data. func (m *machine) Position() int { return m.p } +// LineOffset returns the byte offset of the current line. +func (m *machine) LineOffset() int { + return m.sol +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. +func (m *machine) LineNumber() int { + return m.lineno +} + +// Column returns the current column. +func (m *machine) Column() int { + lineOffset := m.p - m.sol + return lineOffset + 1 +} + func (m *machine) text() []byte { return m.data[m.pb:m.p] } + +type streamMachine struct { + machine *machine + reader io.Reader +} + +func NewStreamMachine(r io.Reader, handler Handler) *streamMachine { + m := &streamMachine{ + machine: NewMachine(handler), + reader: r, + } + + m.machine.SetData(make([]byte, 1024)) + m.machine.pe = 0 + m.machine.eof = -1 + return m +} + +func (m *streamMachine) Next() error { + // Check if we are already at EOF, this should only happen if called again + // after already returning EOF. 
+ if m.machine.p == m.machine.pe && m.machine.pe == m.machine.eof { + return EOF + } + + copy(m.machine.data, m.machine.data[m.machine.p:]) + m.machine.pe = m.machine.pe - m.machine.p + m.machine.sol = m.machine.sol - m.machine.p + m.machine.pb = 0 + m.machine.p = 0 + m.machine.eof = -1 + + m.machine.key = nil + m.machine.beginMetric = false + m.machine.finishMetric = false + + for { + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2 * len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + + err := m.machine.exec() + if err != nil { + return err + } + + // If we have successfully parsed a full metric line, break out + if m.machine.finishMetric { + break + } + + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) + if n == 0 && err == io.EOF { + m.machine.eof = m.machine.pe + } else if err != nil && err != io.EOF { + // After the reader returns an error this function shouldn't be + // called again. This will cause the machine to return EOF if it + // is called again. + m.machine.p = m.machine.pe + m.machine.eof = m.machine.pe + return &readErr{Err: err} + } + + m.machine.pe += n + + } + + return nil +} + +// Position returns the current byte offset into the data. +func (m *streamMachine) Position() int { + return m.machine.Position() +} + +// LineOffset returns the byte offset of the current line. +func (m *streamMachine) LineOffset() int { + return m.machine.LineOffset() +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. +func (m *streamMachine) LineNumber() int { + return m.machine.LineNumber() +} + +// Column returns the current column. +func (m *streamMachine) Column() int { + return m.machine.Column() +} + +// LineText returns the text of the current line that has been parsed so far. 
+func (m *streamMachine) LineText() string { + return string(m.machine.data[0:m.machine.p]) +} diff --git a/plugins/parsers/influx/machine.go.rl b/plugins/parsers/influx/machine.go.rl index c8cf0bee9..f8f40cd7c 100644 --- a/plugins/parsers/influx/machine.go.rl +++ b/plugins/parsers/influx/machine.go.rl @@ -2,14 +2,24 @@ package influx import ( "errors" + "io" ) +type readErr struct { + Err error +} + +func (e *readErr) Error() string { + return e.Err.Error() +} + var ( ErrNameParse = errors.New("expected measurement name") ErrFieldParse = errors.New("expected field") ErrTagParse = errors.New("expected tag") ErrTimestampParse = errors.New("expected timestamp") ErrParse = errors.New("parse error") + EOF = errors.New("EOF") ) %%{ @@ -19,99 +29,162 @@ action begin { m.pb = m.p } -action yield { - yield = true - fnext align; - fbreak; -} - action name_error { - m.err = ErrNameParse + err = ErrNameParse fhold; fnext discard_line; fbreak; } action field_error { - m.err = ErrFieldParse + err = ErrFieldParse fhold; fnext discard_line; fbreak; } action tagset_error { - m.err = ErrTagParse + err = ErrTagParse fhold; fnext discard_line; fbreak; } action timestamp_error { - m.err = ErrTimestampParse + err = ErrTimestampParse fhold; fnext discard_line; fbreak; } action parse_error { - m.err = ErrParse + err = ErrParse fhold; fnext discard_line; fbreak; } +action align_error { + err = ErrParse + fnext discard_line; + fbreak; +} + action hold_recover { fhold; fgoto main; } -action discard { +action goto_align { fgoto align; } +action begin_metric { + m.beginMetric = true +} + action name { - m.handler.SetMeasurement(m.text()) + err = m.handler.SetMeasurement(m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action tagkey { - key = m.text() + m.key = m.text() } action tagvalue { - m.handler.AddTag(key, m.text()) + err = m.handler.AddTag(m.key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action fieldkey { - key = m.text() + m.key = m.text() } action integer { - m.handler.AddInt(key, m.text()) + err = m.handler.AddInt(m.key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action unsigned { - m.handler.AddUint(key, m.text()) + err = m.handler.AddUint(m.key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action float { - m.handler.AddFloat(key, m.text()) + err = m.handler.AddFloat(m.key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action bool { - m.handler.AddBool(key, m.text()) + err = m.handler.AddBool(m.key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action string { - m.handler.AddString(key, m.text()) + err = m.handler.AddString(m.key, m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } } action timestamp { - m.handler.SetTimestamp(m.text()) + err = m.handler.SetTimestamp(m.text()) + if err != nil { + fhold; + fnext discard_line; + fbreak; + } +} + +action incr_newline { + m.lineno++ + m.sol = m.p + m.sol++ // next char will be the first column in the line +} + +action eol { + m.finishMetric = true + fnext align; + fbreak; +} + +action finish_metric { + m.finishMetric = true } ws = [\t\v\f ]; +newline = + '\r'? 
'\n' >incr_newline; + non_zero_digit = [1-9]; @@ -155,7 +228,7 @@ fieldbool = (true | false) >begin %bool; fieldstringchar = - [^\n\f\r\\"] | '\\' [\\"]; + [^\n\\"] | '\\' [\\"] | newline; fieldstring = fieldstringchar* >begin %string; @@ -172,16 +245,16 @@ fieldset = field ( ',' field )*; tagchar = - [^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r] ); + [^\t\n\f\r ,=\\] | ( '\\' [^\t\n\f\r\\] ) | '\\\\' %to{ fhold; }; tagkey = tagchar+ >begin %tagkey; tagvalue = - tagchar+ >begin %tagvalue; + tagchar+ >begin %eof(tagvalue) %tagvalue; tagset = - (',' (tagkey '=' tagvalue) $err(tagset_error))*; + ((',' tagkey '=' tagvalue) $err(tagset_error))*; measurement_chars = [^\t\n\f\r ,\\] | ( '\\' [^\t\n\f\r] ); @@ -190,62 +263,85 @@ measurement_start = measurement_chars - '#'; measurement = - (measurement_start measurement_chars*) >begin %name; + (measurement_start measurement_chars*) >begin %eof(name) %name; -newline = - [\r\n]; +eol_break = + newline %to(eol) + ; -comment = - '#' (any -- newline)* newline; - -eol = - ws* newline? >yield %eof(yield); - -line = - measurement +metric = + measurement >err(name_error) tagset - (ws+ fieldset) $err(field_error) + ws+ fieldset $err(field_error) (ws+ timestamp)? $err(timestamp_error) - eol; + ; -# The main machine parses a single line of line protocol. -main := line $err(parse_error); +line_with_term = + ws* metric ws* eol_break + ; + +line_without_term = + ws* metric ws* + ; + +main := + (line_with_term* + (line_with_term | line_without_term?) + ) >begin_metric %eof(finish_metric) + ; # The discard_line machine discards the current line. Useful for recovering # on the next line when an error occurs. discard_line := - (any - newline)* newline @discard; + (any -- newline)* newline @goto_align; + +commentline = + ws* '#' (any -- newline)* newline; + +emptyline = + ws* newline; # The align machine scans forward to the start of the next line. This machine # is used to skip over whitespace and comments, keeping this logic out of the # main machine. +# +# Skip valid lines that don't contain line protocol, any other data will move +# control to the main parser via the err action. align := - (space* comment)* space* measurement_start @hold_recover %eof(yield); + (emptyline | commentline | ws+)* %err(hold_recover); -series := measurement tagset $err(parse_error) eol; +# Series is a machine for matching measurement+tagset +series := + (measurement >err(name_error) tagset eol_break?) 
+ >begin_metric + ; }%% %% write data; type Handler interface { - SetMeasurement(name []byte) - AddTag(key []byte, value []byte) - AddInt(key []byte, value []byte) - AddUint(key []byte, value []byte) - AddFloat(key []byte, value []byte) - AddString(key []byte, value []byte) - AddBool(key []byte, value []byte) - SetTimestamp(tm []byte) + SetMeasurement(name []byte) error + AddTag(key []byte, value []byte) error + AddInt(key []byte, value []byte) error + AddUint(key []byte, value []byte) error + AddFloat(key []byte, value []byte) error + AddString(key []byte, value []byte) error + AddBool(key []byte, value []byte) error + SetTimestamp(tm []byte) error } type machine struct { - data []byte - cs int - p, pe, eof int - pb int - handler Handler - initState int - err error + data []byte + cs int + p, pe, eof int + pb int + lineno int + sol int + handler Handler + initState int + key []byte + beginMetric bool + finishMetric bool } func NewMachine(handler Handler) *machine { @@ -256,6 +352,7 @@ func NewMachine(handler Handler) *machine { %% access m.; %% variable p m.p; + %% variable cs m.cs; %% variable pe m.pe; %% variable eof m.eof; %% variable data m.data; @@ -284,55 +381,182 @@ func (m *machine) SetData(data []byte) { m.data = data m.p = 0 m.pb = 0 + m.lineno = 1 + m.sol = 0 m.pe = len(data) m.eof = len(data) - m.err = nil + m.key = nil + m.beginMetric = false + m.finishMetric = false %% write init; m.cs = m.initState } -// ParseLine parses a line of input and returns true if more data can be -// parsed. -func (m *machine) ParseLine() bool { - if m.data == nil || m.p >= m.pe { - m.err = nil - return false +// Next parses the next metric line and returns nil if it was successfully +// processed. If the line contains a syntax error an error is returned, +// otherwise if the end of file is reached before finding a metric line then +// EOF is returned. +func (m *machine) Next() error { + if m.p == m.pe && m.pe == m.eof { + return EOF } - m.err = nil - var key []byte - var yield bool + m.key = nil + m.beginMetric = false + m.finishMetric = false + return m.exec() +} + +func (m *machine) exec() error { + var err error %% write exec; - // Even if there was an error, return true. On the next call to this - // function we will attempt to scan to the next line of input and recover. - if m.err != nil { - return true + if err != nil { + return err } - // Don't check the error state in the case that we just yielded, because - // the yield indicates we just completed parsing a line. - if !yield && m.cs == LineProtocol_error { - m.err = ErrParse - return true + // This would indicate an error in the machine that was reported with a + // more specific error. We return a generic error but this should + // possibly be a panic. + if m.cs == %%{ write error; }%% { + m.cs = LineProtocol_en_discard_line + return ErrParse } - return true + // If we haven't found a metric line yet and we reached the EOF, report it + // now. This happens when the data ends with a comment or whitespace. + // + // Otherwise we have successfully parsed a metric line, so if we are at + // the EOF we will report it the next call. + if !m.beginMetric && m.p == m.pe && m.pe == m.eof { + return EOF + } + + return nil } -// Err returns the error that occurred on the last call to ParseLine. If the -// result is nil, then the line was parsed successfully. -func (m *machine) Err() error { - return m.err -} - -// Position returns the current position into the input. +// Position returns the current byte offset into the data. 
func (m *machine) Position() int { return m.p } +// LineOffset returns the byte offset of the current line. +func (m *machine) LineOffset() int { + return m.sol +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. +func (m *machine) LineNumber() int { + return m.lineno +} + +// Column returns the current column. +func (m *machine) Column() int { + lineOffset := m.p - m.sol + return lineOffset + 1 +} + func (m *machine) text() []byte { return m.data[m.pb:m.p] } + +type streamMachine struct { + machine *machine + reader io.Reader +} + +func NewStreamMachine(r io.Reader, handler Handler) *streamMachine { + m := &streamMachine{ + machine: NewMachine(handler), + reader: r, + } + + m.machine.SetData(make([]byte, 1024)) + m.machine.pe = 0 + m.machine.eof = -1 + return m +} + +func (m *streamMachine) Next() error { + // Check if we are already at EOF, this should only happen if called again + // after already returning EOF. + if m.machine.p == m.machine.pe && m.machine.pe == m.machine.eof { + return EOF + } + + copy(m.machine.data, m.machine.data[m.machine.p:]) + m.machine.pe = m.machine.pe - m.machine.p + m.machine.sol = m.machine.sol - m.machine.p + m.machine.pb = 0 + m.machine.p = 0 + m.machine.eof = -1 + + m.machine.key = nil + m.machine.beginMetric = false + m.machine.finishMetric = false + + for { + // Expand the buffer if it is full + if m.machine.pe == len(m.machine.data) { + expanded := make([]byte, 2 * len(m.machine.data)) + copy(expanded, m.machine.data) + m.machine.data = expanded + } + + err := m.machine.exec() + if err != nil { + return err + } + + // If we have successfully parsed a full metric line, break out + if m.machine.finishMetric { + break + } + + n, err := m.reader.Read(m.machine.data[m.machine.pe:]) + if n == 0 && err == io.EOF { + m.machine.eof = m.machine.pe + } else if err != nil && err != io.EOF { + // After the reader returns an error this function shouldn't be + // called again. This will cause the machine to return EOF if it + // is called again. + m.machine.p = m.machine.pe + m.machine.eof = m.machine.pe + return &readErr{Err: err} + } + + m.machine.pe += n + + } + + return nil +} + +// Position returns the current byte offset into the data. +func (m *streamMachine) Position() int { + return m.machine.Position() +} + +// LineOffset returns the byte offset of the current line. +func (m *streamMachine) LineOffset() int { + return m.machine.LineOffset() +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. +func (m *streamMachine) LineNumber() int { + return m.machine.LineNumber() +} + +// Column returns the current column. +func (m *streamMachine) Column() int { + return m.machine.Column() +} + +// LineText returns the text of the current line that has been parsed so far. 
+func (m *streamMachine) LineText() string { + return string(m.machine.data[0:m.machine.p]) +} diff --git a/plugins/parsers/influx/machine_test.go b/plugins/parsers/influx/machine_test.go index 1c617919b..de5353da0 100644 --- a/plugins/parsers/influx/machine_test.go +++ b/plugins/parsers/influx/machine_test.go @@ -1,9 +1,13 @@ -package influx +package influx_test import ( + "bytes" + "errors" "fmt" + "io" "testing" + "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/stretchr/testify/require" ) @@ -11,39 +15,60 @@ type TestingHandler struct { results []Result } -func (h *TestingHandler) SetMeasurement(name []byte) { +func (h *TestingHandler) SetMeasurement(name []byte) error { + n := make([]byte, len(name)) + copy(n, name) + mname := Result{ Name: Measurement, - Value: name, + Value: n, } h.results = append(h.results, mname) + return nil } -func (h *TestingHandler) AddTag(key []byte, value []byte) { +func (h *TestingHandler) AddTag(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + tagkey := Result{ Name: TagKey, - Value: key, + Value: k, } tagvalue := Result{ Name: TagValue, - Value: value, + Value: v, } h.results = append(h.results, tagkey, tagvalue) + return nil } -func (h *TestingHandler) AddInt(key []byte, value []byte) { +func (h *TestingHandler) AddInt(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, - Value: key, + Value: k, } fieldvalue := Result{ Name: FieldInt, - Value: value, + Value: v, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) AddUint(key []byte, value []byte) { +func (h *TestingHandler) AddUint(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, Value: key, @@ -53,94 +78,127 @@ func (h *TestingHandler) AddUint(key []byte, value []byte) { Value: value, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) AddFloat(key []byte, value []byte) { +func (h *TestingHandler) AddFloat(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, - Value: key, + Value: k, } fieldvalue := Result{ Name: FieldFloat, - Value: value, + Value: v, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) AddString(key []byte, value []byte) { +func (h *TestingHandler) AddString(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, - Value: key, + Value: k, } fieldvalue := Result{ Name: FieldString, - Value: value, + Value: v, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) AddBool(key []byte, value []byte) { +func (h *TestingHandler) AddBool(key []byte, value []byte) error { + k := make([]byte, len(key)) + copy(k, key) + v := make([]byte, len(value)) + copy(v, value) + fieldkey := Result{ Name: FieldKey, - Value: key, + Value: k, } fieldvalue := Result{ Name: FieldBool, - Value: value, + Value: v, } h.results = append(h.results, fieldkey, fieldvalue) + return nil } -func (h *TestingHandler) SetTimestamp(tm []byte) { +func (h *TestingHandler) SetTimestamp(tm []byte) error { + t := 
make([]byte, len(tm)) + copy(t, tm) + timestamp := Result{ Name: Timestamp, - Value: tm, + Value: t, } h.results = append(h.results, timestamp) + return nil } -func (h *TestingHandler) Reset() { +func (h *TestingHandler) Result(err error) { + var res Result + if err == nil { + res = Result{ + Name: Success, + } + } else { + res = Result{ + Name: Error, + err: err, + } + } + h.results = append(h.results, res) } func (h *TestingHandler) Results() []Result { return h.results } -func (h *TestingHandler) AddError(err error) { - e := Result{ - err: err, - } - h.results = append(h.results, e) -} - type BenchmarkingHandler struct { } -func (h *BenchmarkingHandler) SetMeasurement(name []byte) { +func (h *BenchmarkingHandler) SetMeasurement(name []byte) error { + return nil } -func (h *BenchmarkingHandler) AddTag(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddTag(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddInt(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddInt(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddUint(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddUint(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddFloat(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddFloat(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddString(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddString(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) AddBool(key []byte, value []byte) { +func (h *BenchmarkingHandler) AddBool(key []byte, value []byte) error { + return nil } -func (h *BenchmarkingHandler) SetTimestamp(tm []byte) { -} - -func (h *BenchmarkingHandler) Reset() { +func (h *BenchmarkingHandler) SetTimestamp(tm []byte) error { + return nil } type TokenType int @@ -161,6 +219,8 @@ const ( EOF Punc WhiteSpace + Success + Error ) func (t TokenType) String() string { @@ -195,6 +255,10 @@ func (t TokenType) String() string { return "Punc" case WhiteSpace: return "WhiteSpace" + case Success: + return "Success" + case Error: + return "Error" default: panic("Unknown TokenType") } @@ -234,232 +298,269 @@ var tests = []struct { name: "minimal", input: []byte("cpu value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "newline", input: []byte("cpu value=42\n"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "minimal with timestamp", input: []byte("cpu value=42 1516241192000000000"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: Timestamp, Value: []byte("1516241192000000000"), }, + { + Name: Success, + }, }, }, { name: "measurement escape non-special", input: []byte(`c\pu value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte(`c\pu`), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "measurement escaped trailing backslash", input: 
[]byte(`cpu\\ value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte(`cpu\\`), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "single char measurement", input: []byte("c value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("c"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "escape backslash in measurement", input: []byte(`cp\\u value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte(`cp\\u`), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "measurement escape space", input: []byte(`cpu\ abc value=42`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte(`cpu\ abc`), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "scientific float", input: []byte("cpu value=42e0"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42e0"), }, + { + Name: Success, + }, }, }, { name: "scientific float negative mantissa", input: []byte("cpu value=-42e0"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("-42e0"), }, + { + Name: Success, + }, }, }, { name: "scientific float negative exponent", input: []byte("cpu value=42e-1"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42e-1"), }, + { + Name: Success, + }, }, }, { name: "scientific float big e", input: []byte("cpu value=42E0"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42E0"), }, + { + Name: Success, + }, }, }, { name: "scientific float missing exponent", input: []byte("cpu value=42E"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -467,102 +568,118 @@ var tests = []struct { name: "float with decimal", input: []byte("cpu value=42.2"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42.2"), }, + { + Name: Success, + }, }, }, { name: "negative float", input: []byte("cpu value=-42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("-42"), }, + { + Name: Success, + }, }, }, { name: "float without integer digits", input: []byte("cpu value=.42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte(".42"), }, + { + Name: Success, + }, }, }, { name: "float without integer digits negative", input: []byte("cpu value=-.42"), 
results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("-.42"), }, + { + Name: Success, + }, }, }, { name: "float with multiple leading 0", input: []byte("cpu value=00.42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("00.42"), }, + { + Name: Success, + }, }, }, { name: "invalid float with only dot", input: []byte("cpu value=."), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -570,297 +687,485 @@ var tests = []struct { name: "multiple fields", input: []byte("cpu x=42,y=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("x"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: FieldKey, Value: []byte("y"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "integer field", input: []byte("cpu value=42i"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldInt, Value: []byte("42i"), }, + { + Name: Success, + }, }, }, { name: "negative integer field", input: []byte("cpu value=-42i"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldInt, Value: []byte("-42i"), }, + { + Name: Success, + }, }, }, { name: "zero integer field", input: []byte("cpu value=0i"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldInt, Value: []byte("0i"), }, + { + Name: Success, + }, }, }, { name: "negative zero integer field", input: []byte("cpu value=-0i"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldInt, Value: []byte("-0i"), }, + { + Name: Success, + }, + }, + }, + { + name: "integer field overflow okay", + input: []byte("cpu value=9223372036854775808i"), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldInt, + Value: []byte("9223372036854775808i"), + }, + { + Name: Success, + }, }, }, { name: "invalid field", input: []byte("cpu value=howdy"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrFieldParse, }, }, }, { name: "string field", - input: []byte(`cpu value="42"`), + input: []byte("cpu value=\"42\""), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldString, Value: []byte("42"), }, + { + Name: Success, + }, + }, + }, + { + name: "newline in string field", + input: []byte("cpu value=\"4\n2\""), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldString, + Value: []byte("4\n2"), + }, + { + Name: Success, + }, + }, + }, + { + name: "cr in string field", + input: 
[]byte("cpu value=\"4\r2\""), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldString, + Value: []byte("4\r2"), + }, + { + Name: Success, + }, }, }, { name: "bool field", - input: []byte(`cpu value=true`), + input: []byte("cpu value=true"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldBool, Value: []byte("true"), }, + { + Name: Success, + }, }, }, { name: "tag", - input: []byte(`cpu,host=localhost value=42`), + input: []byte("cpu,host=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte("host"), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "tag key escape space", - input: []byte(`cpu,h\ ost=localhost value=42`), + input: []byte("cpu,h\\ ost=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte(`h\ ost`), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "tag key escape comma", - input: []byte(`cpu,h\,ost=localhost value=42`), + input: []byte("cpu,h\\,ost=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte(`h\,ost`), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "tag key escape equal", - input: []byte(`cpu,h\=ost=localhost value=42`), + input: []byte("cpu,h\\=ost=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte(`h\=ost`), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "multiple tags", - input: []byte(`cpu,host=localhost,cpu=cpu0 value=42`), + input: []byte("cpu,host=localhost,cpu=cpu0 value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte("host"), }, - Result{ + { Name: TagValue, Value: []byte("localhost"), }, - Result{ + { Name: TagKey, Value: []byte("cpu"), }, - Result{ + { Name: TagValue, Value: []byte("cpu0"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, + }, + }, + { + name: "tag value escape space", + input: []byte(`cpu,host=two\ words value=42`), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: TagKey, + Value: []byte("host"), + }, + { + Name: TagValue, + Value: []byte(`two\ words`), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, + }, + }, + { + name: "tag value double escape space", + input: []byte(`cpu,host=two\\ words value=42`), + results: []Result{ + { + Name: 
Measurement, + Value: []byte("cpu"), + }, + { + Name: TagKey, + Value: []byte("host"), + }, + { + Name: TagValue, + Value: []byte(`two\\ words`), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, + }, + }, + { + name: "tag value triple escape space", + input: []byte(`cpu,host=two\\\ words value=42`), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: TagKey, + Value: []byte("host"), + }, + { + Name: TagValue, + Value: []byte(`two\\\ words`), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, }, }, { name: "tag invalid missing separator", input: []byte("cpu,xyzzy value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrTagParse, + { + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -868,12 +1173,13 @@ var tests = []struct { name: "tag invalid missing value", input: []byte("cpu,xyzzy= value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrTagParse, + { + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -881,12 +1187,13 @@ var tests = []struct { name: "tag invalid unescaped space", input: []byte("cpu,h ost=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrTagParse, + { + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -894,12 +1201,13 @@ var tests = []struct { name: "tag invalid unescaped comma", input: []byte("cpu,h,ost=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrTagParse, + { + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -907,12 +1215,13 @@ var tests = []struct { name: "tag invalid unescaped equals", input: []byte("cpu,h=ost=localhost value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrTagParse, + { + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -920,166 +1229,193 @@ var tests = []struct { name: "timestamp negative", input: []byte("cpu value=42 -1"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: Timestamp, Value: []byte("-1"), }, + { + Name: Success, + }, }, }, { name: "timestamp zero", input: []byte("cpu value=42 0"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: Timestamp, Value: []byte("0"), }, + { + Name: Success, + }, }, }, { name: "multiline", - input: []byte("cpu value=42\n\n\ncpu value=43\n"), + input: []byte("cpu value=42\n\n\n\ncpu value=43"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { + Name: Success, + }, + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("43"), }, + { + Name: Success, + }, }, }, { name: "error recovery", - input: []byte("cpu value=howdy\ncpu\ncpu value=42\n"), + input: []byte("cpu value=howdy,value2=42\ncpu\ncpu value=42"), results: []Result{ - Result{ + 
{ Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrFieldParse, }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrTagParse, }, - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "line whitespace", input: []byte(" cpu value=42 1516241192000000000 \n\n cpu value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ + { Name: Timestamp, Value: []byte("1516241192000000000"), }, - Result{ + { + Name: Success, + }, + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "leading newline", input: []byte("\ncpu value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "invalid missing field value", input: []byte("cpu value="), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -1087,12 +1423,13 @@ var tests = []struct { name: "invalid eof field key", input: []byte("cpu value"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -1100,17 +1437,23 @@ var tests = []struct { name: "invalid measurement only", input: []byte("cpu"), results: []Result{ - Result{ - err: ErrFieldParse, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: influx.ErrTagParse, }, }, }, { - name: "invalid measurement only eol", - input: []byte("cpu\n"), + name: "invalid measurement char", + input: []byte(","), results: []Result{ - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrNameParse, }, }, }, @@ -1118,12 +1461,13 @@ var tests = []struct { name: "invalid missing tag", input: []byte("cpu, value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrTagParse, + { + Name: Error, + err: influx.ErrTagParse, }, }, }, @@ -1131,20 +1475,21 @@ var tests = []struct { name: "invalid missing field", input: []byte("cpu,x=y "), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte("x"), }, - Result{ + { Name: TagValue, Value: []byte("y"), }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrFieldParse, }, }, }, @@ -1152,20 +1497,21 @@ var tests = []struct { name: "invalid too many fields", input: []byte("cpu value=42 value=43"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ - err: ErrTimestampParse, + { + Name: Error, + err: influx.ErrTimestampParse, }, }, }, @@ -1173,20 +1519,21 @@ var tests = []struct { name: "invalid timestamp too long", input: []byte("cpu value=42 12345678901234567890"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - 
Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ - err: ErrTimestampParse, + { + Name: Error, + err: influx.ErrTimestampParse, }, }, }, @@ -1194,135 +1541,175 @@ var tests = []struct { name: "invalid open string field", input: []byte(`cpu value="42 12345678901234567890`), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrFieldParse, - }, - }, - }, - { - name: "invalid newline in string field", - input: []byte("cpu value=\"4\n2\""), - results: []Result{ - Result{ - Name: Measurement, - Value: []byte("cpu"), - }, - Result{ - err: ErrFieldParse, - }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrFieldParse, }, }, }, { name: "invalid field value", - input: []byte(`cpu value=howdy`), + input: []byte("cpu value=howdy"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ - err: ErrFieldParse, + { + Name: Error, + err: influx.ErrFieldParse, }, }, }, { name: "invalid quoted timestamp", - input: []byte(`cpu value=42 "12345678901234567890"`), + input: []byte("cpu value=42 \"12345678901234567890\""), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, - Result{ - err: ErrTimestampParse, + { + Name: Error, + err: influx.ErrTimestampParse, }, }, }, + { + name: "comment only", + input: []byte("# blah blah"), + results: []Result(nil), + }, { name: "commented line", input: []byte("# blah blah\ncpu value=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, + }, + }, + { + name: "middle comment", + input: []byte("cpu value=42\n# blah blah\ncpu value=42"), + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldFloat, + Value: []byte("42"), + }, + { + Name: Success, + }, }, }, { name: "end with comment", input: []byte("cpu value=42\n# blah blah"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "end with comment and whitespace", input: []byte("cpu value=42\n# blah blah\n\n "), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("value"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, { name: "unicode", input: []byte("cpu ☺=42"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: FieldKey, Value: []byte("☺"), }, - Result{ + { Name: FieldFloat, Value: []byte("42"), }, + { + Name: Success, + }, }, }, } @@ -1331,22 +1718,15 @@ func TestMachine(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { handler := &TestingHandler{} - fsm := NewMachine(handler) + fsm := influx.NewMachine(handler) fsm.SetData(tt.input) - count := 0 - for fsm.ParseLine() { - if fsm.Err() != nil { - handler.AddError(fsm.Err()) - } - 
count++ - if count > 20 { + for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil && err == influx.EOF { break } - } - - if fsm.Err() != nil { - handler.AddError(fsm.Err()) + handler.Result(err) } results := handler.Results() @@ -1355,16 +1735,97 @@ func TestMachine(t *testing.T) { } } +var positionTests = []struct { + name string + input []byte + lineno int + column int +}{ + { + name: "empty string", + input: []byte(""), + lineno: 1, + column: 1, + }, + { + name: "minimal", + input: []byte("cpu value=42"), + lineno: 1, + column: 13, + }, + { + name: "one newline", + input: []byte("cpu value=42\ncpu value=42"), + lineno: 2, + column: 13, + }, + { + name: "several newlines", + input: []byte("cpu value=42\n\n\n"), + lineno: 4, + column: 1, + }, + { + name: "error on second line", + input: []byte("cpu value=42\ncpu value=invalid"), + lineno: 2, + column: 11, + }, + { + name: "error after comment line", + input: []byte("cpu value=42\n# comment\ncpu value=invalid"), + lineno: 3, + column: 11, + }, + { + name: "dos line endings", + input: []byte("cpu value=42\r\ncpu value=invalid"), + lineno: 2, + column: 11, + }, + { + name: "mac line endings not supported", + input: []byte("cpu value=42\rcpu value=invalid"), + lineno: 1, + column: 14, + }, +} + +func TestMachinePosition(t *testing.T) { + for _, tt := range positionTests { + t.Run(tt.name, func(t *testing.T) { + handler := &TestingHandler{} + fsm := influx.NewMachine(handler) + fsm.SetData(tt.input) + + // Parse until an error or eof + for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil { + break + } + } + + require.Equal(t, tt.lineno, fsm.LineNumber(), "lineno") + require.Equal(t, tt.column, fsm.Column(), "column") + }) + } +} + func BenchmarkMachine(b *testing.B) { for _, tt := range tests { b.Run(tt.name, func(b *testing.B) { handler := &BenchmarkingHandler{} - fsm := NewMachine(handler) + fsm := influx.NewMachine(handler) for n := 0; n < b.N; n++ { fsm.SetData(tt.input) - for fsm.ParseLine() { + for { + err := fsm.Next() + if err != nil { + break + } } } }) @@ -1372,21 +1833,29 @@ func BenchmarkMachine(b *testing.B) { } func TestMachineProcstat(t *testing.T) { - input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") + input := 
[]byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") handler := &TestingHandler{} - fsm := NewMachine(handler) + fsm := influx.NewMachine(handler) fsm.SetData(input) - for fsm.ParseLine() { + for { + err := fsm.Next() + if err != nil { + break + } } } func BenchmarkMachineProcstat(b *testing.B) { - input := []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") + input := []byte("procstat,exe=bash,process_name=bash 
voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000") handler := &BenchmarkingHandler{} - fsm := NewMachine(handler) + fsm := influx.NewMachine(handler) for n := 0; n < b.N; n++ { fsm.SetData(input) - for fsm.ParseLine() { + for { + err := fsm.Next() + if err != nil { + break + } } } } @@ -1407,36 +1876,42 @@ func TestSeriesMachine(t *testing.T) { name: "no tags", input: []byte("cpu"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, + { + Name: Success, + }, }, }, { name: "tags", input: []byte("cpu,a=x,b=y"), results: []Result{ - Result{ + { Name: Measurement, Value: []byte("cpu"), }, - Result{ + { Name: TagKey, Value: []byte("a"), }, - Result{ + { Name: TagValue, Value: []byte("x"), }, - Result{ + { Name: TagKey, Value: []byte("b"), }, - Result{ + { Name: TagValue, Value: []byte("y"), }, + { + Name: Success, + }, }, }, } @@ -1444,22 +1919,15 @@ func TestSeriesMachine(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { handler := &TestingHandler{} - fsm := NewSeriesMachine(handler) + fsm := influx.NewSeriesMachine(handler) fsm.SetData(tt.input) - count := 0 - for fsm.ParseLine() { - if fsm.Err() != nil { - handler.AddError(fsm.Err()) - } - count++ - if count > 20 { + for { + err := fsm.Next() + if err != nil { break } - } - - if fsm.Err() != nil { - handler.AddError(fsm.Err()) + handler.Result(err) } results := handler.Results() @@ -1467,3 +1935,283 @@ func TestSeriesMachine(t *testing.T) { }) } } + +type MockHandler struct { + SetMeasurementF func(name []byte) error + AddTagF func(key []byte, value []byte) error + AddIntF func(key []byte, value []byte) error + AddUintF func(key []byte, value []byte) error + AddFloatF func(key []byte, value []byte) error + AddStringF func(key []byte, value []byte) error + AddBoolF func(key []byte, value []byte) error + SetTimestampF func(tm []byte) error + + TestingHandler +} + +func (h *MockHandler) SetMeasurement(name []byte) error { + h.TestingHandler.SetMeasurement(name) + return h.SetMeasurementF(name) +} + +func (h *MockHandler) AddTag(name, value []byte) error { + return h.AddTagF(name, value) +} + +func (h *MockHandler) AddInt(name, value []byte) error { + err := h.AddIntF(name, value) + if err != nil { + return err + } + h.TestingHandler.AddInt(name, value) + return nil +} + 
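// The rewritten tests above all drive the parser the same way: call Next()
// until it reports influx.EOF, handing each intermediate outcome to the test
// handler so parsing can resume on the following line. A minimal sketch of
// that loop, as used with the TestingHandler in these tests (handler and
// input stand in for the table-driven test values; the 20-iteration cap is
// the tests' guard against a non-terminating parser):
//
//	fsm := influx.NewMachine(handler)
//	fsm.SetData(input)
//	for i := 0; i < 20; i++ {
//		err := fsm.Next()
//		if err == influx.EOF {
//			break
//		}
//		handler.Result(err)
//	}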
+func (h *MockHandler) AddUint(name, value []byte) error { + err := h.AddUintF(name, value) + if err != nil { + return err + } + h.TestingHandler.AddUint(name, value) + return nil +} + +func (h *MockHandler) AddFloat(name, value []byte) error { + return h.AddFloatF(name, value) +} + +func (h *MockHandler) AddString(name, value []byte) error { + return h.AddStringF(name, value) +} + +func (h *MockHandler) AddBool(name, value []byte) error { + return h.AddBoolF(name, value) +} + +func (h *MockHandler) SetTimestamp(tm []byte) error { + return h.SetTimestampF(tm) +} + +var errorRecoveryTests = []struct { + name string + input []byte + handler *MockHandler + results []Result +}{ + { + name: "integer", + input: []byte("cpu value=43i\ncpu value=42i"), + handler: &MockHandler{ + SetMeasurementF: func(name []byte) error { + return nil + }, + AddIntF: func(name, value []byte) error { + if string(value) != "42i" { + return errors.New("handler error") + } + return nil + }, + }, + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: errors.New("handler error"), + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldInt, + Value: []byte("42i"), + }, + { + Name: Success, + }, + }, + }, + { + name: "integer with timestamp", + input: []byte("cpu value=43i 1516241192000000000\ncpu value=42i"), + handler: &MockHandler{ + SetMeasurementF: func(name []byte) error { + return nil + }, + AddIntF: func(name, value []byte) error { + if string(value) != "42i" { + return errors.New("handler error") + } + return nil + }, + }, + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: errors.New("handler error"), + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldInt, + Value: []byte("42i"), + }, + { + Name: Success, + }, + }, + }, + { + name: "unsigned", + input: []byte("cpu value=43u\ncpu value=42u"), + handler: &MockHandler{ + SetMeasurementF: func(name []byte) error { + return nil + }, + AddUintF: func(name, value []byte) error { + if string(value) != "42u" { + return errors.New("handler error") + } + return nil + }, + }, + results: []Result{ + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: Error, + err: errors.New("handler error"), + }, + { + Name: Measurement, + Value: []byte("cpu"), + }, + { + Name: FieldKey, + Value: []byte("value"), + }, + { + Name: FieldUint, + Value: []byte("42u"), + }, + { + Name: Success, + }, + }, + }, +} + +func TestHandlerErrorRecovery(t *testing.T) { + for _, tt := range errorRecoveryTests { + t.Run(tt.name, func(t *testing.T) { + fsm := influx.NewMachine(tt.handler) + fsm.SetData(tt.input) + + for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil && err == influx.EOF { + break + } + tt.handler.Result(err) + } + + results := tt.handler.Results() + require.Equal(t, tt.results, results) + }) + } +} + +func TestStreamMachine(t *testing.T) { + type testcase struct { + name string + input io.Reader + results []Result + err error + } + + var tc []testcase + for _, tt := range tests { + tc = append(tc, testcase{ + name: tt.name, + input: bytes.NewBuffer([]byte(tt.input)), + results: tt.results, + err: tt.err, + }) + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + handler := &TestingHandler{} + fsm := influx.NewStreamMachine(tt.input, handler) + + // Parse only up to 20 metrics; to avoid any bugs where the 
parser + // isn't terminated. + for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil && err == influx.EOF { + break + } + handler.Result(err) + } + + results := handler.Results() + require.Equal(t, tt.results, results) + }) + } +} + +func TestStreamMachinePosition(t *testing.T) { + type testcase struct { + name string + input io.Reader + lineno int + column int + } + + var tc []testcase + for _, tt := range positionTests { + tc = append(tc, testcase{ + name: tt.name, + input: bytes.NewBuffer([]byte(tt.input)), + lineno: tt.lineno, + column: tt.column, + }) + } + + for _, tt := range tc { + t.Run(tt.name, func(t *testing.T) { + handler := &TestingHandler{} + fsm := influx.NewStreamMachine(tt.input, handler) + + // Parse until an error or eof + for i := 0; i < 20; i++ { + err := fsm.Next() + if err != nil { + break + } + } + + require.Equal(t, tt.lineno, fsm.LineNumber(), "lineno") + require.Equal(t, tt.column, fsm.Column(), "column") + }) + } +} diff --git a/plugins/parsers/influx/parser.go b/plugins/parsers/influx/parser.go index 0b16a2a39..620104ac6 100644 --- a/plugins/parsers/influx/parser.go +++ b/plugins/parsers/influx/parser.go @@ -3,7 +3,10 @@ package influx import ( "errors" "fmt" + "io" + "strings" "sync" + "time" "github.com/influxdata/telegraf" ) @@ -16,20 +19,45 @@ var ( ErrNoMetric = errors.New("no metric in line") ) +type TimeFunc func() time.Time + +// ParseError indicates a error in the parsing of the text. type ParseError struct { - Offset int - msg string - buf string + Offset int + LineOffset int + LineNumber int + Column int + msg string + buf string } func (e *ParseError) Error() string { - buffer := e.buf - if len(buffer) > maxErrorBufferSize { - buffer = buffer[:maxErrorBufferSize] + "..." + buffer := e.buf[e.LineOffset:] + eol := strings.IndexAny(buffer, "\r\n") + if eol >= 0 { + buffer = buffer[:eol] } - return fmt.Sprintf("metric parse error: %s at offset %d: %q", e.msg, e.Offset, buffer) + if len(buffer) > maxErrorBufferSize { + startEllipsis := true + offset := e.Offset - e.LineOffset + start := offset - maxErrorBufferSize + if start < 0 { + startEllipsis = false + start = 0 + } + // if we trimmed it the column won't line up. it'll always be the last character, + // because the parser doesn't continue past it, but point it out anyway so + // it's obvious where the issue is. + buffer = buffer[start:offset] + "<-- here" + if startEllipsis { + buffer = "..." + buffer + } + } + return fmt.Sprintf("metric parse error: %s at %d:%d: %q", e.msg, e.LineNumber, e.Column, buffer) } +// Parser is an InfluxDB Line Protocol parser that implements the +// parsers.Parser interface. 
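// A sketch of how the reworked ParseError above reads in practice; the error
// text is taken from the expectations in parser_test.go further down. Errors
// now point at line:column within the offending line instead of a raw byte
// offset, and lines longer than maxErrorBufferSize are trimmed with a
// "<-- here" marker at the failure point.
//
//	handler := NewMetricHandler()
//	parser := NewParser(handler)
//	_, err := parser.Parse([]byte("cpu value=42\ncpu value=invalid\ncpu value=42"))
//	fmt.Println(err)
//	// metric parse error: expected field at 2:11: "cpu value=invalid"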
type Parser struct { DefaultTags map[string]string @@ -54,19 +82,30 @@ func NewSeriesParser(handler *MetricHandler) *Parser { } } +func (h *Parser) SetTimeFunc(f TimeFunc) { + h.handler.SetTimeFunc(f) +} + func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { p.Lock() defer p.Unlock() metrics := make([]telegraf.Metric, 0) p.machine.SetData(input) - for p.machine.ParseLine() { - err := p.machine.Err() + for { + err := p.machine.Next() + if err == EOF { + break + } + if err != nil { return nil, &ParseError{ - Offset: p.machine.Position(), - msg: err.Error(), - buf: string(input), + Offset: p.machine.Position(), + LineOffset: p.machine.LineOffset(), + LineNumber: p.machine.LineNumber(), + Column: p.machine.Column(), + msg: err.Error(), + buf: string(input), } } @@ -74,7 +113,11 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { if err != nil { return nil, err } - p.handler.Reset() + + if metric == nil { + continue + } + metrics = append(metrics, metric) } @@ -83,7 +126,7 @@ func (p *Parser) Parse(input []byte) ([]telegraf.Metric, error) { } func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { - metrics, err := p.Parse([]byte(line + "\n")) + metrics, err := p.Parse([]byte(line)) if err != nil { return nil, err } @@ -105,10 +148,97 @@ func (p *Parser) applyDefaultTags(metrics []telegraf.Metric) { } for _, m := range metrics { - for k, v := range p.DefaultTags { - if !m.HasTag(k) { - m.AddTag(k, v) - } + p.applyDefaultTagsSingle(m) + } +} + +func (p *Parser) applyDefaultTagsSingle(metric telegraf.Metric) { + for k, v := range p.DefaultTags { + if !metric.HasTag(k) { + metric.AddTag(k, v) } } } + +// StreamParser is an InfluxDB Line Protocol parser. It is not safe for +// concurrent use in multiple goroutines. +type StreamParser struct { + machine *streamMachine + handler *MetricHandler +} + +func NewStreamParser(r io.Reader) *StreamParser { + handler := NewMetricHandler() + return &StreamParser{ + machine: NewStreamMachine(r, handler), + handler: handler, + } +} + +// SetTimeFunc changes the function used to determine the time of metrics +// without a timestamp. The default TimeFunc is time.Now. Useful mostly for +// testing, or perhaps if you want all metrics to have the same timestamp. +func (h *StreamParser) SetTimeFunc(f TimeFunc) { + h.handler.SetTimeFunc(f) +} + +func (h *StreamParser) SetTimePrecision(u time.Duration) { + h.handler.SetTimePrecision(u) +} + +// Next parses the next item from the stream. You can repeat calls to this +// function if it returns ParseError to get the next metric or error. +func (p *StreamParser) Next() (telegraf.Metric, error) { + err := p.machine.Next() + if err == EOF { + return nil, err + } + + if e, ok := err.(*readErr); ok { + return nil, e.Err + } + + if err != nil { + return nil, &ParseError{ + Offset: p.machine.Position(), + LineOffset: p.machine.LineOffset(), + LineNumber: p.machine.LineNumber(), + Column: p.machine.Column(), + msg: err.Error(), + buf: p.machine.LineText(), + } + } + + metric, err := p.handler.Metric() + if err != nil { + return nil, err + } + + return metric, nil +} + +// Position returns the current byte offset into the data. +func (p *StreamParser) Position() int { + return p.machine.Position() +} + +// LineOffset returns the byte offset of the current line. +func (p *StreamParser) LineOffset() int { + return p.machine.LineOffset() +} + +// LineNumber returns the current line number. Lines are counted based on the +// regular expression `\r?\n`. 
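// A rough usage sketch for the StreamParser added above, following the
// pattern used in parser_test.go below: Next returns one metric at a time,
// EOF marks the end of input, a *ParseError (which carries LineNumber and
// Column) can be skipped to keep reading later lines, and errors from the
// underlying reader are returned as-is. reader is any io.Reader; process is a
// placeholder for whatever the caller does with each metric.
//
//	parser := NewStreamParser(reader)
//	for {
//		m, err := parser.Next()
//		if err == EOF {
//			break
//		}
//		if _, ok := err.(*ParseError); ok {
//			continue // skip the bad line and keep going
//		}
//		if err != nil {
//			break // error from the underlying reader
//		}
//		process(m)
//	}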
+func (p *StreamParser) LineNumber() int { + return p.machine.LineNumber() +} + +// Column returns the current column. +func (p *StreamParser) Column() int { + return p.machine.Column() +} + +// LineText returns the text of the current line that has been parsed so far. +func (p *StreamParser) LineText() string { + return p.machine.LineText() +} diff --git a/plugins/parsers/influx/parser_test.go b/plugins/parsers/influx/parser_test.go index 05a797442..569eb3a22 100644 --- a/plugins/parsers/influx/parser_test.go +++ b/plugins/parsers/influx/parser_test.go @@ -1,11 +1,17 @@ package influx import ( + "bytes" + "errors" + "io" + "strconv" + "strings" "testing" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -21,12 +27,11 @@ var DefaultTime = func() time.Time { } var ptests = []struct { - name string - input []byte - timeFunc func() time.Time - precision time.Duration - metrics []telegraf.Metric - err error + name string + input []byte + timeFunc func() time.Time + metrics []telegraf.Metric + err error }{ { name: "minimal", @@ -173,6 +178,63 @@ var ptests = []struct { }, err: nil, }, + { + name: "tag value escape space", + input: []byte(`cpu,host=two\ words value=42`), + metrics: []telegraf.Metric{ + Metric( + metric.New( + "cpu", + map[string]string{ + "host": "two words", + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), + ), + ), + }, + err: nil, + }, + { + name: "tag value double escape space", + input: []byte(`cpu,host=two\\ words value=42`), + metrics: []telegraf.Metric{ + Metric( + metric.New( + "cpu", + map[string]string{ + "host": `two\ words`, + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), + ), + ), + }, + err: nil, + }, + { + name: "tag value triple escape space", + input: []byte(`cpu,host=two\\\ words value=42`), + metrics: []telegraf.Metric{ + Metric( + metric.New( + "cpu", + map[string]string{ + "host": `two\\ words`, + }, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(42, 0), + ), + ), + }, + err: nil, + }, { name: "field key escape not escapable", input: []byte(`cpu va\lue=42`), @@ -259,19 +321,16 @@ var ptests = []struct { err: nil, }, { - name: "field int overflow dropped", - input: []byte("cpu value=9223372036854775808i"), - metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{}, - time.Unix(42, 0), - ), - ), + name: "field int overflow", + input: []byte("cpu value=9223372036854775808i"), + metrics: nil, + err: &ParseError{ + Offset: 30, + LineNumber: 1, + Column: 31, + msg: strconv.ErrRange.Error(), + buf: "cpu value=9223372036854775808i", }, - err: nil, }, { name: "field int max value", @@ -308,19 +367,16 @@ var ptests = []struct { err: nil, }, { - name: "field uint overflow dropped", - input: []byte("cpu value=18446744073709551616u"), - metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{}, - time.Unix(42, 0), - ), - ), + name: "field uint overflow", + input: []byte("cpu value=18446744073709551616u"), + metrics: nil, + err: &ParseError{ + Offset: 31, + LineNumber: 1, + Column: 32, + msg: strconv.ErrRange.Error(), + buf: "cpu value=18446744073709551616u", }, - err: nil, }, { name: "field uint max value", @@ -407,6 +463,23 @@ var ptests = []struct { }, err: nil, }, + { + name: "field string newline", + input: []byte("cpu value=\"4\n2\""), + metrics: []telegraf.Metric{ + Metric( + 
metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": "4\n2", + }, + time.Unix(42, 0), + ), + ), + }, + err: nil, + }, { name: "no timestamp", input: []byte("cpu value=42"), @@ -425,7 +498,7 @@ var ptests = []struct { err: nil, }, { - name: "no timestamp full precision", + name: "no timestamp", input: []byte("cpu value=42"), timeFunc: func() time.Time { return time.Unix(42, 123456789) @@ -444,27 +517,6 @@ var ptests = []struct { }, err: nil, }, - { - name: "no timestamp partial precision", - input: []byte("cpu value=42"), - timeFunc: func() time.Time { - return time.Unix(42, 123456789) - }, - precision: 1 * time.Millisecond, - metrics: []telegraf.Metric{ - Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(42, 123000000), - ), - ), - }, - err: nil, - }, { name: "multiple lines", input: []byte("cpu value=42\ncpu value=42"), @@ -497,14 +549,16 @@ var ptests = []struct { input: []byte("cpu"), metrics: nil, err: &ParseError{ - Offset: 3, - msg: ErrFieldParse.Error(), - buf: "cpu", + Offset: 3, + LineNumber: 1, + Column: 4, + msg: ErrTagParse.Error(), + buf: "cpu", }, }, { name: "procstat", - input: []byte("procstat,exe=bash,process_name=bash voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,cpu_time_stolen=0,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000"), + input: []byte("procstat,exe=bash,process_name=bash 
voluntary_context_switches=42i,memory_rss=5103616i,rlimit_memory_data_hard=2147483647i,cpu_time_user=0.02,rlimit_file_locks_soft=2147483647i,pid=29417i,cpu_time_nice=0,rlimit_memory_locked_soft=65536i,read_count=259i,rlimit_memory_vms_hard=2147483647i,memory_swap=0i,rlimit_num_fds_soft=1024i,rlimit_nice_priority_hard=0i,cpu_time_soft_irq=0,cpu_time=0i,rlimit_memory_locked_hard=65536i,realtime_priority=0i,signals_pending=0i,nice_priority=20i,cpu_time_idle=0,memory_stack=139264i,memory_locked=0i,rlimit_memory_stack_soft=8388608i,cpu_time_iowait=0,cpu_time_guest=0,cpu_time_guest_nice=0,rlimit_memory_data_soft=2147483647i,read_bytes=0i,rlimit_cpu_time_soft=2147483647i,involuntary_context_switches=2i,write_bytes=106496i,cpu_time_system=0,cpu_time_irq=0,cpu_usage=0,memory_vms=21659648i,memory_data=1576960i,rlimit_memory_stack_hard=2147483647i,num_threads=1i,rlimit_memory_rss_soft=2147483647i,rlimit_realtime_priority_soft=0i,num_fds=4i,write_count=35i,rlimit_signals_pending_soft=78994i,cpu_time_steal=0,rlimit_num_fds_hard=4096i,rlimit_file_locks_hard=2147483647i,rlimit_cpu_time_hard=2147483647i,rlimit_signals_pending_hard=78994i,rlimit_nice_priority_soft=0i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_realtime_priority_hard=0i 1517620624000000000"), metrics: []telegraf.Metric{ Metric( metric.New( @@ -523,7 +577,6 @@ var ptests = []struct { "cpu_time_nice": float64(0), "cpu_time_soft_irq": float64(0), "cpu_time_steal": float64(0), - "cpu_time_stolen": float64(0), "cpu_time_system": float64(0), "cpu_time_user": float64(0.02), "cpu_usage": float64(0), @@ -580,14 +633,11 @@ func TestParser(t *testing.T) { for _, tt := range ptests { t.Run(tt.name, func(t *testing.T) { handler := NewMetricHandler() - handler.SetTimeFunc(DefaultTime) - if tt.timeFunc != nil { - handler.SetTimeFunc(tt.timeFunc) - } - if tt.precision > 0 { - handler.SetTimePrecision(tt.precision) - } parser := NewParser(handler) + parser.SetTimeFunc(DefaultTime) + if tt.timeFunc != nil { + parser.SetTimeFunc(tt.timeFunc) + } metrics, err := parser.Parse(tt.input) require.Equal(t, tt.err, err) @@ -617,14 +667,41 @@ func BenchmarkParser(b *testing.B) { } } +func TestStreamParser(t *testing.T) { + for _, tt := range ptests { + t.Run(tt.name, func(t *testing.T) { + r := bytes.NewBuffer(tt.input) + parser := NewStreamParser(r) + parser.SetTimeFunc(DefaultTime) + if tt.timeFunc != nil { + parser.SetTimeFunc(tt.timeFunc) + } + + var i int + for { + m, err := parser.Next() + if err != nil { + if err == EOF { + break + } + require.Equal(t, tt.err, err) + break + } + + testutil.RequireMetricEqual(t, tt.metrics[i], m) + i++ + } + }) + } +} + func TestSeriesParser(t *testing.T) { var tests = []struct { - name string - input []byte - timeFunc func() time.Time - precision time.Duration - metrics []telegraf.Metric - err error + name string + input []byte + timeFunc func() time.Time + metrics []telegraf.Metric + err error }{ { name: "empty", @@ -667,23 +744,21 @@ func TestSeriesParser(t *testing.T) { input: []byte("cpu,a="), metrics: []telegraf.Metric{}, err: &ParseError{ - Offset: 6, - msg: ErrTagParse.Error(), - buf: "cpu,a=", + Offset: 6, + LineNumber: 1, + Column: 7, + msg: ErrTagParse.Error(), + buf: "cpu,a=", }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { handler := NewMetricHandler() - handler.SetTimeFunc(DefaultTime) - if tt.timeFunc != nil { - handler.SetTimeFunc(tt.timeFunc) - } - if tt.precision > 0 { - handler.SetTimePrecision(tt.precision) - } parser := NewSeriesParser(handler) + if 
tt.timeFunc != nil { + parser.SetTimeFunc(tt.timeFunc) + } metrics, err := parser.Parse(tt.input) require.Equal(t, tt.err, err) @@ -696,3 +771,144 @@ func TestSeriesParser(t *testing.T) { }) } } + +func TestParserErrorString(t *testing.T) { + var ptests = []struct { + name string + input []byte + errString string + }{ + { + name: "multiple line error", + input: []byte("cpu value=42\ncpu value=invalid\ncpu value=42"), + errString: `metric parse error: expected field at 2:11: "cpu value=invalid"`, + }, + { + name: "handler error", + input: []byte("cpu value=9223372036854775808i\ncpu value=42"), + errString: `metric parse error: value out of range at 1:31: "cpu value=9223372036854775808i"`, + }, + { + name: "buffer too long", + input: []byte("cpu " + strings.Repeat("ab", maxErrorBufferSize) + "=invalid\ncpu value=42"), + errString: "metric parse error: expected field at 1:2054: \"...b" + strings.Repeat("ab", maxErrorBufferSize/2-1) + "=<-- here\"", + }, + { + name: "multiple line error", + input: []byte("cpu value=42\ncpu value=invalid\ncpu value=42\ncpu value=invalid"), + errString: `metric parse error: expected field at 2:11: "cpu value=invalid"`, + }, + } + + for _, tt := range ptests { + t.Run(tt.name, func(t *testing.T) { + handler := NewMetricHandler() + parser := NewParser(handler) + + _, err := parser.Parse(tt.input) + require.Equal(t, tt.errString, err.Error()) + }) + } +} + +func TestStreamParserErrorString(t *testing.T) { + var ptests = []struct { + name string + input []byte + errs []string + }{ + { + name: "multiple line error", + input: []byte("cpu value=42\ncpu value=invalid\ncpu value=42"), + errs: []string{ + `metric parse error: expected field at 2:11: "cpu value="`, + }, + }, + { + name: "handler error", + input: []byte("cpu value=9223372036854775808i\ncpu value=42"), + errs: []string{ + `metric parse error: value out of range at 1:31: "cpu value=9223372036854775808i"`, + }, + }, + { + name: "buffer too long", + input: []byte("cpu " + strings.Repeat("ab", maxErrorBufferSize) + "=invalid\ncpu value=42"), + errs: []string{ + "metric parse error: expected field at 1:2054: \"...b" + strings.Repeat("ab", maxErrorBufferSize/2-1) + "=<-- here\"", + }, + }, + { + name: "multiple errors", + input: []byte("foo value=1asdf2.0\nfoo value=2.0\nfoo value=3asdf2.0\nfoo value=4.0"), + errs: []string{ + `metric parse error: expected field at 1:12: "foo value=1"`, + `metric parse error: expected field at 3:12: "foo value=3"`, + }, + }, + } + + for _, tt := range ptests { + t.Run(tt.name, func(t *testing.T) { + parser := NewStreamParser(bytes.NewBuffer(tt.input)) + + var errs []error + for i := 0; i < 20; i++ { + _, err := parser.Next() + if err == EOF { + break + } + + if err != nil { + errs = append(errs, err) + } + } + + require.Equal(t, len(tt.errs), len(errs)) + for i, err := range errs { + require.Equal(t, tt.errs[i], err.Error()) + } + }) + } +} + +type MockReader struct { + ReadF func(p []byte) (int, error) +} + +func (r *MockReader) Read(p []byte) (int, error) { + return r.ReadF(p) +} + +// Errors from the Reader are returned from the Parser +func TestStreamParserReaderError(t *testing.T) { + readerErr := errors.New("error but not eof") + + parser := NewStreamParser(&MockReader{ + ReadF: func(p []byte) (int, error) { + return 0, readerErr + }, + }) + _, err := parser.Next() + require.Error(t, err) + require.Equal(t, err, readerErr) + + _, err = parser.Next() + require.Equal(t, err, EOF) +} + +func TestStreamParserProducesAllAvailableMetrics(t *testing.T) { + r, w := io.Pipe() + + 
parser := NewStreamParser(r)
+	parser.SetTimeFunc(DefaultTime)
+
+	go w.Write([]byte("metric value=1\nmetric2 value=1\n"))
+
+	_, err := parser.Next()
+	require.NoError(t, err)
+
+	// should not block on second read
+	_, err = parser.Next()
+	require.NoError(t, err)
+}
diff --git a/plugins/parsers/json/README.md b/plugins/parsers/json/README.md
new file mode 100644
index 000000000..3bfa60044
--- /dev/null
+++ b/plugins/parsers/json/README.md
@@ -0,0 +1,241 @@
+# JSON
+
+The JSON data format parses a [JSON][json] object or an array of objects into
+metric fields.
+
+**NOTE:** All JSON numbers are converted to float fields. JSON strings are
+ignored unless specified in the `tag_keys` or `json_string_fields` options.
+
+### Configuration
+
+```toml
+[[inputs.file]]
+  files = ["example"]
+
+  ## Data format to consume.
+  ## Each data format has its own unique set of configuration options, read
+  ## more about them here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+  data_format = "json"
+
+  ## When strict is true and a JSON array is being parsed, all objects within the
+  ## array must be valid
+  json_strict = true
+
+  ## Query is a GJSON path that specifies a specific chunk of JSON to be
+  ## parsed, if not specified the whole document will be parsed.
+  ##
+  ## GJSON query paths are described here:
+  ## https://github.com/tidwall/gjson/tree/v1.3.0#path-syntax
+  json_query = ""
+
+  ## Tag keys is an array of keys that should be added as tags. Matching keys
+  ## are no longer saved as fields.
+  tag_keys = [
+    "my_tag_1",
+    "my_tag_2"
+  ]
+
+  ## Array of glob pattern strings for keys that should be added as string fields.
+  json_string_fields = []
+
+  ## Name key is the key to use as the measurement name.
+  json_name_key = ""
+
+  ## Time key is the key containing the time that should be used to create the
+  ## metric.
+  json_time_key = ""
+
+  ## Time format is the time layout that should be used to interpret the json_time_key.
+  ## The time must be `unix`, `unix_ms`, `unix_us`, `unix_ns`, or a time in the
+  ## "reference time". To define a different format, arrange the values from
+  ## the "reference time" in the example to match the format you will be
+  ## using. For more information on the "reference time", visit
+  ## https://golang.org/pkg/time/#Time.Format
+  ## ex: json_time_format = "Mon Jan 2 15:04:05 -0700 MST 2006"
+  ##     json_time_format = "2006-01-02T15:04:05Z07:00"
+  ##     json_time_format = "01/02/2006 15:04:05"
+  ##     json_time_format = "unix"
+  ##     json_time_format = "unix_ms"
+  json_time_format = ""
+
+  ## Timezone allows you to provide an override for timestamps that
+  ## don't already include an offset
+  ## e.g. 04/06/2016 12:41:45
+  ##
+  ## Default: "" which renders UTC
+  ## Options are as follows:
+  ##   1. Local              -- interpret based on machine localtime
+  ##   2. "America/New_York" -- Unix TZ values like those found in https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+  ##   3. UTC                -- or blank/unspecified, will return timestamp in UTC
+  json_timezone = ""
+```
+
+#### json_query
+
+The `json_query` is a [GJSON][gjson] path that can be used to limit the
+portion of the overall JSON document that should be parsed. The result of the
+query should contain a JSON object or an array of objects.
+
+Consult the GJSON [path syntax][gjson syntax] for details and examples.
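The `json_strict` option in the configuration above controls what happens when one object inside a JSON array cannot be turned into a metric. As a rough illustration (assuming the `file` input plugin and a time key, as in the examples below): with the config and input sketched here, `json_strict = true` makes the whole parse fail, while `json_strict = false` skips the second object (which is missing its `time` key) and emits a metric only for the first.

```toml
[[inputs.file]]
  files = ["example"]
  data_format = "json"
  json_strict = false
  json_time_key = "time"
  json_time_format = "02 Jan 06 15:04 MST"
```

```json
[
    {"a": 5, "time": "04 Jan 06 15:04 MST"},
    {"a": 7}
]
```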
+
+#### json_time_key, json_time_format, json_timezone
+
+By default the current time will be used for all created metrics; to set the
+time from the JSON document instead, use the `json_time_key` and
+`json_time_format` options together to set the time to a value in the parsed
+document.
+
+The `json_time_key` option specifies the key containing the time value and
+`json_time_format` must be set to `unix`, `unix_ms`, `unix_us`, `unix_ns`, or
+the Go "reference time" which is defined to be the specific time:
+`Mon Jan 2 15:04:05 MST 2006`.
+
+Consult the Go [time][time parse] package for details and additional examples
+on how to set the time format.
+
+When parsing times that don't include a timezone specifier, times are assumed
+to be UTC. To default to another timezone, or to local time, specify the
+`json_timezone` option. This option should be set to a
+[Unix TZ value](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones),
+such as `America/New_York`, to `Local` to utilize the system timezone, or to `UTC`.
+
+### Examples
+
+#### Basic Parsing
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  name_override = "myjsonmetric"
+  data_format = "json"
+```
+
+Input:
+```json
+{
+    "a": 5,
+    "b": {
+        "c": 6
+    },
+    "ignored": "I'm a string"
+}
+```
+
+Output:
+```
+myjsonmetric a=5,b_c=6
+```
+
+#### Name, Tags, and String Fields
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  json_name_key = "name"
+  tag_keys = ["my_tag_1"]
+  json_string_fields = ["b_my_field"]
+  data_format = "json"
+```
+
+Input:
+```json
+{
+    "a": 5,
+    "b": {
+        "c": 6,
+        "my_field": "description"
+    },
+    "my_tag_1": "foo",
+    "name": "my_json"
+}
+```
+
+Output:
+```
+my_json,my_tag_1=foo a=5,b_c=6,b_my_field="description"
+```
+
+#### Arrays
+
+If the JSON data is an array, then each object within the array is parsed with
+the configured settings.
+
+Config:
+```toml
+[[inputs.file]]
+  files = ["example"]
+  data_format = "json"
+  json_time_key = "b_time"
+  json_time_format = "02 Jan 06 15:04 MST"
+```
+
+Input:
+```json
+[
+    {
+        "a": 5,
+        "b": {
+            "c": 6,
+            "time":"04 Jan 06 15:04 MST"
+        }
+    },
+    {
+        "a": 7,
+        "b": {
+            "c": 8,
+            "time":"11 Jan 07 15:04 MST"
+        }
+    }
+]
+```
+
+Output:
+```
+file a=5,b_c=6 1136387040000000000
+file a=7,b_c=8 1168527840000000000
+```
+
+#### Query
+
+The `json_query` option can be used to parse a subset of the document.
+ +Config: +```toml +[[inputs.file]] + files = ["example"] + data_format = "json" + tag_keys = ["first"] + json_string_fields = ["last"] + json_query = "obj.friends" +``` + +Input: +```json +{ + "obj": { + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44}, + {"first": "Roger", "last": "Craig", "age": 68}, + {"first": "Jane", "last": "Murphy", "age": 47} + ] + } +} +``` + +Output: +``` +file,first=Dale last="Murphy",age=44 +file,first=Roger last="Craig",age=68 +file,first=Jane last="Murphy",age=47 +``` + +[gjson]: https://github.com/tidwall/gjson +[gjson syntax]: https://github.com/tidwall/gjson#path-syntax +[json]: https://www.json.org/ +[time parse]: https://golang.org/pkg/time/#Parse diff --git a/plugins/parsers/json/parser.go b/plugins/parsers/json/parser.go index 62d17c76a..bd9dee869 100644 --- a/plugins/parsers/json/parser.go +++ b/plugins/parsers/json/parser.go @@ -3,70 +3,154 @@ package json import ( "bytes" "encoding/json" + "errors" "fmt" "log" "strconv" - "strings" "time" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" + "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/metric" + "github.com/tidwall/gjson" ) var ( - utf8BOM = []byte("\xef\xbb\xbf") + utf8BOM = []byte("\xef\xbb\xbf") + ErrWrongType = errors.New("must be an object or an array of objects") ) -type JSONParser struct { - MetricName string - TagKeys []string - DefaultTags map[string]string +type Config struct { + MetricName string + TagKeys []string + NameKey string + StringFields []string + Query string + TimeKey string + TimeFormat string + Timezone string + DefaultTags map[string]string + Strict bool } -func (p *JSONParser) parseArray(buf []byte) ([]telegraf.Metric, error) { - metrics := make([]telegraf.Metric, 0) +type Parser struct { + metricName string + tagKeys []string + stringFields filter.Filter + nameKey string + query string + timeKey string + timeFormat string + timezone string + defaultTags map[string]string + strict bool +} - var jsonOut []map[string]interface{} - err := json.Unmarshal(buf, &jsonOut) +func New(config *Config) (*Parser, error) { + stringFilter, err := filter.Compile(config.StringFields) if err != nil { - err = fmt.Errorf("unable to parse out as JSON Array, %s", err) return nil, err } - for _, item := range jsonOut { - metrics, err = p.parseObject(metrics, item) - } - return metrics, nil + + return &Parser{ + metricName: config.MetricName, + tagKeys: config.TagKeys, + nameKey: config.NameKey, + stringFields: stringFilter, + query: config.Query, + timeKey: config.TimeKey, + timeFormat: config.TimeFormat, + timezone: config.Timezone, + defaultTags: config.DefaultTags, + strict: config.Strict, + }, nil } -func (p *JSONParser) parseObject(metrics []telegraf.Metric, jsonOut map[string]interface{}) ([]telegraf.Metric, error) { +func (p *Parser) parseArray(data []interface{}, timestamp time.Time) ([]telegraf.Metric, error) { + results := make([]telegraf.Metric, 0) + for _, item := range data { + switch v := item.(type) { + case map[string]interface{}: + metrics, err := p.parseObject(v, timestamp) + if err != nil { + if p.strict { + return nil, err + } + continue + } + results = append(results, metrics...) 
+ default: + return nil, ErrWrongType + + } + } + + return results, nil +} + +func (p *Parser) parseObject(data map[string]interface{}, timestamp time.Time) ([]telegraf.Metric, error) { tags := make(map[string]string) - for k, v := range p.DefaultTags { + for k, v := range p.defaultTags { tags[k] = v } f := JSONFlattener{} - err := f.FullFlattenJSON("", jsonOut, true, true) + err := f.FullFlattenJSON("", data, true, true) if err != nil { return nil, err } + name := p.metricName + + //checks if json_name_key is set + if p.nameKey != "" { + switch field := f.Fields[p.nameKey].(type) { + case string: + name = field + } + } + + //if time key is specified, set timestamp to it + if p.timeKey != "" { + if p.timeFormat == "" { + err := fmt.Errorf("use of 'json_time_key' requires 'json_time_format'") + return nil, err + } + + if f.Fields[p.timeKey] == nil { + err := fmt.Errorf("JSON time key could not be found") + return nil, err + } + + timestamp, err = internal.ParseTimestamp(p.timeFormat, f.Fields[p.timeKey], p.timezone) + if err != nil { + return nil, err + } + + delete(f.Fields, p.timeKey) + + //if the year is 0, set to current year + if timestamp.Year() == 0 { + timestamp = timestamp.AddDate(time.Now().Year(), 0, 0) + } + } + tags, nFields := p.switchFieldToTag(tags, f.Fields) - - metric, err := metric.New(p.MetricName, tags, nFields, time.Now().UTC()) - + metric, err := metric.New(name, tags, nFields, timestamp) if err != nil { return nil, err } - return append(metrics, metric), nil + return []telegraf.Metric{metric}, nil } //will take in field map with strings and bools, //search for TagKeys that match fieldnames and add them to tags //will delete any strings/bools that shouldn't be fields //assumes that any non-numeric values in TagKeys should be displayed as tags -func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) { - for _, name := range p.TagKeys { +func (p *Parser) switchFieldToTag(tags map[string]string, fields map[string]interface{}) (map[string]string, map[string]interface{}) { + for _, name := range p.tagKeys { //switch any fields in tagkeys into tags if fields[name] == nil { continue @@ -87,38 +171,52 @@ func (p *JSONParser) switchFieldToTag(tags map[string]string, fields map[string] } //remove any additional string/bool values from fields - for k := range fields { - switch fields[k].(type) { - case string: - delete(fields, k) - case bool: - delete(fields, k) + for fk := range fields { + switch fields[fk].(type) { + case string, bool: + if p.stringFields != nil && p.stringFields.Match(fk) { + continue + } + delete(fields, fk) } } return tags, fields } -func (p *JSONParser) Parse(buf []byte) ([]telegraf.Metric, error) { +func (p *Parser) Parse(buf []byte) ([]telegraf.Metric, error) { + if p.query != "" { + result := gjson.GetBytes(buf, p.query) + buf = []byte(result.Raw) + if !result.IsArray() && !result.IsObject() { + err := fmt.Errorf("E! 
Query path must lead to a JSON object or array of objects, but lead to: %v", result.Type) + return nil, err + } + } + buf = bytes.TrimSpace(buf) buf = bytes.TrimPrefix(buf, utf8BOM) if len(buf) == 0 { return make([]telegraf.Metric, 0), nil } - if !isarray(buf) { - metrics := make([]telegraf.Metric, 0) - var jsonOut map[string]interface{} - err := json.Unmarshal(buf, &jsonOut) - if err != nil { - err = fmt.Errorf("unable to parse out as JSON, %s", err) - return nil, err - } - return p.parseObject(metrics, jsonOut) + var data interface{} + err := json.Unmarshal(buf, &data) + if err != nil { + return nil, err + } + + timestamp := time.Now().UTC() + switch v := data.(type) { + case map[string]interface{}: + return p.parseObject(v, timestamp) + case []interface{}: + return p.parseArray(v, timestamp) + default: + return nil, ErrWrongType } - return p.parseArray(buf) } -func (p *JSONParser) ParseLine(line string) (telegraf.Metric, error) { +func (p *Parser) ParseLine(line string) (telegraf.Metric, error) { metrics, err := p.Parse([]byte(line + "\n")) if err != nil { @@ -126,14 +224,14 @@ func (p *JSONParser) ParseLine(line string) (telegraf.Metric, error) { } if len(metrics) < 1 { - return nil, fmt.Errorf("Can not parse the line: %s, for data format: influx ", line) + return nil, fmt.Errorf("can not parse the line: %s, for data format: json ", line) } return metrics[0], nil } -func (p *JSONParser) SetDefaultTags(tags map[string]string) { - p.DefaultTags = tags +func (p *Parser) SetDefaultTags(tags map[string]string) { + p.defaultTags = tags } type JSONFlattener struct { @@ -161,19 +259,27 @@ func (f *JSONFlattener) FullFlattenJSON( if f.Fields == nil { f.Fields = make(map[string]interface{}) } - fieldname = strings.Trim(fieldname, "_") + switch t := v.(type) { case map[string]interface{}: for k, v := range t { - err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool) + fieldkey := k + if fieldname != "" { + fieldkey = fieldname + "_" + fieldkey + } + + err := f.FullFlattenJSON(fieldkey, v, convertString, convertBool) if err != nil { return err } } case []interface{}: for i, v := range t { - k := strconv.Itoa(i) - err := f.FullFlattenJSON(fieldname+"_"+k+"_", v, convertString, convertBool) + fieldkey := strconv.Itoa(i) + if fieldname != "" { + fieldkey = fieldname + "_" + fieldkey + } + err := f.FullFlattenJSON(fieldkey, v, convertString, convertBool) if err != nil { return nil } @@ -200,13 +306,3 @@ func (f *JSONFlattener) FullFlattenJSON( } return nil } - -func isarray(buf []byte) bool { - ia := bytes.IndexByte(buf, '[') - ib := bytes.IndexByte(buf, '{') - if ia > -1 && ia < ib { - return true - } else { - return false - } -} diff --git a/plugins/parsers/json/parser_test.go b/plugins/parsers/json/parser_test.go index c26b209a2..31c507e75 100644 --- a/plugins/parsers/json/parser_test.go +++ b/plugins/parsers/json/parser_test.go @@ -1,9 +1,12 @@ package json import ( + "fmt" "testing" + "time" - "github.com/stretchr/testify/assert" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) @@ -14,6 +17,7 @@ const ( validJSONArrayMultiple = "[{\"a\": 5, \"b\": {\"c\": 6}}, {\"a\": 7, \"b\": {\"c\": 8}}]" invalidJSON = "I don't think this is JSON" invalidJSON2 = "{\"a\": 5, \"b\": \"c\": 6}}" + mixedValidityJSON = "[{\"a\": 5, \"time\": \"2006-01-02T15:04:05\"}, {\"a\": 2}]" ) const validJSONTags = ` @@ -49,150 +53,193 @@ const validJSONArrayTags = ` ` func TestParseValidJSON(t *testing.T) { - parser := JSONParser{ + 
parser, err := New(&Config{ MetricName: "json_test", - } + }) + require.NoError(t, err) // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Test that newlines are fine metrics, err = parser.Parse([]byte(validJSONNewline)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "d": float64(7), "b_d": float64(8), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Test that strings without TagKeys defined are ignored metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Test that whitespace only will parse as an empty list of metrics metrics, err = parser.Parse([]byte("\n\t")) - assert.NoError(t, err) - assert.Len(t, metrics, 0) + require.NoError(t, err) + require.Len(t, metrics, 0) // Test that an empty string will parse as an empty list of metrics metrics, err = parser.Parse([]byte("")) - assert.NoError(t, err) - assert.Len(t, metrics, 0) + require.NoError(t, err) + require.Len(t, metrics, 0) } func TestParseLineValidJSON(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", - } + }) + require.NoError(t, err) // Most basic vanilla test metric, err := parser.ParseLine(validJSON) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) // Test that newlines are fine metric, err = parser.ParseLine(validJSONNewline) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "d": float64(7), "b_d": float64(8), }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) // Test that strings without TagKeys defined are ignored metric, err = parser.ParseLine(validJSONTags) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + 
require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) } func TestParseInvalidJSON(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", - } + }) + require.NoError(t, err) - _, err := parser.Parse([]byte(invalidJSON)) - assert.Error(t, err) + _, err = parser.Parse([]byte(invalidJSON)) + require.Error(t, err) _, err = parser.Parse([]byte(invalidJSON2)) - assert.Error(t, err) + require.Error(t, err) _, err = parser.ParseLine(invalidJSON) - assert.Error(t, err) + require.Error(t, err) +} + +func TestParseJSONImplicitStrictness(t *testing.T) { + parserImplicitNoStrict, err := New(&Config{ + MetricName: "json_test", + TimeKey: "time", + }) + require.NoError(t, err) + + _, err = parserImplicitNoStrict.Parse([]byte(mixedValidityJSON)) + require.NoError(t, err) +} + +func TestParseJSONExplicitStrictnessFalse(t *testing.T) { + parserNoStrict, err := New(&Config{ + MetricName: "json_test", + TimeKey: "time", + Strict: false, + }) + require.NoError(t, err) + + _, err = parserNoStrict.Parse([]byte(mixedValidityJSON)) + require.NoError(t, err) +} + +func TestParseJSONExplicitStrictnessTrue(t *testing.T) { + parserStrict, err := New(&Config{ + MetricName: "json_test", + TimeKey: "time", + Strict: true, + }) + require.NoError(t, err) + + _, err = parserStrict.Parse([]byte(mixedValidityJSON)) + require.Error(t, err) } func TestParseWithTagKeys(t *testing.T) { // Test that strings not matching tag keys are ignored - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"wrongtagkey"}, - } + }) + require.NoError(t, err) + metrics, err := parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Test that single tag key is found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag"}, - } + }) + require.NoError(t, err) + metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", }, metrics[0].Tags()) // Test that both tag keys are found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag", "othertag"}, - } + }) + require.NoError(t, err) metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + 
require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", "othertag": "baz", }, metrics[0].Tags()) @@ -200,83 +247,89 @@ func TestParseWithTagKeys(t *testing.T) { func TestParseLineWithTagKeys(t *testing.T) { // Test that strings not matching tag keys are ignored - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"wrongtagkey"}, - } + }) + require.NoError(t, err) metric, err := parser.ParseLine(validJSONTags) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{}, metric.Tags()) + require.Equal(t, map[string]string{}, metric.Tags()) // Test that single tag key is found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag"}, - } + }) + require.NoError(t, err) + metric, err = parser.ParseLine(validJSONTags) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", }, metric.Tags()) // Test that both tag keys are found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag", "othertag"}, - } + }) + require.NoError(t, err) + metric, err = parser.ParseLine(validJSONTags) - assert.NoError(t, err) - assert.Equal(t, "json_test", metric.Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Equal(t, "json_test", metric.Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metric.Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", "othertag": "baz", }, metric.Tags()) } func TestParseValidJSONDefaultTags(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag"}, DefaultTags: map[string]string{ "t4g": "default", }, - } + }) + require.NoError(t, err) // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"t4g": "default"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"t4g": "default"}, metrics[0].Tags()) // Test that tagkeys and default tags are applied metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, 
metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "t4g": "default", "mytag": "foobar", }, metrics[0].Tags()) @@ -284,147 +337,155 @@ func TestParseValidJSONDefaultTags(t *testing.T) { // Test that default tags are overridden by tag keys func TestParseValidJSONDefaultTagsOverride(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", TagKeys: []string{"mytag"}, DefaultTags: map[string]string{ "mytag": "default", }, - } + }) + require.NoError(t, err) // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSON)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"mytag": "default"}, metrics[0].Tags()) + require.Equal(t, map[string]string{"mytag": "default"}, metrics[0].Tags()) // Test that tagkeys override default tags metrics, err = parser.Parse([]byte(validJSONTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foobar", }, metrics[0].Tags()) } // Test that json arrays can be parsed func TestParseValidJSONArray(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_array_test", - } + }) + require.NoError(t, err) // Most basic vanilla test metrics, err := parser.Parse([]byte(validJSONArray)) - assert.NoError(t, err) - assert.Len(t, metrics, 1) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 1) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) // Basic multiple datapoints metrics, err = parser.Parse([]byte(validJSONArrayMultiple)) - assert.NoError(t, err) - assert.Len(t, metrics, 2) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 2) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[1].Tags()) - assert.Equal(t, "json_array_test", metrics[1].Name()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, map[string]string{}, metrics[1].Tags()) + require.Equal(t, "json_array_test", metrics[1].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(7), "b_c": float64(8), }, metrics[1].Fields()) - assert.Equal(t, map[string]string{}, metrics[1].Tags()) + require.Equal(t, map[string]string{}, metrics[1].Tags()) } func TestParseArrayWithTagKeys(t *testing.T) { // Test that strings not matching tag keys are ignored - parser := JSONParser{ + parser, err := 
New(&Config{ MetricName: "json_array_test", TagKeys: []string{"wrongtagkey"}, - } + }) + require.NoError(t, err) + metrics, err := parser.Parse([]byte(validJSONArrayTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 2) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 2) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + require.Equal(t, map[string]string{}, metrics[0].Tags()) - assert.Equal(t, "json_array_test", metrics[1].Name()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, "json_array_test", metrics[1].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(7), "b_c": float64(8), }, metrics[1].Fields()) - assert.Equal(t, map[string]string{}, metrics[1].Tags()) + require.Equal(t, map[string]string{}, metrics[1].Tags()) // Test that single tag key is found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_array_test", TagKeys: []string{"mytag"}, - } + }) + require.NoError(t, err) + metrics, err = parser.Parse([]byte(validJSONArrayTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 2) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 2) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foo", }, metrics[0].Tags()) - assert.Equal(t, "json_array_test", metrics[1].Name()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, "json_array_test", metrics[1].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(7), "b_c": float64(8), }, metrics[1].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "bar", }, metrics[1].Tags()) // Test that both tag keys are found and applied - parser = JSONParser{ + parser, err = New(&Config{ MetricName: "json_array_test", TagKeys: []string{"mytag", "othertag"}, - } + }) + require.NoError(t, err) + metrics, err = parser.Parse([]byte(validJSONArrayTags)) - assert.NoError(t, err) - assert.Len(t, metrics, 2) - assert.Equal(t, "json_array_test", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ + require.NoError(t, err) + require.Len(t, metrics, 2) + require.Equal(t, "json_array_test", metrics[0].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(5), "b_c": float64(6), }, metrics[0].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "foo", "othertag": "baz", }, metrics[0].Tags()) - assert.Equal(t, "json_array_test", metrics[1].Name()) - assert.Equal(t, map[string]interface{}{ + require.Equal(t, "json_array_test", metrics[1].Name()) + require.Equal(t, map[string]interface{}{ "a": float64(7), "b_c": float64(8), }, metrics[1].Fields()) - assert.Equal(t, map[string]string{ + require.Equal(t, map[string]string{ "mytag": "bar", "othertag": "baz", }, metrics[1].Tags()) @@ -433,13 +494,14 @@ func TestParseArrayWithTagKeys(t *testing.T) { var jsonBOM = []byte("\xef\xbb\xbf[{\"value\":17}]") func TestHttpJsonBOM(t *testing.T) { - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", - } + }) + require.NoError(t, 
err) // Most basic vanilla test - _, err := parser.Parse(jsonBOM) - assert.NoError(t, err) + _, err = parser.Parse(jsonBOM) + require.NoError(t, err) } //for testing issue #4260 @@ -448,22 +510,441 @@ func TestJSONParseNestedArray(t *testing.T) { "total_devices": 5, "total_threads": 10, "shares": { - "total": 5, - "accepted": 5, - "rejected": 0, - "avg_find_time": 4, - "tester": "work", - "tester2": "don't want this", - "tester3": 7.93 + "total": 5, + "accepted": 5, + "rejected": 0, + "avg_find_time": 4, + "tester": "work", + "tester2": "don't want this", + "tester3": { + "hello":"sup", + "fun":"money", + "break":9 + } } }` - parser := JSONParser{ + parser, err := New(&Config{ MetricName: "json_test", - TagKeys: []string{"total_devices", "total_threads", "shares_tester", "shares_tester3"}, - } + TagKeys: []string{"total_devices", "total_threads", "shares_tester3_fun"}, + }) + require.NoError(t, err) + + metrics, err := parser.Parse([]byte(testString)) + require.Len(t, metrics, 1) + require.NoError(t, err) + require.Equal(t, 3, len(metrics[0].Tags())) +} + +func TestJSONQueryErrorOnArray(t *testing.T) { + testString := `{ + "total_devices": 5, + "total_threads": 10, + "shares": { + "total": 5, + "accepted": 6, + "test_string": "don't want this", + "test_obj": { + "hello":"sup", + "fun":"money", + "break":9 + }, + "myArr":[4,5,6] + } + }` + + parser, err := New(&Config{ + MetricName: "json_test", + TagKeys: []string{}, + Query: "shares.myArr", + }) + require.NoError(t, err) + + _, err = parser.Parse([]byte(testString)) + require.Error(t, err) +} + +func TestArrayOfObjects(t *testing.T) { + testString := `{ + "meta": { + "info":9, + "shares": [{ + "channel": 6, + "time": 1130, + "ice":"man" + }, + { + "channel": 5, + "time": 1030, + "ice":"bucket" + }, + { + "channel": 10, + "time": 330, + "ice":"cream" + }] + }, + "more_stuff":"junk" + }` + + parser, err := New(&Config{ + MetricName: "json_test", + TagKeys: []string{"ice"}, + Query: "meta.shares", + }) + require.NoError(t, err) metrics, err := parser.Parse([]byte(testString)) require.NoError(t, err) - require.Equal(t, len(parser.TagKeys), len(metrics[0].Tags())) + require.Equal(t, 3, len(metrics)) +} + +func TestUseCaseJSONQuery(t *testing.T) { + testString := `{ + "obj": { + "name": {"first": "Tom", "last": "Anderson"}, + "age":37, + "children": ["Sara","Alex","Jack"], + "fav.movie": "Deer Hunter", + "friends": [ + {"first": "Dale", "last": "Murphy", "age": 44}, + {"first": "Roger", "last": "Craig", "age": 68}, + {"first": "Jane", "last": "Murphy", "age": 47} + ] + } + }` + + parser, err := New(&Config{ + MetricName: "json_test", + StringFields: []string{"last"}, + TagKeys: []string{"first"}, + Query: "obj.friends", + }) + require.NoError(t, err) + + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 3, len(metrics)) + require.Equal(t, metrics[0].Fields()["last"], "Murphy") +} + +func TestTimeParser(t *testing.T) { + testString := `[ + { + "a": 5, + "b": { + "c": 6, + "time":"04 Jan 06 15:04 MST" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }, + { + "a": 7, + "b": { + "c": 8, + "time":"11 Jan 07 15:04 MST" + }, + "my_tag_1": "bar", + "my_tag_2": "baz" + } + ]` + + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "02 Jan 06 15:04 MST", + }) + require.NoError(t, err) + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 2, len(metrics)) + require.Equal(t, false, metrics[0].Time() == metrics[1].Time()) +} + +func 
TestTimeParserWithTimezone(t *testing.T) { + testString := `{ + "time": "04 Jan 06 15:04" + }` + + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "time", + TimeFormat: "02 Jan 06 15:04", + Timezone: "America/New_York", + }) + require.NoError(t, err) + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 1, len(metrics)) + require.EqualValues(t, int64(1136405040000000000), metrics[0].Time().UnixNano()) +} + +func TestUnixTimeParser(t *testing.T) { + testString := `[ + { + "a": 5, + "b": { + "c": 6, + "time": "1536001411.1234567890" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }, + { + "a": 7, + "b": { + "c": 8, + "time": 1536002769.123 + }, + "my_tag_1": "bar", + "my_tag_2": "baz" + } + ]` + + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "unix", + }) + require.NoError(t, err) + + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 2, len(metrics)) + require.Equal(t, false, metrics[0].Time() == metrics[1].Time()) +} + +func TestUnixMsTimeParser(t *testing.T) { + testString := `[ + { + "a": 5, + "b": { + "c": 6, + "time": "1536001411100" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }, + { + "a": 7, + "b": { + "c": 8, + "time": 1536002769123 + }, + "my_tag_1": "bar", + "my_tag_2": "baz" + } + ]` + + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "unix_ms", + }) + require.NoError(t, err) + + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, 2, len(metrics)) + require.Equal(t, false, metrics[0].Time() == metrics[1].Time()) +} + +func TestTimeErrors(t *testing.T) { + testString := `{ + "a": 5, + "b": { + "c": 6, + "time":"04 Jan 06 15:04 MST" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }` + + parser, err := New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "02 January 06 15:04 MST", + }) + require.NoError(t, err) + + metrics, err := parser.Parse([]byte(testString)) + require.Error(t, err) + require.Equal(t, 0, len(metrics)) + + testString2 := `{ + "a": 5, + "b": { + "c": 6 + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }` + + parser, err = New(&Config{ + MetricName: "json_test", + TimeKey: "b_time", + TimeFormat: "02 January 06 15:04 MST", + }) + require.NoError(t, err) + + metrics, err = parser.Parse([]byte(testString2)) + require.Error(t, err) + require.Equal(t, 0, len(metrics)) + require.Equal(t, fmt.Errorf("JSON time key could not be found"), err) +} + +func TestShareTimestamp(t *testing.T) { + parser, err := New(&Config{ + MetricName: "json_test", + }) + require.NoError(t, err) + + metrics, err := parser.Parse([]byte(validJSONArrayMultiple)) + require.NoError(t, err) + require.Equal(t, 2, len(metrics)) + require.Equal(t, true, metrics[0].Time() == metrics[1].Time()) +} + +func TestNameKey(t *testing.T) { + testString := `{ + "a": 5, + "b": { + "c": "this is my name", + "time":"04 Jan 06 15:04 MST" + }, + "my_tag_1": "foo", + "my_tag_2": "baz" + }` + + parser, err := New(&Config{ + NameKey: "b_c", + }) + require.NoError(t, err) + + metrics, err := parser.Parse([]byte(testString)) + require.NoError(t, err) + require.Equal(t, "this is my name", metrics[0].Name()) +} + +func TestParseArrayWithWrongType(t *testing.T) { + data := `[{"answer": 42}, 123]` + + parser, err := New(&Config{}) + require.NoError(t, err) + + _, err = parser.Parse([]byte(data)) + require.Error(t, err) +} + +func TestParse(t *testing.T) { + tests := []struct { 
+ name string + config *Config + input []byte + expected []telegraf.Metric + }{ + { + name: "tag keys with underscore issue 6705", + config: &Config{ + MetricName: "json", + TagKeys: []string{"metric___name__"}, + }, + input: []byte(`{"metric": {"__name__": "howdy", "time_idle": 42}}`), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{ + "metric___name__": "howdy", + }, + map[string]interface{}{ + "metric_time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "parse empty array", + config: &Config{}, + input: []byte(`[]`), + expected: []telegraf.Metric{}, + }, + { + name: "parse simple array", + config: &Config{ + MetricName: "json", + }, + input: []byte(`[{"answer": 42}]`), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{}, + map[string]interface{}{ + "answer": 42.0, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "string field glob", + config: &Config{ + MetricName: "json", + StringFields: []string{"*"}, + }, + input: []byte(` +{ + "color": "red", + "status": "error" +} +`), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{}, + map[string]interface{}{ + "color": "red", + "status": "error", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "time key is deleted from fields", + config: &Config{ + MetricName: "json", + TimeKey: "timestamp", + TimeFormat: "unix", + }, + input: []byte(` +{ + "value": 42, + "timestamp": 1541183052 +} +`), + expected: []telegraf.Metric{ + testutil.MustMetric( + "json", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1541183052, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser, err := New(tt.config) + require.NoError(t, err) + + actual, err := parser.Parse(tt.input) + require.NoError(t, err) + + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.IgnoreTime()) + }) + } } diff --git a/plugins/parsers/logfmt/README.md b/plugins/parsers/logfmt/README.md new file mode 100644 index 000000000..d3e8ab66f --- /dev/null +++ b/plugins/parsers/logfmt/README.md @@ -0,0 +1,30 @@ +# Logfmt + +The `logfmt` data format parses data in [logfmt] format. + +[logfmt]: https://brandur.org/logfmt + +### Configuration + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "logfmt" +``` + +### Metrics + +Each key/value pair in the line is added to a new metric as a field. The type +of the field is automatically determined based on the contents of the value. + +### Examples + +``` +- method=GET host=example.org ts=2018-07-24T19:43:40.275Z connect=4ms service=8ms status=200 bytes=1653 ++ logfmt method="GET",host="example.org",ts="2018-07-24T19:43:40.275Z",connect="4ms",service="8ms",status=200i,bytes=1653i +``` diff --git a/plugins/parsers/logfmt/parser.go b/plugins/parsers/logfmt/parser.go new file mode 100644 index 000000000..603dbbae8 --- /dev/null +++ b/plugins/parsers/logfmt/parser.go @@ -0,0 +1,111 @@ +package logfmt + +import ( + "bytes" + "fmt" + "strconv" + "time" + + "github.com/go-logfmt/logfmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +var ( + ErrNoMetric = fmt.Errorf("no metric in line") +) + +// Parser decodes logfmt formatted messages into metrics. 
+type Parser struct { + MetricName string + DefaultTags map[string]string + Now func() time.Time +} + +// NewParser creates a parser. +func NewParser(metricName string, defaultTags map[string]string) *Parser { + return &Parser{ + MetricName: metricName, + DefaultTags: defaultTags, + Now: time.Now, + } +} + +// Parse converts a slice of bytes in logfmt format to metrics. +func (p *Parser) Parse(b []byte) ([]telegraf.Metric, error) { + reader := bytes.NewReader(b) + decoder := logfmt.NewDecoder(reader) + metrics := make([]telegraf.Metric, 0) + for { + ok := decoder.ScanRecord() + if !ok { + err := decoder.Err() + if err != nil { + return nil, err + } + break + } + fields := make(map[string]interface{}) + for decoder.ScanKeyval() { + if string(decoder.Value()) == "" { + continue + } + + //type conversions + value := string(decoder.Value()) + if iValue, err := strconv.ParseInt(value, 10, 64); err == nil { + fields[string(decoder.Key())] = iValue + } else if fValue, err := strconv.ParseFloat(value, 64); err == nil { + fields[string(decoder.Key())] = fValue + } else if bValue, err := strconv.ParseBool(value); err == nil { + fields[string(decoder.Key())] = bValue + } else { + fields[string(decoder.Key())] = value + } + } + if len(fields) == 0 { + continue + } + + m, err := metric.New(p.MetricName, map[string]string{}, fields, p.Now()) + if err != nil { + return nil, err + } + + metrics = append(metrics, m) + } + p.applyDefaultTags(metrics) + return metrics, nil +} + +// ParseLine converts a single line of text in logfmt format to metrics. +func (p *Parser) ParseLine(s string) (telegraf.Metric, error) { + metrics, err := p.Parse([]byte(s)) + if err != nil { + return nil, err + } + + if len(metrics) < 1 { + return nil, ErrNoMetric + } + return metrics[0], nil +} + +// SetDefaultTags adds tags to the metrics outputs of Parse and ParseLine. 
+func (p *Parser) SetDefaultTags(tags map[string]string) { + p.DefaultTags = tags +} + +func (p *Parser) applyDefaultTags(metrics []telegraf.Metric) { + if len(p.DefaultTags) == 0 { + return + } + + for _, m := range metrics { + for k, v := range p.DefaultTags { + if !m.HasTag(k) { + m.AddTag(k, v) + } + } + } +} diff --git a/plugins/parsers/logfmt/parser_test.go b/plugins/parsers/logfmt/parser_test.go new file mode 100644 index 000000000..dfacd8c8f --- /dev/null +++ b/plugins/parsers/logfmt/parser_test.go @@ -0,0 +1,226 @@ +package logfmt + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" +) + +func MustMetric(t *testing.T, m *testutil.Metric) telegraf.Metric { + t.Helper() + v, err := metric.New(m.Measurement, m.Tags, m.Fields, m.Time) + if err != nil { + t.Fatal(err) + } + return v +} + +func TestParse(t *testing.T) { + tests := []struct { + name string + measurement string + now func() time.Time + bytes []byte + want []telegraf.Metric + wantErr bool + }{ + { + name: "no bytes returns no metrics", + now: func() time.Time { return time.Unix(0, 0) }, + want: []telegraf.Metric{}, + }, + { + name: "test without trailing end", + bytes: []byte("foo=\"bar\""), + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + want: []telegraf.Metric{ + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ + "foo": "bar", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "test with trailing end", + bytes: []byte("foo=\"bar\"\n"), + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + want: []telegraf.Metric{ + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ + "foo": "bar", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "logfmt parser returns all the fields", + bytes: []byte(`ts=2018-07-24T19:43:40.275Z lvl=info msg="http request" method=POST`), + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + want: []telegraf.Metric{ + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ + "lvl": "info", + "msg": "http request", + "method": "POST", + "ts": "2018-07-24T19:43:40.275Z", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "logfmt parser parses every line", + bytes: []byte("ts=2018-07-24T19:43:40.275Z lvl=info msg=\"http request\" method=POST\nparent_id=088876RL000 duration=7.45 log_id=09R4e4Rl000"), + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + want: []telegraf.Metric{ + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ + "lvl": "info", + "msg": "http request", + "method": "POST", + "ts": "2018-07-24T19:43:40.275Z", + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ + "parent_id": "088876RL000", + "duration": 7.45, + "log_id": "09R4e4Rl000", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "keys without = or values are ignored", + now: func() time.Time { return time.Unix(0, 0) }, + bytes: []byte(`i am no data.`), + want: []telegraf.Metric{}, + wantErr: false, + }, + { + name: "keys without values are ignored", + now: func() time.Time { return time.Unix(0, 0) }, + bytes: []byte(`foo="" bar=`), + want: []telegraf.Metric{}, + wantErr: false, + }, + { + name: "unterminated quote produces error", + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + bytes: []byte(`bar=baz 
foo="bar`), + want: []telegraf.Metric{}, + wantErr: true, + }, + { + name: "malformed key", + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + bytes: []byte(`"foo=" bar=baz`), + want: []telegraf.Metric{}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := Parser{ + MetricName: tt.measurement, + Now: tt.now, + } + got, err := l.Parse(tt.bytes) + if (err != nil) != tt.wantErr { + t.Errorf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr) + return + } + + testutil.RequireMetricsEqual(t, tt.want, got) + }) + } +} + +func TestParseLine(t *testing.T) { + tests := []struct { + name string + s string + measurement string + now func() time.Time + want telegraf.Metric + wantErr bool + }{ + { + name: "No Metric In line", + now: func() time.Time { return time.Unix(0, 0) }, + want: nil, + wantErr: true, + }, + { + name: "Log parser fmt returns all fields", + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + s: `ts=2018-07-24T19:43:35.207268Z lvl=5 msg="Write failed" log_id=09R4e4Rl000`, + want: testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ + "ts": "2018-07-24T19:43:35.207268Z", + "lvl": int64(5), + "msg": "Write failed", + "log_id": "09R4e4Rl000", + }, + time.Unix(0, 0), + ), + }, + { + name: "ParseLine only returns metrics from first string", + now: func() time.Time { return time.Unix(0, 0) }, + measurement: "testlog", + s: "ts=2018-07-24T19:43:35.207268Z lvl=5 msg=\"Write failed\" log_id=09R4e4Rl000\nmethod=POST parent_id=088876RL000 duration=7.45 log_id=09R4e4Rl000", + want: testutil.MustMetric( + "testlog", + map[string]string{}, + map[string]interface{}{ + "ts": "2018-07-24T19:43:35.207268Z", + "lvl": int64(5), + "msg": "Write failed", + "log_id": "09R4e4Rl000", + }, + time.Unix(0, 0), + ), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := Parser{ + MetricName: tt.measurement, + Now: tt.now, + } + got, err := l.ParseLine(tt.s) + if (err != nil) != tt.wantErr { + t.Fatalf("Logfmt.Parse error = %v, wantErr %v", err, tt.wantErr) + } + testutil.RequireMetricEqual(t, tt.want, got) + }) + } +} diff --git a/plugins/parsers/nagios/README.md b/plugins/parsers/nagios/README.md new file mode 100644 index 000000000..e9be6a0dd --- /dev/null +++ b/plugins/parsers/nagios/README.md @@ -0,0 +1,17 @@ +# Nagios + +The `nagios` data format parses the output of nagios plugins. + +### Configuration + +```toml +[[inputs.exec]] + ## Commands array + commands = ["/usr/lib/nagios/plugins/check_load -w 5,6,7 -c 7,8,9"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "nagios" +``` diff --git a/plugins/parsers/nagios/parser.go b/plugins/parsers/nagios/parser.go index 4d5f7f008..e4058852b 100644 --- a/plugins/parsers/nagios/parser.go +++ b/plugins/parsers/nagios/parser.go @@ -1,15 +1,78 @@ package nagios import ( + "bufio" + "bytes" + "errors" + "fmt" + "log" + "os/exec" "regexp" "strconv" "strings" + "syscall" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" ) +// getExitCode get the exit code from an error value which is the result +// of running a command through exec package api. 
+func getExitCode(err error) (int, error) { + if err == nil { + return 0, nil + } + + ee, ok := err.(*exec.ExitError) + if !ok { + // If it is not an *exec.ExitError, then it must be + // an io error, but docs do not say anything about the + // exit code in this case. + return 0, errors.New("expected *exec.ExitError") + } + + ws, ok := ee.Sys().(syscall.WaitStatus) + if !ok { + return 0, errors.New("expected syscall.WaitStatus") + } + + return ws.ExitStatus(), nil +} + +// TryAddState attempts to add a state derived from the runErr. +// If any error occurs, it is guaranteed to be returned along with +// the initial metric slice. +func TryAddState(runErr error, metrics []telegraf.Metric) ([]telegraf.Metric, error) { + state, err := getExitCode(runErr) + if err != nil { + return metrics, fmt.Errorf("exec: get exit code: %s", err) + } + + for _, m := range metrics { + if m.Name() == "nagios_state" { + m.AddField("state", state) + return metrics, nil + } + } + + var ts time.Time + if len(metrics) != 0 { + ts = metrics[0].Time() + } else { + ts = time.Now().UTC() + } + f := map[string]interface{}{ + "state": state, + } + m, err := metric.New("nagios_state", nil, f, ts) + if err != nil { + return metrics, err + } + metrics = append(metrics, m) + return metrics, nil +} + type NagiosParser struct { MetricName string DefaultTags map[string]string @@ -17,8 +80,10 @@ type NagiosParser struct { // Got from Alignak // https://github.com/Alignak-monitoring/alignak/blob/develop/alignak/misc/perfdata.py -var perfSplitRegExp, _ = regexp.Compile(`([^=]+=\S+)`) -var nagiosRegExp, _ = regexp.Compile(`^([^=]+)=([\d\.\-\+eE]+)([\w\/%]*);?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE]+)?;?([\d\.\-\+eE]+)?;?\s*`) +var ( + perfSplitRegExp = regexp.MustCompile(`([^=]+=\S+)`) + nagiosRegExp = regexp.MustCompile(`^([^=]+)=([\d\.\-\+eE]+)([\w\/%]*);?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE:~@]+)?;?([\d\.\-\+eE]+)?;?([\d\.\-\+eE]+)?;?\s*`) +) func (p *NagiosParser) ParseLine(line string) (telegraf.Metric, error) { metrics, err := p.Parse([]byte(line)) @@ -29,88 +94,160 @@ func (p *NagiosParser) SetDefaultTags(tags map[string]string) { p.DefaultTags = tags } -//> rta,host=absol,unit=ms critical=6000,min=0,value=0.332,warning=4000 1456374625003628099 -//> pl,host=absol,unit=% critical=90,min=0,value=0,warning=80 1456374625003693967 - func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { + ts := time.Now().UTC() + + s := bufio.NewScanner(bytes.NewReader(buf)) + + var msg bytes.Buffer + var longmsg bytes.Buffer + metrics := make([]telegraf.Metric, 0) - // Convert to string - out := string(buf) - // Prepare output for splitting - // Delete escaped pipes - out = strings.Replace(out, `\|`, "___PROTECT_PIPE___", -1) - // Split lines and get the first one - lines := strings.Split(out, "\n") - // Split output and perfdatas - data_splitted := strings.Split(lines[0], "|") - if len(data_splitted) <= 1 { - // No pipe == no perf data - return nil, nil + + // Scan the first line. 
+ if !s.Scan() && s.Err() != nil { + return nil, s.Err() } - // Get perfdatas - perfdatas := data_splitted[1] - // Add escaped pipes - perfdatas = strings.Replace(perfdatas, "___PROTECT_PIPE___", `\|`, -1) - // Split perfs - unParsedPerfs := perfSplitRegExp.FindAllSubmatch([]byte(perfdatas), -1) - // Iterate on all perfs - for _, unParsedPerfs := range unParsedPerfs { - // Get metrics - // Trim perf - trimedPerf := strings.Trim(string(unParsedPerfs[0]), " ") - // Parse perf - perf := nagiosRegExp.FindAllSubmatch([]byte(trimedPerf), -1) - // Bad string - if len(perf) == 0 { + parts := bytes.Split(s.Bytes(), []byte{'|'}) + switch len(parts) { + case 2: + ms, err := parsePerfData(string(parts[1]), ts) + if err != nil { + log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) + } + metrics = append(metrics, ms...) + fallthrough + case 1: + msg.Write(bytes.TrimSpace(parts[0])) + default: + return nil, errors.New("illegal output format") + } + + // Read long output. + for s.Scan() { + if bytes.Contains(s.Bytes(), []byte{'|'}) { + parts := bytes.Split(s.Bytes(), []byte{'|'}) + if longmsg.Len() != 0 { + longmsg.WriteByte('\n') + } + longmsg.Write(bytes.TrimSpace(parts[0])) + + ms, err := parsePerfData(string(parts[1]), ts) + if err != nil { + log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) + } + metrics = append(metrics, ms...) + break + } + if longmsg.Len() != 0 { + longmsg.WriteByte('\n') + } + longmsg.Write(bytes.TrimSpace((s.Bytes()))) + } + + // Parse extra performance data. + for s.Scan() { + ms, err := parsePerfData(s.Text(), ts) + if err != nil { + log.Printf("E! [parser.nagios] failed to parse performance data: %s\n", err.Error()) + } + metrics = append(metrics, ms...) + } + + if s.Err() != nil { + log.Printf("D! [parser.nagios] unexpected io error: %s\n", s.Err()) + } + + // Create nagios state. + fields := map[string]interface{}{ + "service_output": msg.String(), + } + if longmsg.Len() != 0 { + fields["long_service_output"] = longmsg.String() + } + + m, err := metric.New("nagios_state", nil, fields, ts) + if err == nil { + metrics = append(metrics, m) + } else { + log.Printf("E! [parser.nagios] failed to add nagios_state: %s\n", err) + } + + return metrics, nil +} + +func parsePerfData(perfdatas string, timestamp time.Time) ([]telegraf.Metric, error) { + metrics := make([]telegraf.Metric, 0) + + for _, unParsedPerf := range perfSplitRegExp.FindAllString(perfdatas, -1) { + trimedPerf := strings.TrimSpace(unParsedPerf) + perf := nagiosRegExp.FindStringSubmatch(trimedPerf) + + // verify at least `'label'=value[UOM];` existed + if len(perf) < 3 { continue } - if len(perf[0]) <= 2 { + if perf[1] == "" || perf[2] == "" { continue } - if perf[0][1] == nil || perf[0][2] == nil { - continue - } - fieldName := string(perf[0][1]) - tags := make(map[string]string) - if perf[0][3] != nil { - str := string(perf[0][3]) + + fieldName := strings.Trim(perf[1], "'") + tags := map[string]string{"perfdata": fieldName} + if perf[3] != "" { + str := string(perf[3]) if str != "" { tags["unit"] = str } } + fields := make(map[string]interface{}) - f, err := strconv.ParseFloat(string(perf[0][2]), 64) + if perf[2] == "U" { + return nil, errors.New("Value undetermined") + } + + f, err := strconv.ParseFloat(string(perf[2]), 64) if err == nil { fields["value"] = f } - // TODO should we set empty field - // if metric if there is no data ? 
- if perf[0][4] != nil { - f, err := strconv.ParseFloat(string(perf[0][4]), 64) + if perf[4] != "" { + low, high, err := parseThreshold(perf[4]) if err == nil { - fields["warning"] = f + if strings.Contains(perf[4], "@") { + fields["warning_le"] = low + fields["warning_ge"] = high + } else { + fields["warning_lt"] = low + fields["warning_gt"] = high + } } } - if perf[0][5] != nil { - f, err := strconv.ParseFloat(string(perf[0][5]), 64) + if perf[5] != "" { + low, high, err := parseThreshold(perf[5]) if err == nil { - fields["critical"] = f + if strings.Contains(perf[5], "@") { + fields["critical_le"] = low + fields["critical_ge"] = high + } else { + fields["critical_lt"] = low + fields["critical_gt"] = high + } } } - if perf[0][6] != nil { - f, err := strconv.ParseFloat(string(perf[0][6]), 64) + if perf[6] != "" { + f, err := strconv.ParseFloat(perf[6], 64) if err == nil { fields["min"] = f } } - if perf[0][7] != nil { - f, err := strconv.ParseFloat(string(perf[0][7]), 64) + if perf[7] != "" { + f, err := strconv.ParseFloat(perf[7], 64) if err == nil { fields["max"] = f } } + // Create metric - metric, err := metric.New(fieldName, tags, fields, time.Now().UTC()) + metric, err := metric.New("nagios", tags, fields, timestamp) if err != nil { return nil, err } @@ -120,3 +257,47 @@ func (p *NagiosParser) Parse(buf []byte) ([]telegraf.Metric, error) { return metrics, nil } + +// from math +const ( + MaxFloat64 = 1.797693134862315708145274237317043567981e+308 // 2**1023 * (2**53 - 1) / 2**52 + MinFloat64 = 4.940656458412465441765687928682213723651e-324 // 1 / 2**(1023 - 1 + 52) +) + +var ErrBadThresholdFormat = errors.New("Bad threshold format") + +// Handles all cases from https://nagios-plugins.org/doc/guidelines.html#THRESHOLDFORMAT +func parseThreshold(threshold string) (min float64, max float64, err error) { + thresh := strings.Split(threshold, ":") + switch len(thresh) { + case 1: + max, err = strconv.ParseFloat(string(thresh[0]), 64) + if err != nil { + return 0, 0, ErrBadThresholdFormat + } + + return 0, max, nil + case 2: + if thresh[0] == "~" { + min = MinFloat64 + } else { + min, err = strconv.ParseFloat(string(thresh[0]), 64) + if err != nil { + min = 0 + } + } + + if thresh[1] == "" { + max = MaxFloat64 + } else { + max, err = strconv.ParseFloat(string(thresh[1]), 64) + if err != nil { + return 0, 0, ErrBadThresholdFormat + } + } + default: + return 0, 0, ErrBadThresholdFormat + } + + return +} diff --git a/plugins/parsers/nagios/parser_test.go b/plugins/parsers/nagios/parser_test.go index b1e3d6fdd..7f5b5937e 100644 --- a/plugins/parsers/nagios/parser_test.go +++ b/plugins/parsers/nagios/parser_test.go @@ -1,88 +1,527 @@ package nagios import ( + "errors" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" ) -const validOutput1 = `PING OK - Packet loss = 0%, RTA = 0.30 ms|rta=0.298000ms;4000.000000;6000.000000;0.000000 pl=0%;80;90;0;100 +func TestGetExitCode(t *testing.T) { + tests := []struct { + name string + errF func() error + expCode int + expErr error + }{ + { + name: "nil error passed is ok", + errF: func() error { + return nil + }, + expCode: 0, + expErr: nil, + }, + { + name: "unexpected error type", + errF: func() error { + return errors.New("I am not *exec.ExitError") + }, + expCode: 0, + expErr: errors.New("expected *exec.ExitError"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + e := tt.errF() + code, err := getExitCode(e) + + require.Equal(t, tt.expCode, code) + require.Equal(t, tt.expErr, err) + }) + } +} + +type metricBuilder struct { + name string + tags map[string]string + fields map[string]interface{} + timestamp time.Time +} + +func mb() *metricBuilder { + return &metricBuilder{} +} + +func (b *metricBuilder) n(v string) *metricBuilder { + b.name = v + return b +} + +func (b *metricBuilder) t(k, v string) *metricBuilder { + if b.tags == nil { + b.tags = make(map[string]string) + } + b.tags[k] = v + return b +} + +func (b *metricBuilder) f(k string, v interface{}) *metricBuilder { + if b.fields == nil { + b.fields = make(map[string]interface{}) + } + b.fields[k] = v + return b +} + +func (b *metricBuilder) ts(v time.Time) *metricBuilder { + b.timestamp = v + return b +} + +func (b *metricBuilder) b() telegraf.Metric { + m, err := metric.New(b.name, b.tags, b.fields, b.timestamp) + if err != nil { + panic(err) + } + return m +} + +// assertEqual asserts two slices to be equal. Note, that the order +// of the entries matters. +func assertEqual(t *testing.T, exp, actual []telegraf.Metric) { + require.Equal(t, len(exp), len(actual)) + for i := 0; i < len(exp); i++ { + ok := testutil.MetricEqual(exp[i], actual[i]) + require.True(t, ok) + } +} + +func TestTryAddState(t *testing.T) { + tests := []struct { + name string + runErrF func() error + metrics []telegraf.Metric + assertF func(*testing.T, []telegraf.Metric, error) + }{ + { + name: "should append state=0 field to existing metric", + runErrF: func() error { + return nil + }, + metrics: []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + mb(). + n("nagios_state"). + f("service_output", "OK: system working").b(), + }, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + exp := []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + mb(). + n("nagios_state"). + f("service_output", "OK: system working"). + f("state", 0).b(), + } + assertEqual(t, exp, metrics) + require.NoError(t, err) + }, + }, + { + name: "should create 'nagios_state state=0' and same timestamp as others", + runErrF: func() error { + return nil + }, + metrics: []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + }, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + exp := []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + mb(). + n("nagios_state"). + f("state", 0).b(), + } + assertEqual(t, exp, metrics) + require.NoError(t, err) + }, + }, + { + name: "should create 'nagios_state state=0' and recent timestamp", + runErrF: func() error { + return nil + }, + metrics: []telegraf.Metric{}, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.Len(t, metrics, 1) + m := metrics[0] + require.Equal(t, "nagios_state", m.Name()) + s, ok := m.GetField("state") + require.True(t, ok) + require.Equal(t, int64(0), s) + require.WithinDuration(t, time.Now().UTC(), m.Time(), 10*time.Second) + require.NoError(t, err) + }, + }, + { + name: "should return original metrics and an error", + runErrF: func() error { + return errors.New("non parsable error") + }, + metrics: []telegraf.Metric{ + mb(). + n("nagios"). + f("perfdata", 0).b(), + }, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + exp := []telegraf.Metric{ + mb(). + n("nagios"). 
+ f("perfdata", 0).b(), + } + expErr := "exec: get exit code: expected *exec.ExitError" + + assertEqual(t, exp, metrics) + require.Equal(t, expErr, err.Error()) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics, err := TryAddState(tt.runErrF(), tt.metrics) + tt.assertF(t, metrics, err) + }) + } +} + +func assertNagiosState(t *testing.T, m telegraf.Metric, f map[string]interface{}) { + assert.Equal(t, map[string]string{}, m.Tags()) + assert.Equal(t, f, m.Fields()) +} + +func TestParse(t *testing.T) { + parser := NagiosParser{ + MetricName: "nagios_test", + } + + tests := []struct { + name string + input string + assertF func(*testing.T, []telegraf.Metric, error) + }{ + { + name: "valid output 1", + input: `PING OK - Packet loss = 0%, RTA = 0.30 ms|rta=0.298000ms;4000.000000;6000.000000;0.000000 pl=0%;80;90;0;100 This is a long output with three lines -` -const validOutput2 = "TCP OK - 0.008 second response time on port 80|time=0.008457s;;;0.000000;10.000000" -const validOutput3 = "TCP OK - 0.008 second response time on port 80|time=0.008457" -const invalidOutput3 = "PING OK - Packet loss = 0%, RTA = 0.30 ms" -const invalidOutput4 = "PING OK - Packet loss = 0%, RTA = 0.30 ms| =3;;;; dgasdg =;;;; sff=;;;;" +`, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 3) + // rta + assert.Equal(t, map[string]string{ + "unit": "ms", + "perfdata": "rta", + }, metrics[0].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.298), + "warning_lt": float64(0), + "warning_gt": float64(4000), + "critical_lt": float64(0), + "critical_gt": float64(6000), + "min": float64(0), + }, metrics[0].Fields()) -func TestParseValidOutput(t *testing.T) { - parser := NagiosParser{ - MetricName: "nagios_test", + // pl + assert.Equal(t, map[string]string{ + "unit": "%", + "perfdata": "pl", + }, metrics[1].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0), + "warning_lt": float64(0), + "warning_gt": float64(80), + "critical_lt": float64(0), + "critical_gt": float64(90), + "min": float64(0), + "max": float64(100), + }, metrics[1].Fields()) + + assertNagiosState(t, metrics[2], map[string]interface{}{ + "service_output": "PING OK - Packet loss = 0%, RTA = 0.30 ms", + "long_service_output": "This is a long output\nwith three lines", + }) + }, + }, + { + name: "valid output 2", + input: "TCP OK - 0.008 second response time on port 80|time=0.008457s;;;0.000000;10.000000", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 2) + // time + assert.Equal(t, map[string]string{ + "unit": "s", + "perfdata": "time", + }, metrics[0].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.008457), + "min": float64(0), + "max": float64(10), + }, metrics[0].Fields()) + + assertNagiosState(t, metrics[1], map[string]interface{}{ + "service_output": "TCP OK - 0.008 second response time on port 80", + }) + }, + }, + { + name: "valid output 3", + input: "TCP OK - 0.008 second response time on port 80|time=0.008457", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 2) + // time + assert.Equal(t, map[string]string{ + "perfdata": "time", + }, metrics[0].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.008457), + }, metrics[0].Fields()) + + assertNagiosState(t, metrics[1], map[string]interface{}{ + "service_output": "TCP OK - 
0.008 second response time on port 80", + }) + }, + }, + { + name: "valid output 4", + input: "OK: Load average: 0.00, 0.01, 0.05 | 'load1'=0.00;~:4;@0:6;0; 'load5'=0.01;3;0:5;0; 'load15'=0.05;0:2;0:4;0;", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 4) + // load1 + assert.Equal(t, map[string]string{ + "perfdata": "load1", + }, metrics[0].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.00), + "warning_lt": MinFloat64, + "warning_gt": float64(4), + "critical_le": float64(0), + "critical_ge": float64(6), + "min": float64(0), + }, metrics[0].Fields()) + + // load5 + assert.Equal(t, map[string]string{ + "perfdata": "load5", + }, metrics[1].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.01), + "warning_gt": float64(3), + "warning_lt": float64(0), + "critical_lt": float64(0), + "critical_gt": float64(5), + "min": float64(0), + }, metrics[1].Fields()) + + // load15 + assert.Equal(t, map[string]string{ + "perfdata": "load15", + }, metrics[2].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(0.05), + "warning_lt": float64(0), + "warning_gt": float64(2), + "critical_lt": float64(0), + "critical_gt": float64(4), + "min": float64(0), + }, metrics[2].Fields()) + + assertNagiosState(t, metrics[3], map[string]interface{}{ + "service_output": "OK: Load average: 0.00, 0.01, 0.05", + }) + }, + }, + { + name: "no perf data", + input: "PING OK - Packet loss = 0%, RTA = 0.30 ms", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 1) + + assertNagiosState(t, metrics[0], map[string]interface{}{ + "service_output": "PING OK - Packet loss = 0%, RTA = 0.30 ms", + }) + }, + }, + { + name: "malformed perf data", + input: "PING OK - Packet loss = 0%, RTA = 0.30 ms| =3;;;; dgasdg =;;;; sff=;;;;", + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 1) + + assertNagiosState(t, metrics[0], map[string]interface{}{ + "service_output": "PING OK - Packet loss = 0%, RTA = 0.30 ms", + }) + }, + }, + { + name: "from https://assets.nagios.com/downloads/nagioscore/docs/nagioscore/3/en/pluginapi.html", + input: `DISK OK - free space: / 3326 MB (56%); | /=2643MB;5948;5958;0;5968 +/ 15272 MB (77%); +/boot 68 MB (69%); +/home 69357 MB (27%); +/var/log 819 MB (84%); | /boot=68MB;88;93;0;98 +/home=69357MB;253404;253409;0;253414 +/var/log=818MB;970;975;0;980 +`, + assertF: func(t *testing.T, metrics []telegraf.Metric, err error) { + require.NoError(t, err) + require.Len(t, metrics, 5) + // /=2643MB;5948;5958;0;5968 + assert.Equal(t, map[string]string{ + "unit": "MB", + "perfdata": "/", + }, metrics[0].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(2643), + "warning_lt": float64(0), + "warning_gt": float64(5948), + "critical_lt": float64(0), + "critical_gt": float64(5958), + "min": float64(0), + "max": float64(5968), + }, metrics[0].Fields()) + + // /boot=68MB;88;93;0;98 + assert.Equal(t, map[string]string{ + "unit": "MB", + "perfdata": "/boot", + }, metrics[1].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(68), + "warning_lt": float64(0), + "warning_gt": float64(88), + "critical_lt": float64(0), + "critical_gt": float64(93), + "min": float64(0), + "max": float64(98), + }, metrics[1].Fields()) + + // /home=69357MB;253404;253409;0;253414 + assert.Equal(t, map[string]string{ + "unit": "MB", + "perfdata": "/home", + }, 
metrics[2].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(69357), + "warning_lt": float64(0), + "warning_gt": float64(253404), + "critical_lt": float64(0), + "critical_gt": float64(253409), + "min": float64(0), + "max": float64(253414), + }, metrics[2].Fields()) + + // /var/log=818MB;970;975;0;980 + assert.Equal(t, map[string]string{ + "unit": "MB", + "perfdata": "/var/log", + }, metrics[3].Tags()) + assert.Equal(t, map[string]interface{}{ + "value": float64(818), + "warning_lt": float64(0), + "warning_gt": float64(970), + "critical_lt": float64(0), + "critical_gt": float64(975), + "min": float64(0), + "max": float64(980), + }, metrics[3].Fields()) + + assertNagiosState(t, metrics[4], map[string]interface{}{ + "service_output": "DISK OK - free space: / 3326 MB (56%);", + "long_service_output": "/ 15272 MB (77%);\n/boot 68 MB (69%);\n/home 69357 MB (27%);\n/var/log 819 MB (84%);", + }) + }, + }, } - // Output1 - metrics, err := parser.Parse([]byte(validOutput1)) - require.NoError(t, err) - assert.Len(t, metrics, 2) - // rta - assert.Equal(t, "rta", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ - "value": float64(0.298), - "warning": float64(4000), - "critical": float64(6000), - "min": float64(0), - }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"unit": "ms"}, metrics[0].Tags()) - // pl - assert.Equal(t, "pl", metrics[1].Name()) - assert.Equal(t, map[string]interface{}{ - "value": float64(0), - "warning": float64(80), - "critical": float64(90), - "min": float64(0), - "max": float64(100), - }, metrics[1].Fields()) - assert.Equal(t, map[string]string{"unit": "%"}, metrics[1].Tags()) - - // Output2 - metrics, err = parser.Parse([]byte(validOutput2)) - require.NoError(t, err) - assert.Len(t, metrics, 1) - // time - assert.Equal(t, "time", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ - "value": float64(0.008457), - "min": float64(0), - "max": float64(10), - }, metrics[0].Fields()) - assert.Equal(t, map[string]string{"unit": "s"}, metrics[0].Tags()) - - // Output3 - metrics, err = parser.Parse([]byte(validOutput3)) - require.NoError(t, err) - assert.Len(t, metrics, 1) - // time - assert.Equal(t, "time", metrics[0].Name()) - assert.Equal(t, map[string]interface{}{ - "value": float64(0.008457), - }, metrics[0].Fields()) - assert.Equal(t, map[string]string{}, metrics[0].Tags()) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics, err := parser.Parse([]byte(tt.input)) + tt.assertF(t, metrics, err) + }) + } } -func TestParseInvalidOutput(t *testing.T) { - parser := NagiosParser{ - MetricName: "nagios_test", +func TestParseThreshold(t *testing.T) { + tests := []struct { + input string + eMin float64 + eMax float64 + eErr error + }{ + { + input: "10", + eMin: 0, + eMax: 10, + eErr: nil, + }, + { + input: "10:", + eMin: 10, + eMax: MaxFloat64, + eErr: nil, + }, + { + input: "~:10", + eMin: MinFloat64, + eMax: 10, + eErr: nil, + }, + { + input: "10:20", + eMin: 10, + eMax: 20, + eErr: nil, + }, + { + input: "10:20", + eMin: 10, + eMax: 20, + eErr: nil, + }, + { + input: "10:20:30", + eMin: 0, + eMax: 0, + eErr: ErrBadThresholdFormat, + }, } - // invalidOutput3 - metrics, err := parser.Parse([]byte(invalidOutput3)) - require.NoError(t, err) - assert.Len(t, metrics, 0) - - // invalidOutput4 - metrics, err = parser.Parse([]byte(invalidOutput4)) - require.NoError(t, err) - assert.Len(t, metrics, 0) - + for i := range tests { + min, max, err := parseThreshold(tests[i].input) + require.Equal(t, tests[i].eMin, min) + 
require.Equal(t, tests[i].eMax, max) + require.Equal(t, tests[i].eErr, err) + } } diff --git a/plugins/parsers/registry.go b/plugins/parsers/registry.go index 58fce1722..1c3af2763 100644 --- a/plugins/parsers/registry.go +++ b/plugins/parsers/registry.go @@ -2,18 +2,25 @@ package parsers import ( "fmt" + "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/plugins/parsers/collectd" + "github.com/influxdata/telegraf/plugins/parsers/csv" "github.com/influxdata/telegraf/plugins/parsers/dropwizard" + "github.com/influxdata/telegraf/plugins/parsers/form_urlencoded" "github.com/influxdata/telegraf/plugins/parsers/graphite" + "github.com/influxdata/telegraf/plugins/parsers/grok" "github.com/influxdata/telegraf/plugins/parsers/influx" "github.com/influxdata/telegraf/plugins/parsers/json" + "github.com/influxdata/telegraf/plugins/parsers/logfmt" "github.com/influxdata/telegraf/plugins/parsers/nagios" "github.com/influxdata/telegraf/plugins/parsers/value" + "github.com/influxdata/telegraf/plugins/parsers/wavefront" ) +type ParserFunc func() (Parser, error) + // ParserInput is an interface for input plugins that are able to parse // arbitrary data formats. type ParserInput interface { @@ -21,6 +28,13 @@ type ParserInput interface { SetParser(parser Parser) } +// ParserFuncInput is an interface for input plugins that are able to parse +// arbitrary data formats. +type ParserFuncInput interface { + // GetParser returns a new parser. + SetParserFunc(fn ParserFunc) +} + // Parser is an interface defining functions that a parser plugin must satisfy. type Parser interface { // Parse takes a byte buffer separated by newlines @@ -47,46 +61,93 @@ type Parser interface { // and can be used to instantiate _any_ of the parsers. type Config struct { // Dataformat can be one of: json, influx, graphite, value, nagios - DataFormat string + DataFormat string `toml:"data_format"` // Separator only applied to Graphite data. - Separator string + Separator string `toml:"separator"` // Templates only apply to Graphite data. - Templates []string + Templates []string `toml:"templates"` // TagKeys only apply to JSON data - TagKeys []string + TagKeys []string `toml:"tag_keys"` + // Array of glob pattern strings keys that should be added as string fields. + JSONStringFields []string `toml:"json_string_fields"` + + JSONNameKey string `toml:"json_name_key"` // MetricName applies to JSON & value. This will be the name of the measurement. 
- MetricName string + MetricName string `toml:"metric_name"` + + // holds a gjson path for json parser + JSONQuery string `toml:"json_query"` + + // key of time + JSONTimeKey string `toml:"json_time_key"` + + // time format + JSONTimeFormat string `toml:"json_time_format"` + + // default timezone + JSONTimezone string `toml:"json_timezone"` + + // Whether to continue if a JSON object can't be coerced + JSONStrict bool `toml:"json_strict"` // Authentication file for collectd - CollectdAuthFile string + CollectdAuthFile string `toml:"collectd_auth_file"` // One of none (default), sign, or encrypt - CollectdSecurityLevel string + CollectdSecurityLevel string `toml:"collectd_security_level"` // Dataset specification for collectd - CollectdTypesDB []string + CollectdTypesDB []string `toml:"collectd_types_db"` + + // whether to split or join multivalue metrics + CollectdSplit string `toml:"collectd_split"` // DataType only applies to value, this will be the type to parse value to - DataType string + DataType string `toml:"data_type"` // DefaultTags are the default tags that will be added to all parsed metrics. - DefaultTags map[string]string + DefaultTags map[string]string `toml:"default_tags"` // an optional json path containing the metric registry object // if left empty, the whole json object is parsed as a metric registry - DropwizardMetricRegistryPath string + DropwizardMetricRegistryPath string `toml:"dropwizard_metric_registry_path"` // an optional json path containing the default time of the metrics // if left empty, the processing time is used - DropwizardTimePath string + DropwizardTimePath string `toml:"dropwizard_time_path"` // time format to use for parsing the time field // defaults to time.RFC3339 - DropwizardTimeFormat string + DropwizardTimeFormat string `toml:"dropwizard_time_format"` // an optional json path pointing to a json object with tag key/value pairs // takes precedence over DropwizardTagPathsMap - DropwizardTagsPath string + DropwizardTagsPath string `toml:"dropwizard_tags_path"` // an optional map containing tag names as keys and json paths to retrieve the tag values from as values // used if TagsPath is empty or doesn't return any tags - DropwizardTagPathsMap map[string]string + DropwizardTagPathsMap map[string]string `toml:"dropwizard_tag_paths_map"` + + //grok patterns + GrokPatterns []string `toml:"grok_patterns"` + GrokNamedPatterns []string `toml:"grok_named_patterns"` + GrokCustomPatterns string `toml:"grok_custom_patterns"` + GrokCustomPatternFiles []string `toml:"grok_custom_pattern_files"` + GrokTimezone string `toml:"grok_timezone"` + GrokUniqueTimestamp string `toml:"grok_unique_timestamp"` + + //csv configuration + CSVColumnNames []string `toml:"csv_column_names"` + CSVColumnTypes []string `toml:"csv_column_types"` + CSVComment string `toml:"csv_comment"` + CSVDelimiter string `toml:"csv_delimiter"` + CSVHeaderRowCount int `toml:"csv_header_row_count"` + CSVMeasurementColumn string `toml:"csv_measurement_column"` + CSVSkipColumns int `toml:"csv_skip_columns"` + CSVSkipRows int `toml:"csv_skip_rows"` + CSVTagColumns []string `toml:"csv_tag_columns"` + CSVTimestampColumn string `toml:"csv_timestamp_column"` + CSVTimestampFormat string `toml:"csv_timestamp_format"` + CSVTrimSpace bool `toml:"csv_trim_space"` + + // FormData configuration + FormUrlencodedTagKeys []string `toml:"form_urlencoded_tag_keys"` } // NewParser returns a Parser interface based on the given config. 
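The `Config` struct above now carries an explicit `toml` tag for every option, and the csv, grok, logfmt, wavefront, and form_urlencoded formats are wired into `NewParser` below. A minimal sketch of driving the registry with the new CSV fields, assuming a build that contains this change; the measurement name and sample payload are invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/plugins/parsers"
)

func main() {
	// Build a CSV parser through the shared registry using the newly
	// exposed csv_* options on parsers.Config.
	cfg := &parsers.Config{
		DataFormat:        "csv",
		MetricName:        "example", // hypothetical measurement name
		CSVHeaderRowCount: 1,
		CSVTagColumns:     []string{"host"},
	}

	parser, err := parsers.NewParser(cfg)
	if err != nil {
		panic(err)
	}

	// "host" becomes a tag and "value" a field; with no csv_timestamp_column
	// configured the parser stamps each metric with the current time.
	metrics, err := parser.Parse([]byte("host,value\nlocalhost,42\n"))
	if err != nil {
		panic(err)
	}
	for _, m := range metrics {
		fmt.Println(m.Name(), m.Tags(), m.Fields())
	}
}
```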
@@ -95,8 +156,20 @@ func NewParser(config *Config) (Parser, error) { var parser Parser switch config.DataFormat { case "json": - parser, err = NewJSONParser(config.MetricName, - config.TagKeys, config.DefaultTags) + parser, err = json.New( + &json.Config{ + MetricName: config.MetricName, + TagKeys: config.TagKeys, + NameKey: config.JSONNameKey, + StringFields: config.JSONStringFields, + Query: config.JSONQuery, + TimeKey: config.JSONTimeKey, + TimeFormat: config.JSONTimeFormat, + Timezone: config.JSONTimezone, + DefaultTags: config.DefaultTags, + Strict: config.JSONStrict, + }, + ) case "value": parser, err = NewValueParser(config.MetricName, config.DataType, config.DefaultTags) @@ -109,7 +182,7 @@ func NewParser(config *Config) (Parser, error) { config.Templates, config.DefaultTags) case "collectd": parser, err = NewCollectdParser(config.CollectdAuthFile, - config.CollectdSecurityLevel, config.CollectdTypesDB) + config.CollectdSecurityLevel, config.CollectdTypesDB, config.CollectdSplit) case "dropwizard": parser, err = NewDropwizardParser( config.DropwizardMetricRegistryPath, @@ -120,25 +193,122 @@ func NewParser(config *Config) (Parser, error) { config.DefaultTags, config.Separator, config.Templates) + case "wavefront": + parser, err = NewWavefrontParser(config.DefaultTags) + case "grok": + parser, err = newGrokParser( + config.MetricName, + config.GrokPatterns, + config.GrokNamedPatterns, + config.GrokCustomPatterns, + config.GrokCustomPatternFiles, + config.GrokTimezone, + config.GrokUniqueTimestamp) + case "csv": + parser, err = newCSVParser(config.MetricName, + config.CSVHeaderRowCount, + config.CSVSkipRows, + config.CSVSkipColumns, + config.CSVDelimiter, + config.CSVComment, + config.CSVTrimSpace, + config.CSVColumnNames, + config.CSVColumnTypes, + config.CSVTagColumns, + config.CSVMeasurementColumn, + config.CSVTimestampColumn, + config.CSVTimestampFormat, + config.DefaultTags) + case "logfmt": + parser, err = NewLogFmtParser(config.MetricName, config.DefaultTags) + case "form_urlencoded": + parser, err = NewFormUrlencodedParser( + config.MetricName, + config.DefaultTags, + config.FormUrlencodedTagKeys, + ) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } return parser, err } -func NewJSONParser( - metricName string, - tagKeys []string, - defaultTags map[string]string, -) (Parser, error) { - parser := &json.JSONParser{ - MetricName: metricName, - TagKeys: tagKeys, - DefaultTags: defaultTags, +func newCSVParser(metricName string, + headerRowCount int, + skipRows int, + skipColumns int, + delimiter string, + comment string, + trimSpace bool, + columnNames []string, + columnTypes []string, + tagColumns []string, + nameColumn string, + timestampColumn string, + timestampFormat string, + defaultTags map[string]string) (Parser, error) { + + if headerRowCount == 0 && len(columnNames) == 0 { + return nil, fmt.Errorf("`csv_header_row_count` must be defined if `csv_column_names` is not specified") } + + if delimiter != "" { + runeStr := []rune(delimiter) + if len(runeStr) > 1 { + return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", delimiter) + } + } + + if comment != "" { + runeStr := []rune(comment) + if len(runeStr) > 1 { + return nil, fmt.Errorf("csv_delimiter must be a single character, got: %s", comment) + } + } + + if len(columnNames) > 0 && len(columnTypes) > 0 && len(columnNames) != len(columnTypes) { + return nil, fmt.Errorf("csv_column_names field count doesn't match with csv_column_types") + } + + parser := &csv.Parser{ + 
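+		// These fields map the registry Config (csv_* options plus default_tags)
+		// onto the csv parser; TimeFunc supplies the timestamp used when no
+		// csv_timestamp_column is set.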
MetricName: metricName, + HeaderRowCount: headerRowCount, + SkipRows: skipRows, + SkipColumns: skipColumns, + Delimiter: delimiter, + Comment: comment, + TrimSpace: trimSpace, + ColumnNames: columnNames, + ColumnTypes: columnTypes, + TagColumns: tagColumns, + MeasurementColumn: nameColumn, + TimestampColumn: timestampColumn, + TimestampFormat: timestampFormat, + DefaultTags: defaultTags, + TimeFunc: time.Now, + } + return parser, nil } +func newGrokParser(metricName string, + patterns []string, nPatterns []string, + cPatterns string, cPatternFiles []string, + tZone string, uniqueTimestamp string) (Parser, error) { + parser := grok.Parser{ + Measurement: metricName, + Patterns: patterns, + NamedPatterns: nPatterns, + CustomPatterns: cPatterns, + CustomPatternFiles: cPatternFiles, + Timezone: tZone, + UniqueTimestamp: uniqueTimestamp, + } + + err := parser.Compile() + return &parser, err +} + func NewNagiosParser() (Parser, error) { return &nagios.NagiosParser{}, nil } @@ -172,8 +342,9 @@ func NewCollectdParser( authFile string, securityLevel string, typesDB []string, + split string, ) (Parser, error) { - return collectd.NewCollectdParser(authFile, securityLevel, typesDB) + return collectd.NewCollectdParser(authFile, securityLevel, typesDB, split) } func NewDropwizardParser( @@ -200,3 +371,24 @@ func NewDropwizardParser( } return parser, err } + +// NewLogFmtParser returns a logfmt parser with the default options. +func NewLogFmtParser(metricName string, defaultTags map[string]string) (Parser, error) { + return logfmt.NewParser(metricName, defaultTags), nil +} + +func NewWavefrontParser(defaultTags map[string]string) (Parser, error) { + return wavefront.NewWavefrontParser(defaultTags), nil +} + +func NewFormUrlencodedParser( + metricName string, + defaultTags map[string]string, + tagKeys []string, +) (Parser, error) { + return &form_urlencoded.Parser{ + MetricName: metricName, + DefaultTags: defaultTags, + TagKeys: tagKeys, + }, nil +} diff --git a/plugins/parsers/value/README.md b/plugins/parsers/value/README.md new file mode 100644 index 000000000..db184d4e8 --- /dev/null +++ b/plugins/parsers/value/README.md @@ -0,0 +1,36 @@ +# Value + +The "value" data format translates single values into Telegraf metrics. This +is done by assigning a measurement name and setting a single field ("value") +as the parsed metric. + +### Configuration + +You **must** tell Telegraf what type of metric to collect by using the +`data_type` configuration option. Available options are: + +1. integer +2. float or long +3. string +4. boolean + +**Note:** It is also recommended that you set `name_override` to a measurement +name that makes sense for your metric, otherwise it will just be set to the +name of the plugin. + +```toml +[[inputs.exec]] + ## Commands array + commands = ["cat /proc/sys/kernel/random/entropy_avail"] + + ## override the default metric name of "exec" + name_override = "entropy_available" + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "value" + data_type = "integer" # required +``` + diff --git a/plugins/parsers/wavefront/README.md b/plugins/parsers/wavefront/README.md new file mode 100644 index 000000000..ab7c56eed --- /dev/null +++ b/plugins/parsers/wavefront/README.md @@ -0,0 +1,20 @@ +# Wavefront + +Wavefront Data Format is metrics are parsed directly into Telegraf metrics. 
+For more information about the Wavefront Data Format see +[here](https://docs.wavefront.com/wavefront_data_format.html). + +### Configuration + +There are no additional configuration options for Wavefront Data Format line-protocol. + +```toml +[[inputs.file]] + files = ["example"] + + ## Data format to consume. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "wavefront" +``` diff --git a/plugins/parsers/wavefront/element.go b/plugins/parsers/wavefront/element.go new file mode 100644 index 000000000..5ed37645c --- /dev/null +++ b/plugins/parsers/wavefront/element.go @@ -0,0 +1,244 @@ +package wavefront + +import ( + "errors" + "fmt" + "strconv" + "time" +) + +var ( + ErrEOF = errors.New("EOF") + ErrInvalidTimestamp = errors.New("Invalid timestamp") +) + +// Interface for parsing line elements. +type ElementParser interface { + parse(p *PointParser, pt *Point) error +} + +type NameParser struct{} +type ValueParser struct{} +type TimestampParser struct { + optional bool +} +type WhiteSpaceParser struct { + nextOptional bool +} +type TagParser struct{} +type LoopedParser struct { + wrappedParser ElementParser + wsParser *WhiteSpaceParser +} +type LiteralParser struct { + literal string +} + +func (ep *NameParser) parse(p *PointParser, pt *Point) error { + //Valid characters are: a-z, A-Z, 0-9, hyphen ("-"), underscore ("_"), dot ("."). + // Forward slash ("/") and comma (",") are allowed if metricName is enclosed in double quotes. + // Delta (U+2206) is allowed as the first character of the + // metricName + name, err := parseLiteral(p) + + if err != nil { + return err + } + pt.Name = name + return nil +} + +func (ep *ValueParser) parse(p *PointParser, pt *Point) error { + tok, lit := p.scan() + if tok == EOF { + return fmt.Errorf("found %q, expected number", lit) + } + + p.writeBuf.Reset() + if tok == MINUS_SIGN { + p.writeBuf.WriteString(lit) + tok, lit = p.scan() + } + + for tok != EOF && (tok == LETTER || tok == NUMBER || tok == DOT || tok == MINUS_SIGN) { + p.writeBuf.WriteString(lit) + tok, lit = p.scan() + } + p.unscan() + + pt.Value = p.writeBuf.String() + _, err := strconv.ParseFloat(pt.Value, 64) + if err != nil { + return fmt.Errorf("invalid metric value %s", pt.Value) + } + return nil +} + +func (ep *TimestampParser) parse(p *PointParser, pt *Point) error { + tok, lit := p.scan() + if tok == EOF { + if ep.optional { + p.unscanTokens(2) + return setTimestamp(pt, 0, 1) + } + return fmt.Errorf("found %q, expected number", lit) + } + + if tok != NUMBER { + if ep.optional { + p.unscanTokens(2) + return setTimestamp(pt, 0, 1) + } + return ErrInvalidTimestamp + } + + p.writeBuf.Reset() + for tok != EOF && tok == NUMBER { + p.writeBuf.WriteString(lit) + tok, lit = p.scan() + } + p.unscan() + + tsStr := p.writeBuf.String() + ts, err := strconv.ParseInt(tsStr, 10, 64) + if err != nil { + return err + } + return setTimestamp(pt, ts, len(tsStr)) +} + +func setTimestamp(pt *Point, ts int64, numDigits int) error { + + if numDigits == 19 { + // nanoseconds + ts = ts / 1e9 + } else if numDigits == 16 { + // microseconds + ts = ts / 1e6 + } else if numDigits == 13 { + // milliseconds + ts = ts / 1e3 + } else if numDigits != 10 { + // must be in seconds, return error if not 0 + if ts == 0 { + ts = getCurrentTime() + } else { + return ErrInvalidTimestamp + } + } + pt.Timestamp = ts + return nil +} + +func (ep *LoopedParser) parse(p *PointParser, pt 
*Point) error { + for { + err := ep.wrappedParser.parse(p, pt) + if err != nil { + return err + } + err = ep.wsParser.parse(p, pt) + if err == ErrEOF { + break + } + } + return nil +} + +func (ep *TagParser) parse(p *PointParser, pt *Point) error { + k, err := parseLiteral(p) + if err != nil { + if k == "" { + return nil + } + return err + } + + next, lit := p.scan() + if next != EQUALS { + return fmt.Errorf("found %q, expected equals", lit) + } + + v, err := parseLiteral(p) + if err != nil { + return err + } + if len(pt.Tags) == 0 { + pt.Tags = make(map[string]string) + } + pt.Tags[k] = v + return nil +} + +func (ep *WhiteSpaceParser) parse(p *PointParser, pt *Point) error { + tok := WS + for tok != EOF && tok == WS { + tok, _ = p.scan() + } + + if tok == EOF { + if !ep.nextOptional { + return ErrEOF + } + return nil + } + p.unscan() + return nil +} + +func (ep *LiteralParser) parse(p *PointParser, pt *Point) error { + l, err := parseLiteral(p) + if err != nil { + return err + } + + if l != ep.literal { + return fmt.Errorf("found %s, expected %s", l, ep.literal) + } + return nil +} + +func parseQuotedLiteral(p *PointParser) (string, error) { + p.writeBuf.Reset() + + escaped := false + tok, lit := p.scan() + for tok != EOF && (tok != QUOTES || (tok == QUOTES && escaped)) { + // let everything through + escaped = tok == BACKSLASH + p.writeBuf.WriteString(lit) + tok, lit = p.scan() + } + if tok == EOF { + return "", fmt.Errorf("found %q, expected quotes", lit) + } + return p.writeBuf.String(), nil +} + +func parseLiteral(p *PointParser) (string, error) { + tok, lit := p.scan() + if tok == EOF { + return "", fmt.Errorf("found %q, expected literal", lit) + } + + if tok == QUOTES { + return parseQuotedLiteral(p) + } + + p.writeBuf.Reset() + for tok != EOF && tok > literal_beg && tok < literal_end { + p.writeBuf.WriteString(lit) + tok, lit = p.scan() + if tok == DELTA { + return "", errors.New("found delta inside metric name") + } + } + if tok == QUOTES { + return "", errors.New("found quote inside unquoted literal") + } + p.unscan() + return p.writeBuf.String(), nil +} + +func getCurrentTime() int64 { + return time.Now().UnixNano() / 1e9 +} diff --git a/plugins/parsers/wavefront/parser.go b/plugins/parsers/wavefront/parser.go new file mode 100644 index 000000000..7ae455d47 --- /dev/null +++ b/plugins/parsers/wavefront/parser.go @@ -0,0 +1,226 @@ +package wavefront + +import ( + "bufio" + "bytes" + "io" + "log" + "strconv" + "sync" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +const MAX_BUFFER_SIZE = 2 + +type Point struct { + Name string + Value string + Timestamp int64 + Source string + Tags map[string]string +} + +type WavefrontParser struct { + parsers *sync.Pool + defaultTags map[string]string +} + +// PointParser is a thread-unsafe parser and must be kept in a pool. 
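+// A PointParser is checked out of the WavefrontParser's sync.Pool for each
+// Parse call and put back when parsing finishes, so its token and scratch
+// buffers are reused without locking.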
+type PointParser struct { + s *PointScanner + buf struct { + tok []Token // last read n tokens + lit []string // last read n literals + n int // unscanned buffer size (max=2) + } + scanBuf bytes.Buffer // buffer reused for scanning tokens + writeBuf bytes.Buffer // buffer reused for parsing elements + Elements []ElementParser + parent *WavefrontParser +} + +// Returns a slice of ElementParser's for the Graphite format +func NewWavefrontElements() []ElementParser { + var elements []ElementParser + wsParser := WhiteSpaceParser{} + wsParserNextOpt := WhiteSpaceParser{nextOptional: true} + repeatParser := LoopedParser{wrappedParser: &TagParser{}, wsParser: &wsParser} + elements = append(elements, &NameParser{}, &wsParser, &ValueParser{}, &wsParserNextOpt, + &TimestampParser{optional: true}, &wsParserNextOpt, &repeatParser) + return elements +} + +func NewWavefrontParser(defaultTags map[string]string) *WavefrontParser { + wp := &WavefrontParser{defaultTags: defaultTags} + wp.parsers = &sync.Pool{ + New: func() interface{} { + return NewPointParser(wp) + }, + } + return wp +} + +func NewPointParser(parent *WavefrontParser) *PointParser { + elements := NewWavefrontElements() + return &PointParser{Elements: elements, parent: parent} +} + +func (p *WavefrontParser) ParseLine(line string) (telegraf.Metric, error) { + buf := []byte(line) + + metrics, err := p.Parse(buf) + if err != nil { + return nil, err + } + + if len(metrics) > 0 { + return metrics[0], nil + } + + return nil, nil +} + +func (p *WavefrontParser) Parse(buf []byte) ([]telegraf.Metric, error) { + pp := p.parsers.Get().(*PointParser) + defer p.parsers.Put(pp) + return pp.Parse(buf) +} + +func (p *PointParser) Parse(buf []byte) ([]telegraf.Metric, error) { + + // parse even if the buffer begins with a newline + buf = bytes.TrimPrefix(buf, []byte("\n")) + // add newline to end if not exists: + if len(buf) > 0 && !bytes.HasSuffix(buf, []byte("\n")) { + buf = append(buf, []byte("\n")...) + } + + points := make([]Point, 0) + + buffer := bytes.NewBuffer(buf) + reader := bufio.NewReader(buffer) + for { + // Read up to the next newline. + buf, err := reader.ReadBytes('\n') + if err == io.EOF { + break + } + + p.reset(buf) + point := Point{} + for _, element := range p.Elements { + err := element.parse(p, &point) + if err != nil { + return nil, err + } + } + + points = append(points, point) + } + + metrics, err := p.convertPointToTelegrafMetric(points) + if err != nil { + return nil, err + } + return metrics, nil +} + +func (p *WavefrontParser) SetDefaultTags(tags map[string]string) { + p.defaultTags = tags +} + +func (p *PointParser) convertPointToTelegrafMetric(points []Point) ([]telegraf.Metric, error) { + + metrics := make([]telegraf.Metric, 0) + + for _, point := range points { + tags := make(map[string]string) + for k, v := range point.Tags { + tags[k] = v + } + // apply default tags after parsed tags + for k, v := range p.parent.defaultTags { + tags[k] = v + } + + // single field for value + fields := make(map[string]interface{}) + v, err := strconv.ParseFloat(point.Value, 64) + if err != nil { + return nil, err + } + fields["value"] = v + + m, err := metric.New(point.Name, tags, fields, time.Unix(point.Timestamp, 0)) + if err != nil { + return nil, err + } + + metrics = append(metrics, m) + } + + return metrics, nil +} + +// scan returns the next token from the underlying scanner. +// If a token has been unscanned then read that from the internal buffer instead. 
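+// scan and unscan share a fixed two-slot buffer (MAX_BUFFER_SIZE), so only the
+// last two tokens can be pushed back; unscanTokens merely logs if asked for more.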
+func (p *PointParser) scan() (Token, string) { + // If we have a token on the buffer, then return it. + if p.buf.n != 0 { + idx := p.buf.n % MAX_BUFFER_SIZE + tok, lit := p.buf.tok[idx], p.buf.lit[idx] + p.buf.n -= 1 + return tok, lit + } + + // Otherwise read the next token from the scanner. + tok, lit := p.s.Scan() + + // Save it to the buffer in case we unscan later. + p.buffer(tok, lit) + + return tok, lit +} + +func (p *PointParser) buffer(tok Token, lit string) { + // create the buffer if it is empty + if len(p.buf.tok) == 0 { + p.buf.tok = make([]Token, MAX_BUFFER_SIZE) + p.buf.lit = make([]string, MAX_BUFFER_SIZE) + } + + // for now assume a simple circular buffer of length two + p.buf.tok[0], p.buf.lit[0] = p.buf.tok[1], p.buf.lit[1] + p.buf.tok[1], p.buf.lit[1] = tok, lit +} + +// unscan pushes the previously read token back onto the buffer. +func (p *PointParser) unscan() { + p.unscanTokens(1) +} + +func (p *PointParser) unscanTokens(n int) { + if n > MAX_BUFFER_SIZE { + // just log for now + log.Printf("cannot unscan more than %d tokens", MAX_BUFFER_SIZE) + } + p.buf.n += n +} + +func (p *PointParser) reset(buf []byte) { + + // reset the scan buffer and write new byte + p.scanBuf.Reset() + p.scanBuf.Write(buf) + + if p.s == nil { + p.s = NewScanner(&p.scanBuf) + } else { + // reset p.s.r passing in the buffer as the reader + p.s.r.Reset(&p.scanBuf) + } + p.buf.n = 0 +} diff --git a/plugins/parsers/wavefront/parser_test.go b/plugins/parsers/wavefront/parser_test.go new file mode 100644 index 000000000..fed31b5f2 --- /dev/null +++ b/plugins/parsers/wavefront/parser_test.go @@ -0,0 +1,248 @@ +package wavefront + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func TestParse(t *testing.T) { + parser := NewWavefrontParser(nil) + + parsedMetrics, err := parser.Parse([]byte("test.metric 1")) + assert.NoError(t, err) + testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) + assert.NoError(t, err) + assert.Equal(t, parsedMetrics[0].Name(), testMetric.Name()) + assert.Equal(t, parsedMetrics[0].Fields(), testMetric.Fields()) + + parsedMetrics, err = parser.Parse([]byte("\u2206test.delta 1 1530939936")) + assert.NoError(t, err) + testMetric, err = metric.New("\u2206test.delta", map[string]string{}, + map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1 1530939936")) + assert.NoError(t, err) + testMetric, err = metric.New("\u0394test.delta", map[string]string{}, + map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\u0394test.delta 1.234 1530939936 source=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("\u0394test.delta", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, 
parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" -1.1234 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": -1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e04 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e04}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234e-04 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234e-04}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + +} + +func TestParseLine(t *testing.T) { + parser := NewWavefrontParser(nil) + + parsedMetric, err := parser.ParseLine("test.metric 1") + assert.NoError(t, err) + testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) + assert.NoError(t, err) + assert.Equal(t, parsedMetric.Name(), testMetric.Name()) + assert.Equal(t, parsedMetric.Fields(), testMetric.Fields()) + + parsedMetric, err = parser.ParseLine("test.metric 1 1530939936") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 source=mysource") + assert.NoError(t, 
err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 source=\"mysource\"") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("test.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) +} + +func TestParseMultiple(t *testing.T) { + parser := NewWavefrontParser(nil) + + parsedMetrics, err := parser.Parse([]byte("test.metric 1\ntest.metric2 2 1530939936")) + assert.NoError(t, err) + testMetric1, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(0, 0)) + assert.NoError(t, err) + testMetric2, err := metric.New("test.metric2", map[string]string{}, map[string]interface{}{"value": 2.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetrics := []telegraf.Metric{testMetric1, testMetric2} + assert.Equal(t, parsedMetrics[0].Name(), testMetrics[0].Name()) + assert.Equal(t, parsedMetrics[0].Fields(), testMetrics[0].Fields()) + assert.EqualValues(t, parsedMetrics[1], testMetrics[1]) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"")) + assert.NoError(t, err) + testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetrics = []telegraf.Metric{testMetric1, testMetric2} + assert.EqualValues(t, parsedMetrics, testMetrics) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 \"source\"=\"mysource\" tag2=value2\ntest.metric 1.1234 1530939936 source=\"mysource\" tag2=value2 ")) + assert.NoError(t, err) + testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource", "tag2": "value2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetrics = []telegraf.Metric{testMetric1, testMetric2} + assert.EqualValues(t, parsedMetrics, testMetrics) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 
source=mysource\n\"test.metric\" 1.1234 1530939936 source=\"mysource\"\ntest.metric3 333 1530939936 tagit=valueit")) + assert.NoError(t, err) + testMetric1, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetric2, err = metric.New("test.metric", map[string]string{"source": "mysource"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetric3, err := metric.New("test.metric3", map[string]string{"tagit": "valueit"}, map[string]interface{}{"value": 333.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + testMetrics = []telegraf.Metric{testMetric1, testMetric2, testMetric3} + assert.EqualValues(t, parsedMetrics, testMetrics) + +} + +func TestParseSpecial(t *testing.T) { + parser := NewWavefrontParser(nil) + + parsedMetric, err := parser.ParseLine("\"test.metric\" 1 1530939936") + assert.NoError(t, err) + testMetric, err := metric.New("test.metric", map[string]string{}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + + parsedMetric, err = parser.ParseLine("test.metric 1 1530939936 tag1=\"val\\\"ue1\"") + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"tag1": "val\\\"ue1"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetric, testMetric) + +} + +func TestParseInvalid(t *testing.T) { + parser := NewWavefrontParser(nil) + + _, err := parser.Parse([]byte("test.metric")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric string")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric 1 string")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.\u2206delta 1")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_no_pair")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag_broken_value=\"")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("\"test.metric 1 1530939936")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("test.metric 1 1530939936 tag1=val\\\"ue1")) + assert.Error(t, err) + + _, err = parser.Parse([]byte("\"test.metric\" -1.12-34 1530939936 \"source\"=\"mysource\" tag2=value2")) + assert.Error(t, err) + +} + +func TestParseDefaultTags(t *testing.T) { + parser := NewWavefrontParser(map[string]string{"myDefault": "value1", "another": "test2"}) + + parsedMetrics, err := parser.Parse([]byte("test.metric 1 1530939936")) + assert.NoError(t, err) + testMetric, err := metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("test.metric 1 1530939936 source=mysource")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": "test2", "source": "mysource"}, map[string]interface{}{"value": 1.}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + + parsedMetrics, err = parser.Parse([]byte("\"test.metric\" 1.1234 1530939936 another=\"test3\"")) + assert.NoError(t, err) + testMetric, err = metric.New("test.metric", map[string]string{"myDefault": "value1", "another": 
"test2"}, map[string]interface{}{"value": 1.1234}, time.Unix(1530939936, 0)) + assert.NoError(t, err) + assert.EqualValues(t, parsedMetrics[0], testMetric) + +} diff --git a/plugins/parsers/wavefront/scanner.go b/plugins/parsers/wavefront/scanner.go new file mode 100644 index 000000000..a528f72ee --- /dev/null +++ b/plugins/parsers/wavefront/scanner.go @@ -0,0 +1,71 @@ +package wavefront + +import ( + "bufio" + "io" +) + +// Lexical Point Scanner +type PointScanner struct { + r *bufio.Reader +} + +func NewScanner(r io.Reader) *PointScanner { + return &PointScanner{r: bufio.NewReader(r)} +} + +// read reads the next rune from the buffered reader. +// Returns rune(0) if an error occurs (or io.EOF is returned). +func (s *PointScanner) read() rune { + ch, _, err := s.r.ReadRune() + if err != nil { + return eof + } + return ch +} + +// unread places the previously read rune back on the reader. +func (s *PointScanner) unread() { + _ = s.r.UnreadRune() +} + +// Scan returns the next token and literal value. +func (s *PointScanner) Scan() (Token, string) { + + // Read the next rune + ch := s.read() + if isWhitespace(ch) { + return WS, string(ch) + } else if isLetter(ch) { + return LETTER, string(ch) + } else if isNumber(ch) { + return NUMBER, string(ch) + } else if isDelta(ch) { + return DELTA, string(ch) + } + + // Otherwise read the individual character. + switch ch { + case eof: + return EOF, "" + case '\n': + return NEWLINE, string(ch) + case '.': + return DOT, string(ch) + case '-': + return MINUS_SIGN, string(ch) + case '_': + return UNDERSCORE, string(ch) + case '/': + return SLASH, string(ch) + case '\\': + return BACKSLASH, string(ch) + case ',': + return COMMA, string(ch) + case '"': + return QUOTES, string(ch) + case '=': + return EQUALS, string(ch) + } + return ILLEGAL, string(ch) +} diff --git a/plugins/parsers/wavefront/token.go b/plugins/parsers/wavefront/token.go new file mode 100644 index 000000000..5b77d0cdb --- /dev/null +++ b/plugins/parsers/wavefront/token.go @@ -0,0 +1,46 @@ +package wavefront + +type Token int + +const ( + // Special tokens + ILLEGAL Token = iota + EOF + WS + + // Literals + literal_beg + LETTER // metric name, source/point tags + NUMBER + MINUS_SIGN + UNDERSCORE + DOT + SLASH + BACKSLASH + COMMA + DELTA + literal_end + + // Misc characters + QUOTES + EQUALS + NEWLINE +) + +func isWhitespace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' +} + +func isLetter(ch rune) bool { + return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') +} + +func isNumber(ch rune) bool { + return ch >= '0' && ch <= '9' +} + +func isDelta(ch rune) bool { + return ch == '\u2206' || ch == '\u0394' +} + +var eof = rune(0) diff --git a/plugins/processors/all/all.go b/plugins/processors/all/all.go index c06bbd426..dbf8a12e5 100644 --- a/plugins/processors/all/all.go +++ b/plugins/processors/all/all.go @@ -1,10 +1,24 @@ package all import ( + _ "github.com/influxdata/telegraf/plugins/processors/clone" _ "github.com/influxdata/telegraf/plugins/processors/converter" + _ "github.com/influxdata/telegraf/plugins/processors/date" + _ "github.com/influxdata/telegraf/plugins/processors/dedup" + _ "github.com/influxdata/telegraf/plugins/processors/defaults" _ "github.com/influxdata/telegraf/plugins/processors/enum" + _ "github.com/influxdata/telegraf/plugins/processors/filepath" _ "github.com/influxdata/telegraf/plugins/processors/override" + _ "github.com/influxdata/telegraf/plugins/processors/parser" + _ "github.com/influxdata/telegraf/plugins/processors/pivot" + _ 
"github.com/influxdata/telegraf/plugins/processors/port_name" _ "github.com/influxdata/telegraf/plugins/processors/printer" _ "github.com/influxdata/telegraf/plugins/processors/regex" + _ "github.com/influxdata/telegraf/plugins/processors/rename" + _ "github.com/influxdata/telegraf/plugins/processors/s2geo" + _ "github.com/influxdata/telegraf/plugins/processors/strings" + _ "github.com/influxdata/telegraf/plugins/processors/tag_limit" + _ "github.com/influxdata/telegraf/plugins/processors/template" _ "github.com/influxdata/telegraf/plugins/processors/topk" + _ "github.com/influxdata/telegraf/plugins/processors/unpivot" ) diff --git a/plugins/processors/clone/README.md b/plugins/processors/clone/README.md new file mode 100644 index 000000000..7ae33d36b --- /dev/null +++ b/plugins/processors/clone/README.md @@ -0,0 +1,38 @@ +# Clone Processor Plugin + +The clone processor plugin create a copy of each metric passing through it, +preserving untouched the original metric and allowing modifications in the +copied one. + +The modifications allowed are the ones supported by input plugins and aggregators: + +* name_override +* name_prefix +* name_suffix +* tags + +Select the metrics to modify using the standard +[measurement filtering](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md#measurement-filtering) +options. + +Values of *name_override*, *name_prefix*, *name_suffix* and already present +*tags* with conflicting keys will be overwritten. Absent *tags* will be +created. + +A typical use-case is gathering metrics once and cloning them to simulate +having several hosts (modifying ``host`` tag). + +### Configuration: + +```toml +# Apply metric modifications using override semantics. +[[processors.clone]] + ## All modifications on inputs and aggregators can be overridden: + # name_override = "new_name" + # name_prefix = "new_name_prefix" + # name_suffix = "new_name_suffix" + + ## Tags to be added (all values must be strings) + # [processors.clone.tags] + # additional_tag = "tag_value" +``` diff --git a/plugins/processors/clone/clone.go b/plugins/processors/clone/clone.go new file mode 100644 index 000000000..ad03fd3e4 --- /dev/null +++ b/plugins/processors/clone/clone.go @@ -0,0 +1,60 @@ +package clone + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +var sampleConfig = ` + ## All modifications on inputs and aggregators can be overridden: + # name_override = "new_name" + # name_prefix = "new_name_prefix" + # name_suffix = "new_name_suffix" + + ## Tags to be added (all values must be strings) + # [processors.clone.tags] + # additional_tag = "tag_value" +` + +type Clone struct { + NameOverride string + NamePrefix string + NameSuffix string + Tags map[string]string +} + +func (c *Clone) SampleConfig() string { + return sampleConfig +} + +func (c *Clone) Description() string { + return "Clone metrics and apply modifications." +} + +func (c *Clone) Apply(in ...telegraf.Metric) []telegraf.Metric { + cloned := []telegraf.Metric{} + + for _, metric := range in { + cloned = append(cloned, metric.Copy()) + + if len(c.NameOverride) > 0 { + metric.SetName(c.NameOverride) + } + if len(c.NamePrefix) > 0 { + metric.AddPrefix(c.NamePrefix) + } + if len(c.NameSuffix) > 0 { + metric.AddSuffix(c.NameSuffix) + } + for key, value := range c.Tags { + metric.AddTag(key, value) + } + } + return append(in, cloned...) 
+} + +func init() { + processors.Add("clone", func() telegraf.Processor { + return &Clone{} + }) +} diff --git a/plugins/processors/clone/clone_test.go b/plugins/processors/clone/clone_test.go new file mode 100644 index 000000000..f1b8dc5b2 --- /dev/null +++ b/plugins/processors/clone/clone_test.go @@ -0,0 +1,83 @@ +package clone + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func createTestMetric() telegraf.Metric { + metric, _ := metric.New("m1", + map[string]string{"metric_tag": "from_metric"}, + map[string]interface{}{"value": int64(1)}, + time.Now(), + ) + return metric +} + +func calculateProcessedTags(processor Clone, metric telegraf.Metric) map[string]string { + processed := processor.Apply(metric) + return processed[0].Tags() +} + +func TestRetainsTags(t *testing.T) { + processor := Clone{} + + tags := calculateProcessedTags(processor, createTestMetric()) + + value, present := tags["metric_tag"] + assert.True(t, present, "Tag of metric was not present") + assert.Equal(t, "from_metric", value, "Value of Tag was changed") +} + +func TestAddTags(t *testing.T) { + processor := Clone{Tags: map[string]string{"added_tag": "from_config", "another_tag": ""}} + + tags := calculateProcessedTags(processor, createTestMetric()) + + value, present := tags["added_tag"] + assert.True(t, present, "Additional Tag of metric was not present") + assert.Equal(t, "from_config", value, "Value of Tag was changed") + assert.Equal(t, 3, len(tags), "Should have one previous and two added tags.") +} + +func TestOverwritesPresentTagValues(t *testing.T) { + processor := Clone{Tags: map[string]string{"metric_tag": "from_config"}} + + tags := calculateProcessedTags(processor, createTestMetric()) + + value, present := tags["metric_tag"] + assert.True(t, present, "Tag of metric was not present") + assert.Equal(t, 1, len(tags), "Should only have one tag.") + assert.Equal(t, "from_config", value, "Value of Tag was not changed") +} + +func TestOverridesName(t *testing.T) { + processor := Clone{NameOverride: "overridden"} + + processed := processor.Apply(createTestMetric()) + + assert.Equal(t, "overridden", processed[0].Name(), "Name was not overridden") + assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified") +} + +func TestNamePrefix(t *testing.T) { + processor := Clone{NamePrefix: "Pre-"} + + processed := processor.Apply(createTestMetric()) + + assert.Equal(t, "Pre-m1", processed[0].Name(), "Prefix was not applied") + assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified") +} + +func TestNameSuffix(t *testing.T) { + processor := Clone{NameSuffix: "-suff"} + + processed := processor.Apply(createTestMetric()) + + assert.Equal(t, "m1-suff", processed[0].Name(), "Suffix was not applied") + assert.Equal(t, "m1", processed[1].Name(), "Original metric was modified") +} diff --git a/plugins/processors/converter/README.md b/plugins/processors/converter/README.md index f69c8728e..d916c8764 100644 --- a/plugins/processors/converter/README.md +++ b/plugins/processors/converter/README.md @@ -9,16 +9,17 @@ Values that cannot be converted are dropped. uniquely identifiable. Fields with the same series key (measurement + tags) will overwrite one another. 
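The converter now compiles its filters in `Init()` and reports conversion failures through the injected `telegraf.Logger` (the old `logPrintf` helper is removed further down), so a programmatic caller wires both up before `Apply`. A minimal sketch of the tag-to-field case from this README, assuming the exported `converter` package path and the `testutil.Logger` helper (not shown in this diff):

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/plugins/processors/converter"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// Convert the "port" tag into a string field, as in the example below.
	c := &converter.Converter{
		Tags: &converter.Conversion{String: []string{"port"}},
		Log:  testutil.Logger{}, // assumed helper; any telegraf.Logger works
	}
	if err := c.Init(); err != nil {
		panic(err)
	}

	m := testutil.MustMetric("apache",
		map[string]string{"port": "80", "server": "debian-stretch-apache"},
		map[string]interface{}{"BusyWorkers": int64(1)},
		time.Unix(0, 0),
	)

	out := c.Apply(m)
	fmt.Println(out[0].Tags())   // "port" is gone from the tag set
	fmt.Println(out[0].Fields()) // port="80" shows up as a string field
}
```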
-### Configuration: +### Configuration ```toml # Convert values to another metric value type -[processors.converter] +[[processors.converter]] ## Tags to convert ## ## The table key determines the target type, and the array of key-values ## select the keys to convert. The array may contain globs. ## = [...] [processors.converter.tags] + measurement = [] string = [] integer = [] unsigned = [] @@ -31,6 +32,7 @@ will overwrite one another. ## select the keys to convert. The array may contain globs. ## = [...] [processors.converter.fields] + measurement = [] tag = [] string = [] integer = [] @@ -39,19 +41,40 @@ will overwrite one another. float = [] ``` -### Examples: +### Example +Convert `port` tag to a string field: ```toml -[processors.converter] +[[processors.converter]] [processors.converter.tags] string = ["port"] - - [processors.converter.fields] - integer = ["scboard_*"] - tag = ["ParseServerConfigGeneration"] ``` ```diff -- apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerConfigGeneration=3,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 1502489900000000000 -+ apache,server=debian-stretch-apache,ParentServerConfigGeneration=3 port="80",BusyWorkers=1,BytesPerReq=0,BytesPerSec=0,CPUChildrenSystem=0,CPUChildrenUser=0,CPULoad=0.00995025,CPUSystem=0.01,CPUUser=0.01,ConnsAsyncClosing=0,ConnsAsyncKeepAlive=0,ConnsAsyncWriting=0,ConnsTotal=0,IdleWorkers=49,Load1=0.01,Load15=0,Load5=0,ParentServerMPMGeneration=2,ReqPerSec=0.00497512,ServerUptimeSeconds=201,TotalAccesses=1,TotalkBytes=0,Uptime=201,scboard_closing=0i,scboard_dnslookup=0i,scboard_finishing=0i,scboard_idle_cleanup=0i,scboard_keepalive=0i,scboard_logging=0i,scboard_open=100i,scboard_reading=0i,scboard_sending=1i,scboard_starting=0i,scboard_waiting=49i 1502489900000000000 +- apache,port=80,server=debian-stretch-apache BusyWorkers=1,BytesPerReq=0 ++ apache,server=debian-stretch-apache port="80",BusyWorkers=1,BytesPerReq=0 +``` + +Convert all `scboard_*` fields to an integer: +```toml +[[processors.converter]] + [processors.converter.fields] + integer = ["scboard_*"] +``` + +```diff +- apache scboard_closing=0,scboard_dnslookup=0,scboard_finishing=0,scboard_idle_cleanup=0,scboard_keepalive=0,scboard_logging=0,scboard_open=100,scboard_reading=0,scboard_sending=1,scboard_starting=0,scboard_waiting=49 ++ apache scboard_closing=0i,scboard_dnslookup=0i,scboard_finishing=0i,scboard_idle_cleanup=0i,scboard_keepalive=0i,scboard_logging=0i,scboard_open=100i,scboard_reading=0i,scboard_sending=1i,scboard_starting=0i,scboard_waiting=49i +``` + +Rename the measurement from a tag value: +```toml +[[processors.converter]] + [processors.converter.tags] + measurement = ["topic"] +``` + +```diff +- mqtt_consumer,topic=sensor temp=42 ++ sensor temp=42 ``` diff --git a/plugins/processors/converter/converter.go b/plugins/processors/converter/converter.go index 50fd195e0..55a2a2d09 100644 --- a/plugins/processors/converter/converter.go +++ b/plugins/processors/converter/converter.go @@ -2,7 +2,6 @@ package converter import ( "fmt" - 
"log" "math" "strconv" @@ -18,6 +17,7 @@ var sampleConfig = ` ## select the keys to convert. The array may contain globs. ## = [...] [processors.converter.tags] + measurement = [] string = [] integer = [] unsigned = [] @@ -30,6 +30,7 @@ var sampleConfig = ` ## select the keys to convert. The array may contain globs. ## = [...] [processors.converter.fields] + measurement = [] tag = [] string = [] integer = [] @@ -39,30 +40,32 @@ var sampleConfig = ` ` type Conversion struct { - Tag []string `toml:"tag"` - String []string `toml:"string"` - Integer []string `toml:"integer"` - Unsigned []string `toml:"unsigned"` - Boolean []string `toml:"boolean"` - Float []string `toml:"float"` + Measurement []string `toml:"measurement"` + Tag []string `toml:"tag"` + String []string `toml:"string"` + Integer []string `toml:"integer"` + Unsigned []string `toml:"unsigned"` + Boolean []string `toml:"boolean"` + Float []string `toml:"float"` } type Converter struct { - Tags *Conversion `toml:"tags"` - Fields *Conversion `toml:"fields"` + Tags *Conversion `toml:"tags"` + Fields *Conversion `toml:"fields"` + Log telegraf.Logger `toml:"-"` - initialized bool tagConversions *ConversionFilter fieldConversions *ConversionFilter } type ConversionFilter struct { - Tag filter.Filter - String filter.Filter - Integer filter.Filter - Unsigned filter.Filter - Boolean filter.Filter - Float filter.Filter + Measurement filter.Filter + Tag filter.Filter + String filter.Filter + Integer filter.Filter + Unsigned filter.Filter + Boolean filter.Filter + Float filter.Filter } func (p *Converter) SampleConfig() string { @@ -73,15 +76,11 @@ func (p *Converter) Description() string { return "Convert values to another metric value type" } -func (p *Converter) Apply(metrics ...telegraf.Metric) []telegraf.Metric { - if !p.initialized { - err := p.compile() - if err != nil { - logPrintf("initialization error: %v\n", err) - return metrics - } - } +func (p *Converter) Init() error { + return p.compile() +} +func (p *Converter) Apply(metrics ...telegraf.Metric) []telegraf.Metric { for _, metric := range metrics { p.convertTags(metric) p.convertFields(metric) @@ -106,7 +105,6 @@ func (p *Converter) compile() error { p.tagConversions = tf p.fieldConversions = ff - p.initialized = true return nil } @@ -117,6 +115,11 @@ func compileFilter(conv *Conversion) (*ConversionFilter, error) { var err error cf := &ConversionFilter{} + cf.Measurement, err = filter.Compile(conv.Measurement) + if err != nil { + return nil, err + } + cf.Tag, err = filter.Compile(conv.Tag) if err != nil { return nil, err @@ -150,13 +153,19 @@ func compileFilter(conv *Conversion) (*ConversionFilter, error) { return cf, nil } -// convertTags converts tags into fields +// convertTags converts tags into measurements or fields. 
func (p *Converter) convertTags(metric telegraf.Metric) { if p.tagConversions == nil { return } for key, value := range metric.Tags() { + if p.tagConversions.Measurement != nil && p.tagConversions.Measurement.Match(key) { + metric.RemoveTag(key) + metric.SetName(value) + continue + } + if p.tagConversions.String != nil && p.tagConversions.String.Match(key) { metric.RemoveTag(key) metric.AddField(key, value) @@ -167,7 +176,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) { v, ok := toInteger(value) if !ok { metric.RemoveTag(key) - logPrintf("error converting to integer [%T]: %v\n", value, value) + p.Log.Errorf("error converting to integer [%T]: %v", value, value) continue } @@ -179,7 +188,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) { v, ok := toUnsigned(value) if !ok { metric.RemoveTag(key) - logPrintf("error converting to unsigned [%T]: %v\n", value, value) + p.Log.Errorf("error converting to unsigned [%T]: %v", value, value) continue } @@ -192,7 +201,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) { v, ok := toBool(value) if !ok { metric.RemoveTag(key) - logPrintf("error converting to boolean [%T]: %v\n", value, value) + p.Log.Errorf("error converting to boolean [%T]: %v", value, value) continue } @@ -205,7 +214,7 @@ func (p *Converter) convertTags(metric telegraf.Metric) { v, ok := toFloat(value) if !ok { metric.RemoveTag(key) - logPrintf("error converting to float [%T]: %v\n", value, value) + p.Log.Errorf("error converting to float [%T]: %v", value, value) continue } @@ -216,18 +225,31 @@ func (p *Converter) convertTags(metric telegraf.Metric) { } } -// convertFields converts fields into tags or other field types +// convertFields converts fields into measurements, tags, or other field types. func (p *Converter) convertFields(metric telegraf.Metric) { if p.fieldConversions == nil { return } for key, value := range metric.Fields() { + if p.fieldConversions.Measurement != nil && p.fieldConversions.Measurement.Match(key) { + v, ok := toString(value) + if !ok { + metric.RemoveField(key) + p.Log.Errorf("error converting to measurement [%T]: %v", value, value) + continue + } + + metric.RemoveField(key) + metric.SetName(v) + continue + } + if p.fieldConversions.Tag != nil && p.fieldConversions.Tag.Match(key) { v, ok := toString(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to tag [%T]: %v\n", value, value) + p.Log.Errorf("error converting to tag [%T]: %v", value, value) continue } @@ -240,7 +262,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toFloat(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to integer [%T]: %v\n", value, value) + p.Log.Errorf("error converting to float [%T]: %v", value, value) continue } @@ -253,7 +275,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toInteger(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to integer [%T]: %v\n", value, value) + p.Log.Errorf("error converting to integer [%T]: %v", value, value) continue } @@ -266,7 +288,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toUnsigned(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to unsigned [%T]: %v\n", value, value) + p.Log.Errorf("error converting to unsigned [%T]: %v", value, value) continue } @@ -279,7 +301,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toBool(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to bool [%T]: %v\n", value, value) + 
p.Log.Errorf("error converting to bool [%T]: %v", value, value) continue } @@ -292,7 +314,7 @@ func (p *Converter) convertFields(metric telegraf.Metric) { v, ok := toString(value) if !ok { metric.RemoveField(key) - logPrintf("error converting to string [%T]: %v\n", value, value) + p.Log.Errorf("Error converting to string [%T]: %v", value, value) continue } @@ -305,12 +327,12 @@ func (p *Converter) convertFields(metric telegraf.Metric) { func toBool(v interface{}) (bool, bool) { switch value := v.(type) { - case int64, uint64, float64: - if value != 0 { - return true, true - } else { - return false, false - } + case int64: + return value != 0, true + case uint64: + return value != 0, true + case float64: + return value != 0, true case bool: return value, true case string: @@ -336,7 +358,7 @@ func toInteger(v interface{}) (int64, bool) { } else if value > float64(math.MaxInt64) { return math.MaxInt64, true } else { - return int64(value), true + return int64(math.Round(value)), true } case bool: if value { @@ -345,8 +367,16 @@ func toInteger(v interface{}) (int64, bool) { return 0, true } case string: - result, err := strconv.ParseInt(value, 10, 64) - return result, err == nil + result, err := strconv.ParseInt(value, 0, 64) + + if err != nil { + result, err := strconv.ParseFloat(value, 64) + if err != nil { + return 0, false + } + return toInteger(result) + } + return result, true } return 0, false } @@ -367,7 +397,7 @@ func toUnsigned(v interface{}) (uint64, bool) { } else if value > float64(math.MaxUint64) { return math.MaxUint64, true } else { - return uint64(value), true + return uint64(math.Round(value)), true } case bool: if value { @@ -376,8 +406,16 @@ func toUnsigned(v interface{}) (uint64, bool) { return 0, true } case string: - result, err := strconv.ParseUint(value, 10, 64) - return result, err == nil + result, err := strconv.ParseUint(value, 0, 64) + + if err != nil { + result, err := strconv.ParseFloat(value, 64) + if err != nil { + return 0, false + } + return toUnsigned(result) + } + return result, true } return 0, false } @@ -419,10 +457,6 @@ func toString(v interface{}) (string, bool) { return "", false } -func logPrintf(format string, v ...interface{}) { - log.Printf("D! [processors.converter] "+format, v...) 
-} - func init() { processors.Add("converter", func() telegraf.Processor { return &Converter{} diff --git a/plugins/processors/converter/converter_test.go b/plugins/processors/converter/converter_test.go index 76839760d..efde0bcd9 100644 --- a/plugins/processors/converter/converter_test.go +++ b/plugins/processors/converter/converter_test.go @@ -6,48 +6,17 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" "github.com/stretchr/testify/require" ) -func Metric(v telegraf.Metric, err error) telegraf.Metric { - if err != nil { - panic(err) - } - return v -} - func TestConverter(t *testing.T) { tests := []struct { name string converter *Converter input telegraf.Metric - expected telegraf.Metric + expected []telegraf.Metric }{ - { - name: "empty", - converter: &Converter{}, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), - ), - expected: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "value": 42.0, - }, - time.Unix(0, 0), - ), - ), - }, { name: "from tag", converter: &Converter{ @@ -60,23 +29,21 @@ func TestConverter(t *testing.T) { Tag: []string{"tag"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{ - "float": "42", - "int": "42", - "uint": "42", - "bool": "true", - "string": "howdy", - "tag": "tag", - }, - map[string]interface{}{}, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{ + "float": "42", + "int": "42", + "uint": "42", + "bool": "true", + "string": "howdy", + "tag": "tag", + }, + map[string]interface{}{}, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "tag": "tag", @@ -90,7 +57,7 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "from tag unconvertible", @@ -102,71 +69,79 @@ func TestConverter(t *testing.T) { Float: []string{"float"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{ - "float": "a", - "int": "b", - "uint": "c", - "bool": "maybe", - }, - map[string]interface{}{}, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{ + "float": "a", + "int": "b", + "uint": "c", + "bool": "maybe", + }, + map[string]interface{}{}, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0), ), - ), + }, }, { name: "from string field", converter: &Converter{ Fields: &Conversion{ String: []string{"a"}, - Integer: []string{"b"}, - Unsigned: []string{"c"}, + Integer: []string{"b", "b1", "b2", "b3"}, + Unsigned: []string{"c", "c1", "c2", "c3"}, Boolean: []string{"d"}, Float: []string{"e"}, Tag: []string{"f"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": "howdy", - "b": "42", - "c": "42", - "d": "true", - "e": "42.0", - "f": "foo", - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": "howdy", + "b": "42", + "b1": "42.2", + "b2": "42.5", + "b3": "0x2A", + "c": "42", + "c1": "42.2", + "c2": "42.5", + "c3": "0x2A", + "d": "true", + "e": "42.0", + "f": "foo", + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "foo", }, 
map[string]interface{}{ - "a": "howdy", - "b": int64(42), - "c": uint64(42), - "d": true, - "e": 42.0, + "a": "howdy", + "b": int64(42), + "b1": int64(42), + "b2": int64(43), + "b3": int64(42), + "c": uint64(42), + "c1": uint64(42), + "c2": uint64(43), + "c3": uint64(42), + "d": true, + "e": 42.0, }, time.Unix(0, 0), ), - ), + }, }, { name: "from string field unconvertible", @@ -178,27 +153,25 @@ func TestConverter(t *testing.T) { Float: []string{"d"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": "a", - "b": "b", - "c": "c", - "d": "d", - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": "a", + "b": "b", + "c": "c", + "d": "d", + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{}, map[string]interface{}{}, time.Unix(0, 0), ), - ), + }, }, { name: "from integer field", @@ -207,29 +180,28 @@ func TestConverter(t *testing.T) { String: []string{"a"}, Integer: []string{"b"}, Unsigned: []string{"c", "negative_uint"}, - Boolean: []string{"d"}, + Boolean: []string{"d", "bool_zero"}, Float: []string{"e"}, Tag: []string{"f"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": int64(42), - "b": int64(42), - "c": int64(42), - "d": int64(42), - "e": int64(42), - "f": int64(42), - "negative_uint": int64(-42), - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": int64(42), + "b": int64(42), + "c": int64(42), + "d": int64(42), + "e": int64(42), + "f": int64(42), + "negative_uint": int64(-42), + "bool_zero": int64(0), + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "42", @@ -241,10 +213,11 @@ func TestConverter(t *testing.T) { "d": true, "e": 42.0, "negative_uint": uint64(0), + "bool_zero": false, }, time.Unix(0, 0), ), - ), + }, }, { name: "from unsigned field", @@ -253,29 +226,28 @@ func TestConverter(t *testing.T) { String: []string{"a"}, Integer: []string{"b", "overflow_int"}, Unsigned: []string{"c"}, - Boolean: []string{"d"}, + Boolean: []string{"d", "bool_zero"}, Float: []string{"e"}, Tag: []string{"f"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": uint64(42), - "b": uint64(42), - "c": uint64(42), - "d": uint64(42), - "e": uint64(42), - "f": uint64(42), - "overflow_int": uint64(math.MaxUint64), - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": uint64(42), + "b": uint64(42), + "c": uint64(42), + "d": uint64(42), + "e": uint64(42), + "f": uint64(42), + "overflow_int": uint64(math.MaxUint64), + "bool_zero": uint64(0), + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "42", @@ -287,10 +259,11 @@ func TestConverter(t *testing.T) { "d": true, "e": 42.0, "overflow_int": int64(math.MaxInt64), + "bool_zero": false, }, time.Unix(0, 0), ), - ), + }, }, { name: "out of range for unsigned", @@ -299,19 +272,17 @@ func TestConverter(t *testing.T) { Unsigned: []string{"a", "b"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": int64(-42), - "b": math.MaxFloat64, - }, - time.Unix(0, 0), - ), + input: 
testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": int64(-42), + "b": math.MaxFloat64, + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{}, map[string]interface{}{ @@ -320,7 +291,7 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "boolean field", @@ -334,29 +305,27 @@ func TestConverter(t *testing.T) { Tag: []string{"f", "ff"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": true, - "b": true, - "c": true, - "d": true, - "e": true, - "f": true, - "af": false, - "bf": false, - "cf": false, - "df": false, - "ef": false, - "ff": false, - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": true, + "b": true, + "c": true, + "d": true, + "e": true, + "f": true, + "af": false, + "bf": false, + "cf": false, + "df": false, + "ef": false, + "ff": false, + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "true", @@ -376,7 +345,7 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, { name: "from float field", @@ -385,33 +354,32 @@ func TestConverter(t *testing.T) { String: []string{"a"}, Integer: []string{"b", "too_large_int", "too_small_int"}, Unsigned: []string{"c", "negative_uint", "too_large_uint", "too_small_uint"}, - Boolean: []string{"d"}, + Boolean: []string{"d", "bool_zero"}, Float: []string{"e"}, Tag: []string{"f"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "a": 42.0, - "b": 42.0, - "c": 42.0, - "d": 42.0, - "e": 42.0, - "f": 42.0, - "too_large_int": math.MaxFloat64, - "too_large_uint": math.MaxFloat64, - "too_small_int": -math.MaxFloat64, - "too_small_uint": -math.MaxFloat64, - "negative_uint": -42.0, - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "a": 42.0, + "b": 42.0, + "c": 42.0, + "d": 42.0, + "e": 42.0, + "f": 42.0, + "too_large_int": math.MaxFloat64, + "too_large_uint": math.MaxFloat64, + "too_small_int": -math.MaxFloat64, + "too_small_uint": -math.MaxFloat64, + "negative_uint": -42.0, + "bool_zero": 0.0, + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{ "f": "42", @@ -427,10 +395,11 @@ func TestConverter(t *testing.T) { "too_small_int": int64(math.MinInt64), "too_small_uint": uint64(0), "negative_uint": uint64(0), + "bool_zero": false, }, time.Unix(0, 0), ), - ), + }, }, { name: "globbing", @@ -439,20 +408,18 @@ func TestConverter(t *testing.T) { Integer: []string{"int_*"}, }, }, - input: Metric( - metric.New( - "cpu", - map[string]string{}, - map[string]interface{}{ - "int_a": "1", - "int_b": "2", - "float_a": 1.0, - }, - time.Unix(0, 0), - ), + input: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "int_a": "1", + "int_b": "2", + "float_a": 1.0, + }, + time.Unix(0, 0), ), - expected: Metric( - metric.New( + expected: []telegraf.Metric{ + testutil.MustMetric( "cpu", map[string]string{}, map[string]interface{}{ @@ -462,18 +429,102 @@ func TestConverter(t *testing.T) { }, time.Unix(0, 0), ), - ), + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - metrics := tt.converter.Apply(tt.input) + tt.converter.Log = testutil.Logger{} 
- require.Equal(t, 1, len(metrics)) - require.Equal(t, tt.expected.Name(), metrics[0].Name()) - require.Equal(t, tt.expected.Tags(), metrics[0].Tags()) - require.Equal(t, tt.expected.Fields(), metrics[0].Fields()) - require.Equal(t, tt.expected.Time(), metrics[0].Time()) + err := tt.converter.Init() + require.NoError(t, err) + actual := tt.converter.Apply(tt.input) + + testutil.RequireMetricsEqual(t, tt.expected, actual) }) } } + +func TestMeasurement(t *testing.T) { + tests := []struct { + name string + converter *Converter + input telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "measurement from tag", + converter: &Converter{ + Tags: &Conversion{ + Measurement: []string{"filepath"}, + }, + }, + input: testutil.MustMetric( + "file", + map[string]string{ + "filepath": "/var/log/syslog", + }, + map[string]interface{}{ + "msg": "howdy", + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "/var/log/syslog", + map[string]string{}, + map[string]interface{}{ + "msg": "howdy", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "measurement from field", + converter: &Converter{ + Fields: &Conversion{ + Measurement: []string{"topic"}, + }, + }, + input: testutil.MustMetric( + "file", + map[string]string{}, + map[string]interface{}{ + "v": 1, + "topic": "telegraf", + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "telegraf", + map[string]string{}, + map[string]interface{}{ + "v": 1, + }, + time.Unix(0, 0), + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.converter.Log = testutil.Logger{} + err := tt.converter.Init() + require.NoError(t, err) + + actual := tt.converter.Apply(tt.input) + + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} + +func TestEmptyConfigInitError(t *testing.T) { + converter := &Converter{ + Log: testutil.Logger{}, + } + err := converter.Init() + require.Error(t, err) +} diff --git a/plugins/processors/date/README.md b/plugins/processors/date/README.md new file mode 100644 index 000000000..9a093fe0e --- /dev/null +++ b/plugins/processors/date/README.md @@ -0,0 +1,56 @@ +# Date Processor Plugin + +Use the `date` processor to add the metric timestamp as a human readable tag. + +A common use is to add a tag that can be used to group by month or year. + +A few example usecases include: +1) consumption data for utilities on per month basis +2) bandwidth capacity per month +3) compare energy production or sales on a yearly or monthly basis + +### Configuration + +```toml +[[processors.date]] + ## New tag to create + tag_key = "month" + + ## New field to create (cannot set both field_key and tag_key) + # field_key = "month" + + ## Date format string, must be a representation of the Go "reference time" + ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". + date_format = "Jan" + + ## If destination is a field, date format can also be one of + ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. + # date_format = "unix" + + ## Offset duration added to the date string when writing the new tag. + # date_offset = "0s" + + ## Timezone to use when creating the tag or field using a reference time + ## string. This can be set to one of "UTC", "Local", or to a location name + ## in the IANA Time Zone database. + ## example: timezone = "America/Los_Angeles" + # timezone = "UTC" +``` + +#### timezone + +On Windows, only the `Local` and `UTC` zones are available by default. 
To use +other timezones, set the `ZONEINFO` environment variable to the location of +[`zoneinfo.zip`][zoneinfo]: +``` +set ZONEINFO=C:\zoneinfo.zip +``` + +### Example + +```diff +- throughput lower=10i,upper=1000i,mean=500i 1560540094000000000 ++ throughput,month=Jun lower=10i,upper=1000i,mean=500i 1560540094000000000 +``` + +[zoneinfo]: https://github.com/golang/go/raw/50bd1c4d4eb4fac8ddeb5f063c099daccfb71b26/lib/time/zoneinfo.zip diff --git a/plugins/processors/date/date.go b/plugins/processors/date/date.go new file mode 100644 index 000000000..ef8609811 --- /dev/null +++ b/plugins/processors/date/date.go @@ -0,0 +1,101 @@ +package date + +import ( + "errors" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/processors" +) + +const sampleConfig = ` + ## New tag to create + tag_key = "month" + + ## New field to create (cannot set both field_key and tag_key) + # field_key = "month" + + ## Date format string, must be a representation of the Go "reference time" + ## which is "Mon Jan 2 15:04:05 -0700 MST 2006". + date_format = "Jan" + + ## If destination is a field, date format can also be one of + ## "unix", "unix_ms", "unix_us", or "unix_ns", which will insert an integer field. + # date_format = "unix" + + ## Offset duration added to the date string when writing the new tag. + # date_offset = "0s" + + ## Timezone to use when creating the tag or field using a reference time + ## string. This can be set to one of "UTC", "Local", or to a location name + ## in the IANA Time Zone database. + ## example: timezone = "America/Los_Angeles" + # timezone = "UTC" +` + +const defaultTimezone = "UTC" + +type Date struct { + TagKey string `toml:"tag_key"` + FieldKey string `toml:"field_key"` + DateFormat string `toml:"date_format"` + DateOffset internal.Duration `toml:"date_offset"` + Timezone string `toml:"timezone"` + + location *time.Location +} + +func (d *Date) SampleConfig() string { + return sampleConfig +} + +func (d *Date) Description() string { + return "Dates measurements, tags, and fields that pass through this filter." +} + +func (d *Date) Init() error { + // Check either TagKey or FieldKey specified + if len(d.FieldKey) > 0 && len(d.TagKey) > 0 { + return errors.New("Only one of field_key or tag_key can be specified") + } else if len(d.FieldKey) == 0 && len(d.TagKey) == 0 { + return errors.New("One of field_key or tag_key must be specified") + } + + var err error + // LoadLocation returns UTC if timezone is the empty string. 
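+	// Any other value must be "Local" or a name from the IANA Time Zone
+	// database; an unrecognized name makes LoadLocation return an error,
+	// which surfaces here as an Init failure rather than at apply time.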
+ d.location, err = time.LoadLocation(d.Timezone) + return err +} + +func (d *Date) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, point := range in { + tm := point.Time().In(d.location).Add(d.DateOffset.Duration) + if len(d.TagKey) > 0 { + point.AddTag(d.TagKey, tm.Format(d.DateFormat)) + } else if len(d.FieldKey) > 0 { + switch d.DateFormat { + case "unix": + point.AddField(d.FieldKey, tm.Unix()) + case "unix_ms": + point.AddField(d.FieldKey, tm.UnixNano()/1000000) + case "unix_us": + point.AddField(d.FieldKey, tm.UnixNano()/1000) + case "unix_ns": + point.AddField(d.FieldKey, tm.UnixNano()) + default: + point.AddField(d.FieldKey, tm.Format(d.DateFormat)) + } + } + } + + return in +} + +func init() { + processors.Add("date", func() telegraf.Processor { + return &Date{ + Timezone: defaultTimezone, + } + }) +} diff --git a/plugins/processors/date/date_test.go b/plugins/processors/date/date_test.go new file mode 100644 index 000000000..42e094c93 --- /dev/null +++ b/plugins/processors/date/date_test.go @@ -0,0 +1,199 @@ +package date + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func MustMetric(name string, tags map[string]string, fields map[string]interface{}, metricTime time.Time) telegraf.Metric { + if tags == nil { + tags = map[string]string{} + } + if fields == nil { + fields = map[string]interface{}{} + } + m, _ := metric.New(name, tags, fields, metricTime) + return m +} + +func TestTagAndField(t *testing.T) { + dateFormatTagAndField := Date{ + TagKey: "month", + FieldKey: "month", + } + err := dateFormatTagAndField.Init() + require.Error(t, err) + +} + +func TestNoOutputSpecified(t *testing.T) { + dateFormatNoOutput := Date{} + err := dateFormatNoOutput.Init() + require.Error(t, err) +} + +func TestMonthTag(t *testing.T) { + dateFormatMonth := Date{ + TagKey: "month", + DateFormat: "Jan", + } + err := dateFormatMonth.Init() + require.NoError(t, err) + + currentTime := time.Now() + month := currentTime.Format("Jan") + + m1 := MustMetric("foo", nil, nil, currentTime) + m2 := MustMetric("bar", nil, nil, currentTime) + m3 := MustMetric("baz", nil, nil, currentTime) + monthApply := dateFormatMonth.Apply(m1, m2, m3) + assert.Equal(t, map[string]string{"month": month}, monthApply[0].Tags(), "should add tag 'month'") + assert.Equal(t, map[string]string{"month": month}, monthApply[1].Tags(), "should add tag 'month'") + assert.Equal(t, map[string]string{"month": month}, monthApply[2].Tags(), "should add tag 'month'") +} + +func TestMonthField(t *testing.T) { + dateFormatMonth := Date{ + FieldKey: "month", + DateFormat: "Jan", + } + + err := dateFormatMonth.Init() + require.NoError(t, err) + + currentTime := time.Now() + month := currentTime.Format("Jan") + + m1 := MustMetric("foo", nil, nil, currentTime) + m2 := MustMetric("bar", nil, nil, currentTime) + m3 := MustMetric("baz", nil, nil, currentTime) + monthApply := dateFormatMonth.Apply(m1, m2, m3) + assert.Equal(t, map[string]interface{}{"month": month}, monthApply[0].Fields(), "should add field 'month'") + assert.Equal(t, map[string]interface{}{"month": month}, monthApply[1].Fields(), "should add field 'month'") + assert.Equal(t, map[string]interface{}{"month": month}, monthApply[2].Fields(), "should add field 'month'") +} + +func TestOldDateTag(t *testing.T) { + dateFormatYear := Date{ + TagKey: 
"year", + DateFormat: "2006", + } + + err := dateFormatYear.Init() + require.NoError(t, err) + + m7 := MustMetric("foo", nil, nil, time.Date(1993, 05, 27, 0, 0, 0, 0, time.UTC)) + customDateApply := dateFormatYear.Apply(m7) + assert.Equal(t, map[string]string{"year": "1993"}, customDateApply[0].Tags(), "should add tag 'year'") +} + +func TestFieldUnix(t *testing.T) { + dateFormatUnix := Date{ + FieldKey: "unix", + DateFormat: "unix", + } + + err := dateFormatUnix.Init() + require.NoError(t, err) + + currentTime := time.Now() + unixTime := currentTime.Unix() + + m8 := MustMetric("foo", nil, nil, currentTime) + unixApply := dateFormatUnix.Apply(m8) + assert.Equal(t, map[string]interface{}{"unix": unixTime}, unixApply[0].Fields(), "should add unix time in s as field 'unix'") +} + +func TestFieldUnixNano(t *testing.T) { + dateFormatUnixNano := Date{ + FieldKey: "unix_ns", + DateFormat: "unix_ns", + } + + err := dateFormatUnixNano.Init() + require.NoError(t, err) + + currentTime := time.Now() + unixNanoTime := currentTime.UnixNano() + + m9 := MustMetric("foo", nil, nil, currentTime) + unixNanoApply := dateFormatUnixNano.Apply(m9) + assert.Equal(t, map[string]interface{}{"unix_ns": unixNanoTime}, unixNanoApply[0].Fields(), "should add unix time in ns as field 'unix_ns'") +} + +func TestFieldUnixMillis(t *testing.T) { + dateFormatUnixMillis := Date{ + FieldKey: "unix_ms", + DateFormat: "unix_ms", + } + + err := dateFormatUnixMillis.Init() + require.NoError(t, err) + + currentTime := time.Now() + unixMillisTime := currentTime.UnixNano() / 1000000 + + m10 := MustMetric("foo", nil, nil, currentTime) + unixMillisApply := dateFormatUnixMillis.Apply(m10) + assert.Equal(t, map[string]interface{}{"unix_ms": unixMillisTime}, unixMillisApply[0].Fields(), "should add unix time in ms as field 'unix_ms'") +} + +func TestFieldUnixMicros(t *testing.T) { + dateFormatUnixMicros := Date{ + FieldKey: "unix_us", + DateFormat: "unix_us", + } + + err := dateFormatUnixMicros.Init() + require.NoError(t, err) + + currentTime := time.Now() + unixMicrosTime := currentTime.UnixNano() / 1000 + + m11 := MustMetric("foo", nil, nil, currentTime) + unixMicrosApply := dateFormatUnixMicros.Apply(m11) + assert.Equal(t, map[string]interface{}{"unix_us": unixMicrosTime}, unixMicrosApply[0].Fields(), "should add unix time in us as field 'unix_us'") +} + +func TestDateOffset(t *testing.T) { + plugin := &Date{ + TagKey: "hour", + DateFormat: "15", + DateOffset: internal.Duration{Duration: 2 * time.Hour}, + } + + err := plugin.Init() + require.NoError(t, err) + + metric := testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(1578603600, 0), + ) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "hour": "23", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(1578603600, 0), + ), + } + + actual := plugin.Apply(metric) + testutil.RequireMetricsEqual(t, expected, actual) +} diff --git a/plugins/processors/dedup/README.md b/plugins/processors/dedup/README.md new file mode 100644 index 000000000..d0b516c27 --- /dev/null +++ b/plugins/processors/dedup/README.md @@ -0,0 +1,24 @@ +# Dedup Processor Plugin + +Filter metrics whose field values are exact repetitions of the previous values. 
+ +### Configuration + +```toml +[[processors.dedup]] + ## Maximum time to suppress output + dedup_interval = "600s" +``` + +### Example + +```diff +- cpu,cpu=cpu0 time_idle=42i,time_guest=1i +- cpu,cpu=cpu0 time_idle=42i,time_guest=2i +- cpu,cpu=cpu0 time_idle=42i,time_guest=2i +- cpu,cpu=cpu0 time_idle=44i,time_guest=2i +- cpu,cpu=cpu0 time_idle=44i,time_guest=2i ++ cpu,cpu=cpu0 time_idle=42i,time_guest=1i ++ cpu,cpu=cpu0 time_idle=42i,time_guest=2i ++ cpu,cpu=cpu0 time_idle=44i,time_guest=2i +``` diff --git a/plugins/processors/dedup/dedup.go b/plugins/processors/dedup/dedup.go new file mode 100644 index 000000000..3dd7516a6 --- /dev/null +++ b/plugins/processors/dedup/dedup.go @@ -0,0 +1,128 @@ +package dedup + +import ( + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/plugins/processors" +) + +var sampleConfig = ` + ## Maximum time to suppress output + dedup_interval = "600s" +` + +type Dedup struct { + DedupInterval internal.Duration `toml:"dedup_interval"` + FlushTime time.Time + Cache map[uint64]telegraf.Metric +} + +func (d *Dedup) SampleConfig() string { + return sampleConfig +} + +func (d *Dedup) Description() string { + return "Filter metrics with repeating field values" +} + +// Remove single item from slice +func remove(slice []telegraf.Metric, i int) []telegraf.Metric { + slice[len(slice)-1], slice[i] = slice[i], slice[len(slice)-1] + return slice[:len(slice)-1] +} + +// Remove expired items from cache +func (d *Dedup) cleanup() { + // No need to cleanup cache too often. Lets save some CPU + if time.Since(d.FlushTime) < d.DedupInterval.Duration { + return + } + d.FlushTime = time.Now() + keep := make(map[uint64]telegraf.Metric, 0) + for id, metric := range d.Cache { + if time.Since(metric.Time()) < d.DedupInterval.Duration { + keep[id] = metric + } + } + d.Cache = keep +} + +// Save item to cache +func (d *Dedup) save(metric telegraf.Metric, id uint64) { + d.Cache[id] = metric.Copy() + d.Cache[id].Accept() +} + +// main processing method +func (d *Dedup) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + for idx, metric := range metrics { + id := metric.HashID() + m, ok := d.Cache[id] + + // If not in cache then just save it + if !ok { + d.save(metric, id) + continue + } + + // If cache item has expired then refresh it + if time.Since(m.Time()) >= d.DedupInterval.Duration { + d.save(metric, id) + continue + } + + // For each field compare value with the cached one + changed := false + added := false + sametime := metric.Time() == m.Time() + for _, f := range metric.FieldList() { + if value, ok := m.GetField(f.Key); ok { + if value != f.Value { + changed = true + break + } + } else if sametime { + // This field isn't in the cached metric but it's the + // same series and timestamp. Merge it into the cached + // metric. + + // Metrics have a ValueType that applies to all values + // in the metric. If an input needs to produce values + // with different ValueTypes but the same timestamp, + // they have to produce multiple metrics. (See the + // system input for an example.) In this case, dedup + // ignores the ValueTypes of the metrics and merges + // the fields into one metric for the dup check. 
+ + m.AddField(f.Key, f.Value) + added = true + } + } + // If any field value has changed then refresh the cache + if changed { + d.save(metric, id) + continue + } + + if sametime && added { + continue + } + + // In any other case remove metric from the output + metrics = remove(metrics, idx) + } + d.cleanup() + return metrics +} + +func init() { + processors.Add("dedup", func() telegraf.Processor { + return &Dedup{ + DedupInterval: internal.Duration{Duration: 10 * time.Minute}, + FlushTime: time.Now(), + Cache: make(map[uint64]telegraf.Metric), + } + }) +} diff --git a/plugins/processors/dedup/dedup_test.go b/plugins/processors/dedup/dedup_test.go new file mode 100644 index 000000000..cae2bf1a5 --- /dev/null +++ b/plugins/processors/dedup/dedup_test.go @@ -0,0 +1,194 @@ +package dedup + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" +) + +func createMetric(name string, value int64, when time.Time) telegraf.Metric { + m, _ := metric.New(name, + map[string]string{"tag": "tag_value"}, + map[string]interface{}{"value": value}, + when, + ) + return m +} + +func createDedup(initTime time.Time) Dedup { + return Dedup{ + DedupInterval: internal.Duration{Duration: 10 * time.Minute}, + FlushTime: initTime, + Cache: make(map[uint64]telegraf.Metric), + } +} + +func assertCacheRefresh(t *testing.T, proc *Dedup, item telegraf.Metric) { + id := item.HashID() + name := item.Name() + // cache is not empty + require.NotEqual(t, 0, len(proc.Cache)) + // cache has metric with proper id + cache, present := proc.Cache[id] + require.True(t, present) + // cache has metric with proper name + require.Equal(t, name, cache.Name()) + // cached metric has proper field + cValue, present := cache.GetField("value") + require.True(t, present) + iValue, _ := item.GetField("value") + require.Equal(t, cValue, iValue) + // cached metric has proper timestamp + require.Equal(t, cache.Time(), item.Time()) +} + +func assertCacheHit(t *testing.T, proc *Dedup, item telegraf.Metric) { + id := item.HashID() + name := item.Name() + // cache is not empty + require.NotEqual(t, 0, len(proc.Cache)) + // cache has metric with proper id + cache, present := proc.Cache[id] + require.True(t, present) + // cache has metric with proper name + require.Equal(t, name, cache.Name()) + // cached metric has proper field + cValue, present := cache.GetField("value") + require.True(t, present) + iValue, _ := item.GetField("value") + require.Equal(t, cValue, iValue) + // cached metric did NOT change timestamp + require.NotEqual(t, cache.Time(), item.Time()) +} + +func assertMetricPassed(t *testing.T, target []telegraf.Metric, source telegraf.Metric) { + // target is not empty + require.NotEqual(t, 0, len(target)) + // target has metric with proper name + require.Equal(t, "m1", target[0].Name()) + // target metric has proper field + tValue, present := target[0].GetField("value") + require.True(t, present) + sValue, present := source.GetField("value") + require.Equal(t, tValue, sValue) + // target metric has proper timestamp + require.Equal(t, target[0].Time(), source.Time()) +} + +func assertMetricSuppressed(t *testing.T, target []telegraf.Metric, source telegraf.Metric) { + // target is empty + require.Equal(t, 0, len(target)) +} + +func TestProcRetainsMetric(t *testing.T) { + deduplicate := createDedup(time.Now()) + source := createMetric("m1", 1, time.Now()) + target := deduplicate.Apply(source) + + 
assertCacheRefresh(t, &deduplicate, source) + assertMetricPassed(t, target, source) +} + +func TestSuppressRepeatedValue(t *testing.T) { + deduplicate := createDedup(time.Now()) + // Create metric in the past + source := createMetric("m1", 1, time.Now().Add(-1*time.Second)) + target := deduplicate.Apply(source) + source = createMetric("m1", 1, time.Now()) + target = deduplicate.Apply(source) + + assertCacheHit(t, &deduplicate, source) + assertMetricSuppressed(t, target, source) +} + +func TestPassUpdatedValue(t *testing.T) { + deduplicate := createDedup(time.Now()) + // Create metric in the past + source := createMetric("m1", 1, time.Now().Add(-1*time.Second)) + target := deduplicate.Apply(source) + source = createMetric("m1", 2, time.Now()) + target = deduplicate.Apply(source) + + assertCacheRefresh(t, &deduplicate, source) + assertMetricPassed(t, target, source) +} + +func TestPassAfterCacheExpire(t *testing.T) { + deduplicate := createDedup(time.Now()) + // Create metric in the past + source := createMetric("m1", 1, time.Now().Add(-1*time.Hour)) + target := deduplicate.Apply(source) + source = createMetric("m1", 1, time.Now()) + target = deduplicate.Apply(source) + + assertCacheRefresh(t, &deduplicate, source) + assertMetricPassed(t, target, source) +} + +func TestCacheRetainsMetrics(t *testing.T) { + deduplicate := createDedup(time.Now()) + // Create metric in the past 3sec + source := createMetric("m1", 1, time.Now().Add(-3*time.Hour)) + deduplicate.Apply(source) + // Create metric in the past 2sec + source = createMetric("m1", 1, time.Now().Add(-2*time.Hour)) + deduplicate.Apply(source) + source = createMetric("m1", 1, time.Now()) + deduplicate.Apply(source) + + assertCacheRefresh(t, &deduplicate, source) +} + +func TestCacheShrink(t *testing.T) { + // Time offset is more than 2 * DedupInterval + deduplicate := createDedup(time.Now().Add(-2 * time.Hour)) + // Time offset is more than 1 * DedupInterval + source := createMetric("m1", 1, time.Now().Add(-1*time.Hour)) + deduplicate.Apply(source) + + require.Equal(t, 0, len(deduplicate.Cache)) +} + +func TestSameTimestamp(t *testing.T) { + now := time.Now() + dedup := createDedup(now) + var in telegraf.Metric + var out []telegraf.Metric + + in, _ = metric.New("metric", + map[string]string{"tag": "value"}, + map[string]interface{}{"foo": 1}, // field + now, + ) + out = dedup.Apply(in) + require.Equal(t, []telegraf.Metric{in}, out) // pass + + in, _ = metric.New("metric", + map[string]string{"tag": "value"}, + map[string]interface{}{"bar": 1}, // different field + now, + ) + out = dedup.Apply(in) + require.Equal(t, []telegraf.Metric{in}, out) // pass + + in, _ = metric.New("metric", + map[string]string{"tag": "value"}, + map[string]interface{}{"bar": 2}, // same field different value + now, + ) + out = dedup.Apply(in) + require.Equal(t, []telegraf.Metric{in}, out) // pass + + in, _ = metric.New("metric", + map[string]string{"tag": "value"}, + map[string]interface{}{"bar": 2}, // same field same value + now, + ) + out = dedup.Apply(in) + require.Equal(t, []telegraf.Metric{}, out) // drop +} diff --git a/plugins/processors/defaults/README.md b/plugins/processors/defaults/README.md new file mode 100644 index 000000000..638a3dac7 --- /dev/null +++ b/plugins/processors/defaults/README.md @@ -0,0 +1,42 @@ +# Defaults Processor + +The *Defaults* processor allows you to ensure certain fields will always exist with a specified default value on your metric(s). + +There are three cases where this processor will insert a configured default field. + +1. 
The field is nil on the incoming metric
+1. The field is not nil, but its value is an empty string.
+1. The field is not nil, but its value is a string of one or more spaces.
+
+### Configuration
+```toml
+## Set default fields on your metric(s) when they are nil or empty
+[[processors.defaults]]
+
+## This table determines what fields will be inserted in your metric(s)
+  [processors.defaults.fields]
+    field_1 = "bar"
+    time_idle = 0
+    is_error = true
+```
+
+### Example
+Ensure a _status\_code_ field with _N/A_ is inserted in the metric when one is not already set in the metric:
+
+```toml
+[[processors.defaults]]
+  [processors.defaults.fields]
+    status_code = "N/A"
+```
+
+```diff
+- lb,http_method=GET cache_status=HIT,latency=230
++ lb,http_method=GET cache_status=HIT,latency=230,status_code="N/A"
+```
+
+Ensure an empty string gets replaced by a default:
+
+```diff
+- lb,http_method=GET cache_status=HIT,latency=230,status_code=""
++ lb,http_method=GET cache_status=HIT,latency=230,status_code="N/A"
+```
diff --git a/plugins/processors/defaults/defaults.go b/plugins/processors/defaults/defaults.go
new file mode 100644
index 000000000..eaffdf81a
--- /dev/null
+++ b/plugins/processors/defaults/defaults.go
@@ -0,0 +1,72 @@
+package defaults
+
+import (
+	"github.com/influxdata/telegraf"
+	"github.com/influxdata/telegraf/plugins/processors"
+	"strings"
+)
+
+const sampleConfig = `
+  ## Ensures a set of fields always exists on your metric(s) with their
+  ## respective default value.
+  ## For any given field pair (key = default), if it's not set, a field
+  ## is set on the metric with the specified default.
+  ##
+  ## A field is considered not set if it is nil on the incoming metric;
+  ## or it is not nil but its value is an empty string or is a string
+  ## of one or more spaces.
+  ## = 
+  # [processors.defaults.fields]
+  #   field_1 = "bar"
+  #   time_idle = 0
+  #   is_error = true
+`
+
+// Defaults is a processor for ensuring certain fields always exist
+// on your Metrics with at least a default value.
+type Defaults struct {
+	DefaultFieldsSets map[string]interface{} `toml:"fields"`
+}
+
+// SampleConfig represents a sample toml config for this plugin.
+func (def *Defaults) SampleConfig() string {
+	return sampleConfig
+}
+
+// Description is a brief description of this processor plugin's behaviour.
+func (def *Defaults) Description() string {
+	return "Defaults sets default value(s) for specified fields that are not set on incoming metrics."
+}
+
+// Apply contains the main implementation of this processor.
+// For each metric in 'inputMetrics', it goes over each default pair.
+// If the field in the pair does not exist on the metric, the associated default is added.
+// If the field was found, then, if its value is the empty string or one or more spaces, it is replaced
+// by the associated default.
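+// For example, with the README's configured default of status_code = "N/A",
+// an incoming field status_code = "  " is rewritten to "N/A", while an
+// already-populated status_code = "200" is left untouched.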
+func (def *Defaults) Apply(inputMetrics ...telegraf.Metric) []telegraf.Metric { + for _, metric := range inputMetrics { + for defField, defValue := range def.DefaultFieldsSets { + if maybeCurrent, isSet := metric.GetField(defField); !isSet { + metric.AddField(defField, defValue) + } else if trimmed, isStr := maybeTrimmedString(maybeCurrent); isStr && trimmed == "" { + metric.RemoveField(defField) + metric.AddField(defField, defValue) + } + } + } + return inputMetrics +} + +func maybeTrimmedString(v interface{}) (string, bool) { + switch value := v.(type) { + case string: + return strings.TrimSpace(value), true + } + return "", false +} + +func init() { + processors.Add("defaults", func() telegraf.Processor { + return &Defaults{} + }) +} diff --git a/plugins/processors/defaults/defaults_test.go b/plugins/processors/defaults/defaults_test.go new file mode 100644 index 000000000..c0e930fc6 --- /dev/null +++ b/plugins/processors/defaults/defaults_test.go @@ -0,0 +1,131 @@ +package defaults + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" +) + +func TestDefaults(t *testing.T) { + scenarios := []struct { + name string + defaults *Defaults + input telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "Test that no values are changed since they are not nil or empty", + defaults: &Defaults{ + DefaultFieldsSets: map[string]interface{}{ + "usage": 30, + "wind_feel": "very chill", + "is_dead": true, + }, + }, + input: testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "usage": 45, + "wind_feel": "a dragon's breath", + "is_dead": false, + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "usage": 45, + "wind_feel": "a dragon's breath", + "is_dead": false, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "Tests that the missing fields are set on the metric", + defaults: &Defaults{ + DefaultFieldsSets: map[string]interface{}{ + "max_clock_gz": 6, + "wind_feel": "Unknown", + "boost_enabled": false, + "variance": 1.2, + }, + }, + input: testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "usage": 45, + "temperature": 64, + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "usage": 45, + "temperature": 64, + "max_clock_gz": 6, + "wind_feel": "Unknown", + "boost_enabled": false, + "variance": 1.2, + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "Tests that set but empty fields are replaced by specified defaults", + defaults: &Defaults{ + DefaultFieldsSets: map[string]interface{}{ + "max_clock_gz": 6, + "wind_feel": "Unknown", + "fan_loudness": "Inaudible", + "boost_enabled": false, + }, + }, + input: testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "max_clock_gz": "", + "wind_feel": " ", + "fan_loudness": " ", + }, + time.Unix(0, 0), + ), + expected: []telegraf.Metric{ + testutil.MustMetric( + "CPU metrics", + map[string]string{}, + map[string]interface{}{ + "max_clock_gz": 6, + "wind_feel": "Unknown", + "fan_loudness": "Inaudible", + "boost_enabled": false, + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + defaults := scenario.defaults + + resultMetrics := defaults.Apply(scenario.input) + assert.Len(t, 
resultMetrics, 1) + testutil.RequireMetricsEqual(t, scenario.expected, resultMetrics) + }) + } +} diff --git a/plugins/processors/enum/README.md b/plugins/processors/enum/README.md index f0ed58566..72a055625 100644 --- a/plugins/processors/enum/README.md +++ b/plugins/processors/enum/README.md @@ -1,33 +1,50 @@ # Enum Processor Plugin -The Enum Processor allows the configuration of value mappings for metric fields. +The Enum Processor allows the configuration of value mappings for metric tags or fields. The main use-case for this is to rewrite status codes such as _red_, _amber_ and -_green_ by numeric values such as 0, 1, 2. The plugin supports string and bool -types for the field values. Multiple Fields can be configured with separate -value mappings for each field. Default mapping values can be configured to be +_green_ by numeric values such as 0, 1, 2. The plugin supports string, int and bool +types for the field values. Multiple tags or fields can be configured with separate +value mappings for each. Default mapping values can be configured to be used for all values, which are not contained in the value_mappings. The -processor supports explicit configuration of a destination field. By default the -source field is overwritten. +processor supports explicit configuration of a destination tag or field. By default the +source tag or field is overwritten. ### Configuration: ```toml [[processors.enum]] - [[processors.enum.fields]] + [[processors.enum.mapping]] ## Name of the field to map - source = "name" + field = "status" - ## Destination field to be used for the mapped value. By default the source - ## field is used, overwriting the original value. - # destination = "mapped" + ## Name of the tag to map + # tag = "status" + + ## Destination tag or field to be used for the mapped value. By default the + ## source tag or field is used, overwriting the original value. + dest = "status_code" ## Default value to be used for all values not contained in the mapping - ## table. When unset, the unmodified value for the field will be used if no - ## match is found. + ## table. When unset and no match is found, the original field will remain + ## unmodified and the destination tag or field will not be created. # default = 0 ## Table of mappings - [processors.enum.fields.value_mappings] - value1 = 1 - value2 = 2 + [processors.enum.mapping.value_mappings] + green = 1 + amber = 2 + red = 3 +``` + +### Example: + +```diff +- xyzzy status="green" 1502489900000000000 ++ xyzzy status="green",status_code=1i 1502489900000000000 +``` + +With unknown value and no default set: +```diff +- xyzzy status="black" 1502489900000000000 ++ xyzzy status="black" 1502489900000000000 ``` diff --git a/plugins/processors/enum/enum.go b/plugins/processors/enum/enum.go index 134a02bb1..a96e7d509 100644 --- a/plugins/processors/enum/enum.go +++ b/plugins/processors/enum/enum.go @@ -1,6 +1,7 @@ package enum import ( + "fmt" "strconv" "github.com/influxdata/telegraf" @@ -8,13 +9,16 @@ import ( ) var sampleConfig = ` - [[processors.enum.fields]] + [[processors.enum.mapping]] ## Name of the field to map - source = "name" + field = "status" - ## Destination field to be used for the mapped value. By default the source - ## field is used, overwriting the original value. - # destination = "mapped" + ## Name of the tag to map + # tag = "status" + + ## Destination tag or field to be used for the mapped value. By default the + ## source tag or field is used, overwriting the original value. 
+ dest = "status_code" ## Default value to be used for all values not contained in the mapping ## table. When unset, the unmodified value for the field will be used if no @@ -22,18 +26,20 @@ var sampleConfig = ` # default = 0 ## Table of mappings - [processors.enum.fields.value_mappings] - value1 = 1 - value2 = 2 + [processors.enum.mapping.value_mappings] + green = 1 + amber = 2 + red = 3 ` type EnumMapper struct { - Fields []Mapping + Mappings []Mapping `toml:"mapping"` } type Mapping struct { - Source string - Destination string + Tag string + Field string + Dest string Default interface{} ValueMappings map[string]interface{} } @@ -54,11 +60,25 @@ func (mapper *EnumMapper) Apply(in ...telegraf.Metric) []telegraf.Metric { } func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric { - for _, mapping := range mapper.Fields { - if originalValue, isPresent := metric.GetField(mapping.Source); isPresent == true { - if adjustedValue, isString := adjustBoolValue(originalValue).(string); isString == true { - if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent == true { - writeField(metric, mapping.getDestination(), mappedValue) + for _, mapping := range mapper.Mappings { + if mapping.Field != "" { + if originalValue, isPresent := metric.GetField(mapping.Field); isPresent { + if adjustedValue, isString := adjustValue(originalValue).(string); isString { + if mappedValue, isMappedValuePresent := mapping.mapValue(adjustedValue); isMappedValuePresent { + writeField(metric, mapping.getDestination(), mappedValue) + } + } + } + } + if mapping.Tag != "" { + if originalValue, isPresent := metric.GetTag(mapping.Tag); isPresent { + if mappedValue, isMappedValuePresent := mapping.mapValue(originalValue); isMappedValuePresent { + switch val := mappedValue.(type) { + case string: + writeTag(metric, mapping.getDestinationTag(), val) + default: + writeTag(metric, mapping.getDestinationTag(), fmt.Sprintf("%v", val)) + } } } } @@ -66,11 +86,17 @@ func (mapper *EnumMapper) applyMappings(metric telegraf.Metric) telegraf.Metric return metric } -func adjustBoolValue(in interface{}) interface{} { - if mappedBool, isBool := in.(bool); isBool == true { - return strconv.FormatBool(mappedBool) +func adjustValue(in interface{}) interface{} { + switch val := in.(type) { + case bool: + return strconv.FormatBool(val) + case int64: + return strconv.FormatInt(val, 10) + case uint64: + return strconv.FormatUint(val, 10) + default: + return in } - return in } func (mapping *Mapping) mapValue(original string) (interface{}, bool) { @@ -84,19 +110,29 @@ func (mapping *Mapping) mapValue(original string) (interface{}, bool) { } func (mapping *Mapping) getDestination() string { - if mapping.Destination != "" { - return mapping.Destination + if mapping.Dest != "" { + return mapping.Dest } - return mapping.Source + return mapping.Field +} + +func (mapping *Mapping) getDestinationTag() string { + if mapping.Dest != "" { + return mapping.Dest + } + return mapping.Tag } func writeField(metric telegraf.Metric, name string, value interface{}) { - if metric.HasField(name) { - metric.RemoveField(name) - } + metric.RemoveField(name) metric.AddField(name, value) } +func writeTag(metric telegraf.Metric, name string, value string) { + metric.RemoveTag(name) + metric.AddTag(name, value) +} + func init() { processors.Add("enum", func() telegraf.Processor { return &EnumMapper{} diff --git a/plugins/processors/enum/enum_test.go b/plugins/processors/enum/enum_test.go index 
2185b91b6..de13aad15 100644 --- a/plugins/processors/enum/enum_test.go +++ b/plugins/processors/enum/enum_test.go @@ -14,7 +14,9 @@ func createTestMetric() telegraf.Metric { map[string]string{"tag": "tag_value"}, map[string]interface{}{ "string_value": "test", - "int_value": int(13), + "int_value": int(200), + "uint_value": uint(500), + "float_value": float64(3.14), "true_value": true, }, time.Now(), @@ -27,12 +29,23 @@ func calculateProcessedValues(mapper EnumMapper, metric telegraf.Metric) map[str return processed[0].Fields() } +func calculateProcessedTags(mapper EnumMapper, metric telegraf.Metric) map[string]string { + processed := mapper.Apply(metric) + return processed[0].Tags() +} + func assertFieldValue(t *testing.T, expected interface{}, field string, fields map[string]interface{}) { value, present := fields[field] assert.True(t, present, "value of field '"+field+"' was not present") assert.EqualValues(t, expected, value) } +func assertTagValue(t *testing.T, expected interface{}, tag string, tags map[string]string) { + value, present := tags[tag] + assert.True(t, present, "value of tag '"+tag+"' was not present") + assert.EqualValues(t, expected, value) +} + func TestRetainsMetric(t *testing.T) { mapper := EnumMapper{} source := createTestMetric() @@ -41,39 +54,70 @@ func TestRetainsMetric(t *testing.T) { fields := target.Fields() assertFieldValue(t, "test", "string_value", fields) - assertFieldValue(t, 13, "int_value", fields) + assertFieldValue(t, 200, "int_value", fields) + assertFieldValue(t, 500, "uint_value", fields) assertFieldValue(t, true, "true_value", fields) assert.Equal(t, "m1", target.Name()) assert.Equal(t, source.Tags(), target.Tags()) assert.Equal(t, source.Time(), target.Time()) } -func TestMapsSingleStringValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", ValueMappings: map[string]interface{}{"test": int64(1)}}}} +func TestMapsSingleStringValueTag(t *testing.T) { + mapper := EnumMapper{Mappings: []Mapping{{Tag: "tag", ValueMappings: map[string]interface{}{"tag_value": "valuable"}}}} - fields := calculateProcessedValues(mapper, createTestMetric()) + tags := calculateProcessedTags(mapper, createTestMetric()) - assertFieldValue(t, 1, "string_value", fields) + assertTagValue(t, "valuable", "tag", tags) } -func TestNoFailureOnMappingsOnNonStringValuedFields(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "int_value", ValueMappings: map[string]interface{}{"13i": int64(7)}}}} +func TestNoFailureOnMappingsOnNonSupportedValuedFields(t *testing.T) { + mapper := EnumMapper{Mappings: []Mapping{{Field: "float_value", ValueMappings: map[string]interface{}{"3.14": "pi"}}}} fields := calculateProcessedValues(mapper, createTestMetric()) - assertFieldValue(t, 13, "int_value", fields) + assertFieldValue(t, float64(3.14), "float_value", fields) } -func TestMapSingleBoolValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "true_value", ValueMappings: map[string]interface{}{"true": int64(1)}}}} +func TestMappings(t *testing.T) { + mappings := []map[string][]interface{}{ + { + "field_name": []interface{}{"string_value"}, + "target_values": []interface{}{"test", "test", "test", "not_test", "50", "true"}, + "mapped_values": []interface{}{"test_1", 5, true, "test_1", 10, false}, + "expected_values": []interface{}{"test_1", 5, true, "test", "test", "test"}, + }, + { + "field_name": []interface{}{"true_value"}, + "target_value": []interface{}{"true", "true", "true", "false", "test", "5"}, + "mapped_value": 
[]interface{}{false, 1, "false", false, false, false}, + "expected_value": []interface{}{false, 1, "false", true, true, true}, + }, + { + "field_name": []interface{}{"int_value"}, + "target_value": []interface{}{"200", "200", "200", "200", "test", "5"}, + "mapped_value": []interface{}{"http_ok", true, 1, float64(200.001), false, false}, + "expected_value": []interface{}{"http_ok", true, 1, float64(200.001), 200, 200}, + }, + { + "field_name": []interface{}{"uint_value"}, + "target_value": []interface{}{"500", "500", "500", "test", "false", "5"}, + "mapped_value": []interface{}{"internal_error", 1, false, false, false, false}, + "expected_value": []interface{}{"internal_error", 1, false, 500, 500, 500}, + }, + } - fields := calculateProcessedValues(mapper, createTestMetric()) - - assertFieldValue(t, 1, "true_value", fields) + for _, mapping := range mappings { + field_name := mapping["field_name"][0].(string) + for index := range mapping["target_value"] { + mapper := EnumMapper{Mappings: []Mapping{{Field: field_name, ValueMappings: map[string]interface{}{mapping["target_value"][index].(string): mapping["mapped_value"][index]}}}} + fields := calculateProcessedValues(mapper, createTestMetric()) + assertFieldValue(t, mapping["expected_value"][index], field_name, fields) + } + } } func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"other": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"other": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) @@ -81,7 +125,7 @@ func TestMapsToDefaultValueOnUnknownSourceValue(t *testing.T) { } func TestDoNotMapToDefaultValueKnownSourceValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"test": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Default: int64(42), ValueMappings: map[string]interface{}{"test": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) @@ -89,7 +133,7 @@ func TestDoNotMapToDefaultValueKnownSourceValue(t *testing.T) { } func TestNoMappingWithoutDefaultOrDefinedMappingValue(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", ValueMappings: map[string]interface{}{"other": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", ValueMappings: map[string]interface{}{"other": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) @@ -97,10 +141,21 @@ func TestNoMappingWithoutDefaultOrDefinedMappingValue(t *testing.T) { } func TestWritesToDestination(t *testing.T) { - mapper := EnumMapper{Fields: []Mapping{{Source: "string_value", Destination: "string_code", ValueMappings: map[string]interface{}{"test": int64(1)}}}} + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: "string_code", ValueMappings: map[string]interface{}{"test": int64(1)}}}} fields := calculateProcessedValues(mapper, createTestMetric()) assertFieldValue(t, "test", "string_value", fields) assertFieldValue(t, 1, "string_code", fields) } + +func TestDoNotWriteToDestinationWithoutDefaultOrDefinedMapping(t *testing.T) { + field := "string_code" + mapper := EnumMapper{Mappings: []Mapping{{Field: "string_value", Dest: field, ValueMappings: map[string]interface{}{"other": int64(1)}}}} + + fields := 
calculateProcessedValues(mapper, createTestMetric())
+
+	assertFieldValue(t, "test", "string_value", fields)
+	_, present := fields[field]
+	assert.False(t, present, "value of field '"+field+"' was present")
+}
diff --git a/plugins/processors/filepath/README.md b/plugins/processors/filepath/README.md
new file mode 100644
index 000000000..f4473ff62
--- /dev/null
+++ b/plugins/processors/filepath/README.md
@@ -0,0 +1,207 @@
+# Filepath Processor Plugin
+
+The `filepath` processor plugin maps certain Go functions from [path/filepath](https://golang.org/pkg/path/filepath/)
+onto tag and field values. Values can be modified in place or stored in another key.
+
+Implemented functions are:
+
+* [Base](https://golang.org/pkg/path/filepath/#Base) (accessible through `[[processors.filepath.basename]]`)
+* [Rel](https://golang.org/pkg/path/filepath/#Rel) (accessible through `[[processors.filepath.rel]]`)
+* [Dir](https://golang.org/pkg/path/filepath/#Dir) (accessible through `[[processors.filepath.dir]]`)
+* [Clean](https://golang.org/pkg/path/filepath/#Clean) (accessible through `[[processors.filepath.clean]]`)
+* [ToSlash](https://golang.org/pkg/path/filepath/#ToSlash) (accessible through `[[processors.filepath.toslash]]`)
+
+On top of that, the plugin provides an extra function to retrieve the final path component without its extension. This
+function is accessible through the `[[processors.filepath.stem]]` configuration item.
+
+Please note that, in this implementation, these functions are processed in the order they appear above (except for
+`stem`, which is applied first).
+
+Specify the `tag` and/or `field` that you want processed in each section and optionally a `dest` if you want the result
+stored in a new tag or field.
+
+If you plan to apply multiple transformations to the same `tag`/`field`, bear in mind the processing order stated above.
+
+## Configuration
+
+```toml
+[[processors.filepath]]
+  ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag
+  # [[processors.filepath.basename]]
+  #   tag = "path"
+  #   dest = "basepath"
+
+  ## Treat the field value as a path and keep all but the last element of path, typically the path's directory
+  # [[processors.filepath.dirname]]
+  #   field = "path"
+
+  ## Treat the tag value as a path, converting it to its last element without its suffix
+  # [[processors.filepath.stem]]
+  #   tag = "path"
+
+  ## Treat the tag value as a path, converting it to the shortest path name equivalent
+  ## to path by purely lexical processing
+  # [[processors.filepath.clean]]
+  #   tag = "path"
+
+  ## Treat the tag value as a path, converting it to a relative path that is lexically
+  ## equivalent to the source path when joined to 'base_path'
+  # [[processors.filepath.rel]]
+  #   tag = "path"
+  #   base_path = "/var/log"
+
+  ## Treat the tag value as a path, replacing each separator character in path with a '/' character. This only
+  ## has an effect on Windows
+  # [[processors.filepath.toslash]]
+  #   tag = "path"
+```
+
+## Considerations
+
+### Clean
+
+Even though `clean` is provided as a standalone function, it is also invoked when using the `rel` and `dirname` functions,
+so there is no need to use it along with them.
+ +That is: + + ```toml +[[processors.filepath]] + [[processors.filepath.dir]] + tag = "path" + [[processors.filepath.clean]] + tag = "path" + ``` + +Is equivalent to: + + ```toml +[[processors.filepath]] + [[processors.filepath.dir]] + tag = "path" + ``` + +### ToSlash + +The effects of this function are only noticeable on Windows platforms, because of the underlying Go implementation. + +## Examples + +### Basename + +```toml +[[processors.filepath]] + [[processors.filepath.basename]] + tag = "path" +``` + +```diff +- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000 ++ my_metric,path="ajob.log" duration_seconds=134 1587920425000000000 +``` + +### Dirname + +```toml +[[processors.filepath]] + [[processors.filepath.dirname]] + field = "path" + dest = "folder" +``` + +```diff +- my_metric path="/var/log/batch/ajob.log",duration_seconds=134 1587920425000000000 ++ my_metric path="/var/log/batch/ajob.log",folder="/var/log/batch",duration_seconds=134 1587920425000000000 +``` + +### Stem + +```toml +[[processors.filepath]] + [[processors.filepath.stem]] + tag = "path" +``` + +```diff +- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000 ++ my_metric,path="ajob" duration_seconds=134 1587920425000000000 +``` + +### Clean + +```toml +[[processors.filepath]] + [[processors.filepath.clean]] + tag = "path" +``` + +```diff +- my_metric,path="/var/log/dummy/../batch//ajob.log" duration_seconds=134 1587920425000000000 ++ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000 +``` + +### Rel + +```toml +[[processors.filepath]] + [[processors.filepath.rel]] + tag = "path" + base_path = "/var/log" +``` + +```diff +- my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000 ++ my_metric,path="batch/ajob.log" duration_seconds=134 1587920425000000000 +``` + +### ToSlash + +```toml +[[processors.filepath]] + [[processors.filepath.toslash]] + tag = "path" +``` + +```diff +- my_metric,path="\var\log\batch\ajob.log" duration_seconds=134 1587920425000000000 ++ my_metric,path="/var/log/batch/ajob.log" duration_seconds=134 1587920425000000000 +``` + +### Processing paths from tail plugin + +This plugin can be used together with the +[tail input plugin](https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail) to make modifications +to the `path` tag injected for every file. + +Scenario: + +* A log file `/var/log/myjobs/mysql_backup.log`, containing logs for a job execution. Whenever the job ends, a line is +written to the log file following this format: `2020-04-05 11:45:21 total time execution: 70 seconds` +* We want to generate a measurement that captures the duration of the script as a field and includes the `path` as a +tag + * We are interested in the filename without its extension, since it might be enough information for plotting our + execution times in a dashboard + * Just in case, we don't want to override the original path (if for some reason we end up having duplicates we might + want this information) + +For this purpose, we will use the `tail` input plugin, the `grok` parser plugin and the `filepath` processor. 
+ +```toml +[[inputs.tail]] + files = ["/var/log/myjobs/**.log"] + data_format = "grok" + grok_patterns = ['%{TIMESTAMP_ISO8601:timestamp:ts-"2006-01-02 15:04:05"} total time execution: %{NUMBER:duration_seconds:int}'] + name_override = "myjobs" + +[[processors.filepath]] + [[processors.filepath.stem]] + tag = "path" + dest = "stempath" + +``` + +The resulting output for a job taking 70 seconds for the mentioned log file would look like: + +```text +myjobs_duration_seconds,host="my-host",path="/var/log/myjobs/mysql_backup.log",stempath="mysql_backup" 70 1587920425000000000 +``` diff --git a/plugins/processors/filepath/filepath.go b/plugins/processors/filepath/filepath.go new file mode 100644 index 000000000..70013de17 --- /dev/null +++ b/plugins/processors/filepath/filepath.go @@ -0,0 +1,150 @@ +package filepath + +import ( + "path/filepath" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +type Options struct { + BaseName []BaseOpts `toml:"basename"` + DirName []BaseOpts `toml:"dirname"` + Stem []BaseOpts + Clean []BaseOpts + Rel []RelOpts + ToSlash []BaseOpts `toml:"toslash"` +} + +type ProcessorFunc func(s string) string + +// BaseOpts contains options applicable to every function +type BaseOpts struct { + Field string + Tag string + Dest string +} + +type RelOpts struct { + BaseOpts + BasePath string +} + +const sampleConfig = ` + ## Treat the tag value as a path and convert it to its last element, storing the result in a new tag + # [[processors.filepath.basename]] + # tag = "path" + # dest = "basepath" + + ## Treat the field value as a path and keep all but the last element of path, typically the path's directory + # [[processors.filepath.dirname]] + # field = "path" + + ## Treat the tag value as a path, converting it to its the last element without its suffix + # [[processors.filepath.stem]] + # tag = "path" + + ## Treat the tag value as a path, converting it to the shortest path name equivalent + ## to path by purely lexical processing + # [[processors.filepath.clean]] + # tag = "path" + + ## Treat the tag value as a path, converting it to a relative path that is lexically + ## equivalent to the source path when joined to 'base_path' + # [[processors.filepath.rel]] + # tag = "path" + # base_path = "/var/log" + + ## Treat the tag value as a path, replacing each separator character in path with a '/' character. 
Has only + ## effect on Windows + # [[processors.filepath.toslash]] + # tag = "path" +` + +func (o *Options) SampleConfig() string { + return sampleConfig +} + +func (o *Options) Description() string { + return "Performs file path manipulations on tags and fields" +} + +// applyFunc applies the specified function to the metric +func (o *Options) applyFunc(bo BaseOpts, fn ProcessorFunc, metric telegraf.Metric) { + if bo.Tag != "" { + if v, ok := metric.GetTag(bo.Tag); ok { + targetTag := bo.Tag + + if bo.Dest != "" { + targetTag = bo.Dest + } + metric.AddTag(targetTag, fn(v)) + } + } + + if bo.Field != "" { + if v, ok := metric.GetField(bo.Field); ok { + targetField := bo.Field + + if bo.Dest != "" { + targetField = bo.Dest + } + + // Only string fields are considered + if v, ok := v.(string); ok { + metric.AddField(targetField, fn(v)) + } + + } + } +} + +func stemFilePath(path string) string { + return strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) +} + +// processMetric processes fields and tag values for a given metric applying the selected transformations +func (o *Options) processMetric(metric telegraf.Metric) { + // Stem + for _, v := range o.Stem { + o.applyFunc(v, stemFilePath, metric) + } + // Basename + for _, v := range o.BaseName { + o.applyFunc(v, filepath.Base, metric) + } + // Rel + for _, v := range o.Rel { + o.applyFunc(v.BaseOpts, func(s string) string { + relPath, _ := filepath.Rel(v.BasePath, s) + return relPath + }, metric) + } + // Dirname + for _, v := range o.DirName { + o.applyFunc(v, filepath.Dir, metric) + } + // Clean + for _, v := range o.Clean { + o.applyFunc(v, filepath.Clean, metric) + } + // ToSlash + for _, v := range o.ToSlash { + o.applyFunc(v, filepath.ToSlash, metric) + } +} + +func (o *Options) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, m := range in { + o.processMetric(m) + } + + return in +} + +func init() { + processors.Add("filepath", func() telegraf.Processor { + return &Options{} + }) +} diff --git a/plugins/processors/filepath/filepath_test.go b/plugins/processors/filepath/filepath_test.go new file mode 100644 index 000000000..a305c4c5c --- /dev/null +++ b/plugins/processors/filepath/filepath_test.go @@ -0,0 +1,70 @@ +// +build !windows + +package filepath + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +var samplePath = "/my/test//c/../path/file.log" + +func TestOptions_Apply(t *testing.T) { + tests := []testCase{ + { + name: "Smoke Test", + o: newOptions("/my/test/"), + inputMetrics: getSmokeTestInputMetrics(samplePath), + expectedMetrics: []telegraf.Metric{ + testutil.MustMetric( + smokeMetricName, + map[string]string{ + "baseTag": "file.log", + "dirTag": "/my/test/path", + "stemTag": "file", + "cleanTag": "/my/test/path/file.log", + "relTag": "path/file.log", + "slashTag": "/my/test//c/../path/file.log", + }, + map[string]interface{}{ + "baseField": "file.log", + "dirField": "/my/test/path", + "stemField": "file", + "cleanField": "/my/test/path/file.log", + "relField": "path/file.log", + "slashField": "/my/test//c/../path/file.log", + }, + time.Now()), + }, + }, + { + name: "Test Dest Option", + o: &Options{ + BaseName: []BaseOpts{ + { + Field: "sourcePath", + Tag: "sourcePath", + Dest: "basePath", + }, + }}, + inputMetrics: []telegraf.Metric{ + testutil.MustMetric( + "testMetric", + map[string]string{"sourcePath": samplePath}, + map[string]interface{}{"sourcePath": samplePath}, + time.Now()), + }, + expectedMetrics: []telegraf.Metric{ + 
testutil.MustMetric( + "testMetric", + map[string]string{"sourcePath": samplePath, "basePath": "file.log"}, + map[string]interface{}{"sourcePath": samplePath, "basePath": "file.log"}, + time.Now()), + }, + }, + } + runTestOptionsApply(t, tests) +} diff --git a/plugins/processors/filepath/filepath_test_helpers.go b/plugins/processors/filepath/filepath_test_helpers.go new file mode 100644 index 000000000..571730b54 --- /dev/null +++ b/plugins/processors/filepath/filepath_test_helpers.go @@ -0,0 +1,100 @@ +package filepath + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +const smokeMetricName = "testmetric" + +type testCase struct { + name string + o *Options + inputMetrics []telegraf.Metric + expectedMetrics []telegraf.Metric +} + +func newOptions(basePath string) *Options { + return &Options{ + BaseName: []BaseOpts{ + { + Field: "baseField", + Tag: "baseTag", + }, + }, + DirName: []BaseOpts{ + { + Field: "dirField", + Tag: "dirTag", + }, + }, + Stem: []BaseOpts{ + { + Field: "stemField", + Tag: "stemTag", + }, + }, + Clean: []BaseOpts{ + { + Field: "cleanField", + Tag: "cleanTag", + }, + }, + Rel: []RelOpts{ + { + BaseOpts: BaseOpts{ + Field: "relField", + Tag: "relTag", + }, + BasePath: basePath, + }, + }, + ToSlash: []BaseOpts{ + { + Field: "slashField", + Tag: "slashTag", + }, + }, + } +} + +func getSampleMetricTags(path string) map[string]string { + return map[string]string{ + "baseTag": path, + "dirTag": path, + "stemTag": path, + "cleanTag": path, + "relTag": path, + "slashTag": path, + } +} + +func getSampleMetricFields(path string) map[string]interface{} { + return map[string]interface{}{ + "baseField": path, + "dirField": path, + "stemField": path, + "cleanField": path, + "relField": path, + "slashField": path, + } +} + +func getSmokeTestInputMetrics(path string) []telegraf.Metric { + return []telegraf.Metric{ + testutil.MustMetric(smokeMetricName, getSampleMetricTags(path), getSampleMetricFields(path), + time.Now()), + } +} + +func runTestOptionsApply(t *testing.T, tests []testCase) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.o.Apply(tt.inputMetrics...) 
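+			// Ignore metric order and timestamps in the comparison, since the test fixtures are built with time.Now().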
+ testutil.RequireMetricsEqual(t, tt.expectedMetrics, got, testutil.SortMetrics(), testutil.IgnoreTime()) + }) + } +} diff --git a/plugins/processors/filepath/filepath_windows_test.go b/plugins/processors/filepath/filepath_windows_test.go new file mode 100644 index 000000000..daca33d18 --- /dev/null +++ b/plugins/processors/filepath/filepath_windows_test.go @@ -0,0 +1,43 @@ +package filepath + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +var samplePath = "c:\\my\\test\\\\c\\..\\path\\file.log" + +func TestOptions_Apply(t *testing.T) { + tests := []testCase{ + { + name: "Smoke Test", + o: newOptions("c:\\my\\test\\"), + inputMetrics: getSmokeTestInputMetrics(samplePath), + expectedMetrics: []telegraf.Metric{ + testutil.MustMetric( + smokeMetricName, + map[string]string{ + "baseTag": "file.log", + "dirTag": "c:\\my\\test\\path", + "stemTag": "file", + "cleanTag": "c:\\my\\test\\path\\file.log", + "relTag": "path\\file.log", + "slashTag": "c:/my/test//c/../path/file.log", + }, + map[string]interface{}{ + "baseField": "file.log", + "dirField": "c:\\my\\test\\path", + "stemField": "file", + "cleanField": "c:\\my\\test\\path\\file.log", + "relField": "path\\file.log", + "slashField": "c:/my/test//c/../path/file.log", + }, + time.Now()), + }, + }, + } + runTestOptionsApply(t, tests) +} diff --git a/plugins/processors/parser/README.md b/plugins/processors/parser/README.md new file mode 100644 index 000000000..134bbb59e --- /dev/null +++ b/plugins/processors/parser/README.md @@ -0,0 +1,45 @@ +# Parser Processor Plugin + +This plugin parses defined fields containing the specified data format and +creates new metrics based on the contents of the field. + +## Configuration +```toml +[[processors.parser]] + ## The name of the fields whose value will be parsed. + parse_fields = ["message"] + + ## If true, incoming metrics are not emitted. + drop_original = false + + ## If set to override, emitted metrics will be merged by overriding the + ## original metric using the newly parsed metrics. 
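+  ## Any other value, or leaving 'merge' unset, emits the parsed metrics as separate, additional metrics.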
+ merge = "override" + + ## The dataformat to be read from files + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +``` + +### Example: + +```toml +[[processors.parser]] + parse_fields = ["message"] + merge = "override" + data_format = "logfmt" +``` + +**Input**: +``` +syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",procid="6629",severity_code=6i,timestamp=1533848508138040000i,version=1i +``` + +**Output**: +``` +syslog,appname=influxd,facility=daemon,hostname=http://influxdb.example.org\ (influxdb.example.org),severity=info facility_code=3i,log_id="09p7QbOG000",lvl="info",message=" ts=2018-08-09T21:01:48.137963Z lvl=info msg=\"Executing query\" log_id=09p7QbOG000 service=query query=\"SHOW DATABASES\"",msg="Executing query",procid="6629",query="SHOW DATABASES",service="query",severity_code=6i,timestamp=1533848508138040000i,ts="2018-08-09T21:01:48.137963Z",version=1i +``` + + diff --git a/plugins/processors/parser/parser.go b/plugins/processors/parser/parser.go new file mode 100644 index 000000000..63230763a --- /dev/null +++ b/plugins/processors/parser/parser.go @@ -0,0 +1,124 @@ +package parser + +import ( + "log" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/influxdata/telegraf/plugins/processors" +) + +type Parser struct { + parsers.Config + DropOriginal bool `toml:"drop_original"` + Merge string `toml:"merge"` + ParseFields []string `toml:"parse_fields"` + Parser parsers.Parser +} + +var SampleConfig = ` + ## The name of the fields whose value will be parsed. + parse_fields = [] + + ## If true, incoming metrics are not emitted. + drop_original = false + + ## If set to override, emitted metrics will be merged by overriding the + ## original metric using the newly parsed metrics. + merge = "override" + + ## The dataformat to be read from files + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "influx" +` + +func (p *Parser) SampleConfig() string { + return SampleConfig +} + +func (p *Parser) Description() string { + return "Parse a value in a specified field/tag(s) and add the result in a new metric" +} + +func (p *Parser) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + if p.Parser == nil { + var err error + p.Parser, err = parsers.NewParser(&p.Config) + if err != nil { + log.Printf("E! [processors.parser] could not create parser: %v", err) + return metrics + } + } + + results := []telegraf.Metric{} + + for _, metric := range metrics { + newMetrics := []telegraf.Metric{} + if !p.DropOriginal { + newMetrics = append(newMetrics, metric) + } + + for _, key := range p.ParseFields { + for _, field := range metric.FieldList() { + if field.Key == key { + switch value := field.Value.(type) { + case string: + fromFieldMetric, err := p.parseField(value) + if err != nil { + log.Printf("E! 
[processors.parser] could not parse field %s: %v", key, err) + } + + for _, m := range fromFieldMetric { + if m.Name() == "" { + m.SetName(metric.Name()) + } + } + + // multiple parsed fields shouldn't create multiple + // metrics so we'll merge tags/fields down into one + // prior to returning. + newMetrics = append(newMetrics, fromFieldMetric...) + default: + log.Printf("E! [processors.parser] field '%s' not a string, skipping", key) + } + } + } + } + + if len(newMetrics) == 0 { + continue + } + + if p.Merge == "override" { + results = append(results, merge(newMetrics[0], newMetrics[1:])) + } else { + results = append(results, newMetrics...) + } + } + return results +} + +func merge(base telegraf.Metric, metrics []telegraf.Metric) telegraf.Metric { + for _, metric := range metrics { + for _, field := range metric.FieldList() { + base.AddField(field.Key, field.Value) + } + for _, tag := range metric.TagList() { + base.AddTag(tag.Key, tag.Value) + } + base.SetName(metric.Name()) + } + return base +} + +func (p *Parser) parseField(value string) ([]telegraf.Metric, error) { + return p.Parser.Parse([]byte(value)) +} + +func init() { + processors.Add("parser", func() telegraf.Processor { + return &Parser{DropOriginal: false} + }) +} diff --git a/plugins/processors/parser/parser_test.go b/plugins/processors/parser/parser_test.go new file mode 100644 index 000000000..ac042848f --- /dev/null +++ b/plugins/processors/parser/parser_test.go @@ -0,0 +1,670 @@ +package parser + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/parsers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +//compares metrics without comparing time +func compareMetrics(t *testing.T, expected, actual []telegraf.Metric) { + assert.Equal(t, len(expected), len(actual)) + for i, metric := range actual { + require.Equal(t, expected[i].Name(), metric.Name()) + require.Equal(t, expected[i].Fields(), metric.Fields()) + require.Equal(t, expected[i].Tags(), metric.Tags()) + } +} + +func Metric(v telegraf.Metric, err error) telegraf.Metric { + if err != nil { + panic(err) + } + return v +} + +func TestApply(t *testing.T) { + tests := []struct { + name string + parseFields []string + config parsers.Config + dropOriginal bool + merge string + input telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "parse one field drop original", + parseFields: []string{"sample"}, + dropOriginal: true, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{ + "ts", + "lvl", + "msg", + "method", + }, + }, + input: Metric( + metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "singleField", + map[string]string{ + "ts": "2018-07-24T19:43:40.275Z", + "lvl": "info", + "msg": "http request", + "method": "POST", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "parse one field with merge", + parseFields: []string{"sample"}, + dropOriginal: false, + merge: "override", + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{ + "ts", + "lvl", + "msg", + "method", + }, + }, + input: Metric( + metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": 
`{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "singleField", + map[string]string{ + "some": "tag", + "ts": "2018-07-24T19:43:40.275Z", + "lvl": "info", + "msg": "http request", + "method": "POST", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse one field keep", + parseFields: []string{"sample"}, + dropOriginal: false, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{ + "ts", + "lvl", + "msg", + "method", + }, + }, + input: Metric( + metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "singleField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "sample": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))), + Metric(metric.New( + "singleField", + map[string]string{ + "ts": "2018-07-24T19:43:40.275Z", + "lvl": "info", + "msg": "http request", + "method": "POST", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "parse one field keep with measurement name", + parseFields: []string{"message"}, + config: parsers.Config{ + DataFormat: "influx", + }, + dropOriginal: false, + input: Metric( + metric.New( + "influxField", + map[string]string{}, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "influxField", + map[string]string{}, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0))), + Metric(metric.New( + "deal", + map[string]string{ + "computer_name": "hosta", + }, + map[string]interface{}{ + "message": "stuff", + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse one field override replaces name", + parseFields: []string{"message"}, + dropOriginal: false, + merge: "override", + config: parsers.Config{ + DataFormat: "influx", + }, + input: Metric( + metric.New( + "influxField", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "message": "deal,computer_name=hosta message=\"stuff\" 1530654676316265790", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "deal", + map[string]string{ + "computer_name": "hosta", + "some": "tag", + }, + map[string]interface{}{ + "message": "stuff", + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse grok field", + parseFields: []string{"grokSample"}, + dropOriginal: true, + config: parsers.Config{ + DataFormat: "grok", + GrokPatterns: []string{"%{COMBINED_LOG_FORMAT}"}, + }, + input: Metric( + metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "grokSample": "127.0.0.1 - - [11/Dec/2013:00:01:45 -0800] \"GET /xampp/status.php HTTP/1.1\" 200 3891 \"http://cadenza/xampp/navi.php\" \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0\"", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{ + "resp_code": "200", + "verb": "GET", + }, + map[string]interface{}{ + "resp_bytes": int64(3891), + "auth": 
"-", + "request": "/xampp/status.php", + "referrer": "http://cadenza/xampp/navi.php", + "agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0", + "client_ip": "127.0.0.1", + "ident": "-", + "http_version": float64(1.1), + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse two fields [replace]", + parseFields: []string{"field_1", "field_2"}, + dropOriginal: true, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl", "err"}, + }, + input: Metric( + metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bigMeasure", + map[string]string{ + "lvl": "info", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + Metric(metric.New( + "bigMeasure", + map[string]string{ + "err": "fatal", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "parse two fields [merge]", + parseFields: []string{"field_1", "field_2"}, + dropOriginal: false, + merge: "override", + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl", "msg", "err", "fatal"}, + }, + input: Metric( + metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bigMeasure", + map[string]string{ + "lvl": "info", + "msg": "http request", + "err": "fatal", + "fatal": "security threat", + }, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + }, + }, + { + name: "parse two fields [keep]", + parseFields: []string{"field_1", "field_2"}, + dropOriginal: false, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl", "msg", "err", "fatal"}, + }, + input: Metric( + metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bigMeasure", + map[string]string{}, + map[string]interface{}{ + "field_1": `{"lvl":"info","msg":"http request"}`, + "field_2": `{"err":"fatal","fatal":"security threat"}`, + }, + time.Unix(0, 0))), + Metric(metric.New( + "bigMeasure", + map[string]string{ + "lvl": "info", + "msg": "http request", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + Metric(metric.New( + "bigMeasure", + map[string]string{ + "err": "fatal", + "fatal": "security threat", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "Fail to parse one field but parses other [keep]", + parseFields: []string{"good", "bad"}, + dropOriginal: false, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl"}, + }, + input: Metric( + metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + Metric(metric.New( + "success", + map[string]string{ + "lvl": "info", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, 
+ }, + { + name: "Fail to parse one field but parses other [keep] v2", + parseFields: []string{"bad", "good", "ok"}, + dropOriginal: false, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl", "thing"}, + }, + input: Metric( + metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "bad": "why", + "good": `{"lvl":"info"}`, + "ok": `{"thing":"thang"}`, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{}, + map[string]interface{}{ + "bad": "why", + "good": `{"lvl":"info"}`, + "ok": `{"thing":"thang"}`, + }, + time.Unix(0, 0))), + Metric(metric.New( + "success", + map[string]string{ + "lvl": "info", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + Metric(metric.New( + "success", + map[string]string{ + "thing": "thang", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + { + name: "Fail to parse one field but parses other [merge]", + parseFields: []string{"good", "bad"}, + dropOriginal: false, + merge: "override", + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl"}, + }, + input: Metric( + metric.New( + "success", + map[string]string{ + "a": "tag", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{ + "a": "tag", + "lvl": "info", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + }, + }, + { + name: "Fail to parse one field but parses other [replace]", + parseFields: []string{"good", "bad"}, + dropOriginal: true, + config: parsers.Config{ + DataFormat: "json", + TagKeys: []string{"lvl"}, + }, + input: Metric( + metric.New( + "success", + map[string]string{ + "thing": "tag", + }, + map[string]interface{}{ + "good": `{"lvl":"info"}`, + "bad": "why", + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "success", + map[string]string{ + "lvl": "info", + }, + map[string]interface{}{}, + time.Unix(0, 0))), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := Parser{ + Config: tt.config, + ParseFields: tt.parseFields, + DropOriginal: tt.dropOriginal, + Merge: tt.merge, + } + + output := parser.Apply(tt.input) + t.Logf("Testing: %s", tt.name) + compareMetrics(t, tt.expected, output) + }) + } +} + +func TestBadApply(t *testing.T) { + tests := []struct { + name string + parseFields []string + config parsers.Config + input telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "field not found", + parseFields: []string{"bad_field"}, + config: parsers.Config{ + DataFormat: "json", + }, + input: Metric( + metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0))), + }, + }, + { + name: "non string field", + parseFields: []string{"some_field"}, + config: parsers.Config{ + DataFormat: "json", + }, + input: Metric( + metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0))), + expected: []telegraf.Metric{ + Metric(metric.New( + "bad", + map[string]string{}, + map[string]interface{}{ + "some_field": 5, + }, + time.Unix(0, 0))), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parser := Parser{ + Config: 
tt.config, + ParseFields: tt.parseFields, + } + + output := parser.Apply(tt.input) + + compareMetrics(t, output, tt.expected) + }) + } +} + +// Benchmarks + +func getMetricFields(metric telegraf.Metric) interface{} { + key := "field3" + if value, ok := metric.Fields()[key]; ok { + return value + } + return nil +} + +func getMetricFieldList(metric telegraf.Metric) interface{} { + key := "field3" + fields := metric.FieldList() + for _, field := range fields { + if field.Key == key { + return field.Value + } + } + return nil +} + +func BenchmarkFieldListing(b *testing.B) { + metric := Metric(metric.New( + "test", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "field0": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field1": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field2": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field3": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field4": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field5": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field6": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))) + + for n := 0; n < b.N; n++ { + getMetricFieldList(metric) + } +} + +func BenchmarkFields(b *testing.B) { + metric := Metric(metric.New( + "test", + map[string]string{ + "some": "tag", + }, + map[string]interface{}{ + "field0": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field1": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field2": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field3": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field4": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field5": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + "field6": `{"ts":"2018-07-24T19:43:40.275Z","lvl":"info","msg":"http request","method":"POST"}`, + }, + time.Unix(0, 0))) + + for n := 0; n < b.N; n++ { + getMetricFields(metric) + } +} diff --git a/plugins/processors/pivot/README.md b/plugins/processors/pivot/README.md new file mode 100644 index 000000000..b3eb06fd3 --- /dev/null +++ b/plugins/processors/pivot/README.md @@ -0,0 +1,30 @@ +# Pivot Processor + +You can use the `pivot` processor to rotate single valued metrics into a multi +field metric. This transformation often results in data that is more easily +to apply mathematical operators and comparisons between, and flatten into a +more compact representation for write operations with some output data +formats. + +To perform the reverse operation use the [unpivot] processor. + +### Configuration + +```toml +[[processors.pivot]] + ## Tag to use for naming the new field. + tag_key = "name" + ## Field to use as the value of the new field. 
+ value_key = "value" +``` + +### Example + +```diff +- cpu,cpu=cpu0,name=time_idle value=42i +- cpu,cpu=cpu0,name=time_user value=43i ++ cpu,cpu=cpu0 time_idle=42i ++ cpu,cpu=cpu0 time_user=43i +``` + +[unpivot]: /plugins/processors/unpivot/README.md diff --git a/plugins/processors/pivot/pivot.go b/plugins/processors/pivot/pivot.go new file mode 100644 index 000000000..b20c7f758 --- /dev/null +++ b/plugins/processors/pivot/pivot.go @@ -0,0 +1,54 @@ +package pivot + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +const ( + description = "Rotate a single valued metric into a multi field metric" + sampleConfig = ` + ## Tag to use for naming the new field. + tag_key = "name" + ## Field to use as the value of the new field. + value_key = "value" +` +) + +type Pivot struct { + TagKey string `toml:"tag_key"` + ValueKey string `toml:"value_key"` +} + +func (p *Pivot) SampleConfig() string { + return sampleConfig +} + +func (p *Pivot) Description() string { + return description +} + +func (p *Pivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + for _, m := range metrics { + key, ok := m.GetTag(p.TagKey) + if !ok { + continue + } + + value, ok := m.GetField(p.ValueKey) + if !ok { + continue + } + + m.RemoveTag(p.TagKey) + m.RemoveField(p.ValueKey) + m.AddField(key, value) + } + return metrics +} + +func init() { + processors.Add("pivot", func() telegraf.Processor { + return &Pivot{} + }) +} diff --git a/plugins/processors/pivot/pivot_test.go b/plugins/processors/pivot/pivot_test.go new file mode 100644 index 000000000..34924f8fa --- /dev/null +++ b/plugins/processors/pivot/pivot_test.go @@ -0,0 +1,111 @@ +package pivot + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +func TestPivot(t *testing.T) { + now := time.Now() + tests := []struct { + name string + pivot *Pivot + metrics []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "simple", + pivot: &Pivot{ + TagKey: "name", + ValueKey: "value", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_time", + }, + map[string]interface{}{ + "value": int64(42), + }, + now, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "idle_time": int64(42), + }, + now, + ), + }, + }, + { + name: "missing tag", + pivot: &Pivot{ + TagKey: "name", + ValueKey: "value", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "foo": "idle_time", + }, + map[string]interface{}{ + "value": int64(42), + }, + now, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "foo": "idle_time", + }, + map[string]interface{}{ + "value": int64(42), + }, + now, + ), + }, + }, + { + name: "missing field", + pivot: &Pivot{ + TagKey: "name", + ValueKey: "value", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_time", + }, + map[string]interface{}{ + "foo": int64(42), + }, + now, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_time", + }, + map[string]interface{}{ + "foo": int64(42), + }, + now, + ), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.pivot.Apply(tt.metrics...) 
+ testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} diff --git a/plugins/processors/port_name/README.md b/plugins/processors/port_name/README.md new file mode 100644 index 000000000..c078fe1c4 --- /dev/null +++ b/plugins/processors/port_name/README.md @@ -0,0 +1,26 @@ +# Port Name Lookup Processor Plugin + +Use the `port_name` processor to convert a tag containing a well-known port number to the registered service name. + +Tag can contain a number ("80") or number and protocol separated by slash ("443/tcp"). If protocol is not provided it defaults to tcp but can be changed with the default_protocol setting. + +### Configuration + +```toml +[[processors.port_name]] + ## Name of tag holding the port number + # tag = "port" + + ## Name of output tag where service name will be added + # dest = "service" + + ## Default tcp or udp + # default_protocol = "tcp" +``` + +### Example + +```diff +- measurement,port=80 field=123 1560540094000000000 ++ measurement,port=80,service=http field=123 1560540094000000000 +``` diff --git a/plugins/processors/port_name/port_name.go b/plugins/processors/port_name/port_name.go new file mode 100644 index 000000000..50c893e60 --- /dev/null +++ b/plugins/processors/port_name/port_name.go @@ -0,0 +1,174 @@ +package portname + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +var sampleConfig = ` +[[processors.port_name]] + ## Name of tag holding the port number + # tag = "port" + + ## Name of output tag where service name will be added + # dest = "service" + + ## Default tcp or udp + # default_protocol = "tcp" +` + +type sMap map[string]map[int]string // "https" == services["tcp"][443] + +var services sMap + +type PortName struct { + SourceTag string `toml:"tag"` + DestTag string `toml:"dest"` + DefaultProtocol string `toml:"default_protocol"` + + Log telegraf.Logger `toml:"-"` +} + +func (d *PortName) SampleConfig() string { + return sampleConfig +} + +func (d *PortName) Description() string { + return "Given a tag of a TCP or UDP port number, add a tag of the service name looked up in the system services file" +} + +func readServicesFile() { + file, err := os.Open(servicesPath()) + if err != nil { + return + } + defer file.Close() + + services = readServices(file) +} + +// Read the services file into a map. +// +// This function takes a similar approach to parsing as the go +// standard library (see src/net/port_unix.go in golang source) but +// maps protocol and port number to service name, not protocol and +// service to port number. 
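+// For example, the services line "https 443/tcp" is stored here as services["tcp"][443] = "https".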
+func readServices(r io.Reader) sMap { + services = make(sMap) + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + // "http 80/tcp www www-http # World Wide Web HTTP" + if i := strings.Index(line, "#"); i >= 0 { + line = line[:i] + } + f := strings.Fields(line) + if len(f) < 2 { + continue + } + service := f[0] // "http" + portProto := f[1] // "80/tcp" + portProtoSlice := strings.SplitN(portProto, "/", 2) + if len(portProtoSlice) < 2 { + continue + } + port, err := strconv.Atoi(portProtoSlice[0]) // "80" + if err != nil || port <= 0 { + continue + } + proto := portProtoSlice[1] // "tcp" + proto = strings.ToLower(proto) + + protoMap, ok := services[proto] + if !ok { + protoMap = make(map[int]string) + services[proto] = protoMap + } + protoMap[port] = service + } + return services +} + +func (d *PortName) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + for _, m := range metrics { + portProto, ok := m.GetTag(d.SourceTag) + if !ok { + // Nonexistent tag + continue + } + portProtoSlice := strings.SplitN(portProto, "/", 2) + l := len(portProtoSlice) + + if l == 0 { + // Empty tag + d.Log.Errorf("empty port tag: %v", d.SourceTag) + continue + } + + var port int + if l > 0 { + var err error + val := portProtoSlice[0] + port, err = strconv.Atoi(val) + if err != nil { + // Can't convert port to string + d.Log.Errorf("error converting port to integer: %v", val) + continue + } + } + + proto := d.DefaultProtocol + if l > 1 && len(portProtoSlice[1]) > 0 { + proto = portProtoSlice[1] + } + proto = strings.ToLower(proto) + + protoMap, ok := services[proto] + if !ok { + // Unknown protocol + // + // Protocol is normally tcp or udp. The services file + // normally has entries for both, so our map does too. If + // not, it's very likely the source tag or the services + // file doesn't make sense. + d.Log.Errorf("protocol not found in services map: %v", proto) + continue + } + + service, ok := protoMap[port] + if !ok { + // Unknown port + // + // Not all ports are named so this isn't an error, but + // it's helpful to know when debugging. 
+ d.Log.Debugf("port not found in services map: %v", port) + continue + } + + m.AddTag(d.DestTag, service) + } + + return metrics +} + +func (h *PortName) Init() error { + services = make(sMap) + readServicesFile() + return nil +} + +func init() { + processors.Add("port_name", func() telegraf.Processor { + return &PortName{ + SourceTag: "port", + DestTag: "service", + DefaultProtocol: "tcp", + } + }) +} diff --git a/plugins/processors/port_name/port_name_test.go b/plugins/processors/port_name/port_name_test.go new file mode 100644 index 000000000..b58f95a9e --- /dev/null +++ b/plugins/processors/port_name/port_name_test.go @@ -0,0 +1,261 @@ +package portname + +import ( + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +var fakeServices = ` +http 80/tcp www # WorldWideWeb HTTP +https 443/tcp # http protocol over TLS/SSL +tftp 69/udp` + +func TestReadServicesFile(t *testing.T) { + readServicesFile() + require.NotZero(t, len(services)) +} + +func TestFakeServices(t *testing.T) { + r := strings.NewReader(fakeServices) + m := readServices(r) + require.Equal(t, sMap{"tcp": {80: "http", 443: "https"}, "udp": {69: "tftp"}}, m) +} + +func TestTable(t *testing.T) { + var tests = []struct { + name string + tag string + dest string + prot string + input []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "ordinary tcp default", + tag: "port", + dest: "service", + prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "443", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "443", + "service": "https", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "force udp default", + tag: "port", + dest: "service", + prot: "udp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "69", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "69", + "service": "tftp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "override default protocol", + tag: "port", + dest: "service", + prot: "foobar", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80/tcp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80/tcp", + "service": "http", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "multiple metrics, multiple protocols", + tag: "port", + dest: "service", + prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + testutil.MustMetric( + "meas", + map[string]string{ + "port": "69/udp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80", + "service": "http", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + testutil.MustMetric( + "meas", + map[string]string{ + "port": "69/udp", + "service": "tftp", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "rename source and destination tags", + tag: "foo", + dest: "bar", 
+ prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "foo": "80", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "foo": "80", + "bar": "http", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "unknown port", + tag: "port", + dest: "service", + prot: "tcp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "9999", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "9999", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + { + name: "don't mix up protocols", + tag: "port", + dest: "service", + prot: "udp", + input: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "meas", + map[string]string{ + "port": "80", + }, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + }, + } + + r := strings.NewReader(fakeServices) + services = readServices(r) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + p := PortName{ + SourceTag: tt.tag, + DestTag: tt.dest, + DefaultProtocol: tt.prot, + Log: testutil.Logger{}, + } + + actual := p.Apply(tt.input...) + + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} diff --git a/plugins/processors/port_name/services_path.go b/plugins/processors/port_name/services_path.go new file mode 100644 index 000000000..c8cf73d14 --- /dev/null +++ b/plugins/processors/port_name/services_path.go @@ -0,0 +1,12 @@ +// +build windows + +package portname + +import ( + "os" + "path/filepath" +) + +func servicesPath() string { + return filepath.Join(os.Getenv("WINDIR"), `system32\drivers\etc\services`) +} diff --git a/plugins/processors/port_name/services_path_notwindows.go b/plugins/processors/port_name/services_path_notwindows.go new file mode 100644 index 000000000..5097bfa9c --- /dev/null +++ b/plugins/processors/port_name/services_path_notwindows.go @@ -0,0 +1,7 @@ +// +build !windows + +package portname + +func servicesPath() string { + return "/etc/services" +} diff --git a/plugins/processors/printer/printer.go b/plugins/processors/printer/printer.go index 363e9a21d..ead3e8ece 100644 --- a/plugins/processors/printer/printer.go +++ b/plugins/processors/printer/printer.go @@ -30,7 +30,7 @@ func (p *Printer) Apply(in ...telegraf.Metric) []telegraf.Metric { if err != nil { continue } - fmt.Println(octets) + fmt.Printf("%s", octets) } return in } diff --git a/plugins/processors/regex/README.md b/plugins/processors/regex/README.md index c9eec037b..a6cef82a0 100644 --- a/plugins/processors/regex/README.md +++ b/plugins/processors/regex/README.md @@ -2,6 +2,8 @@ The `regex` plugin transforms tag and field values with regex pattern. If `result_key` parameter is present, it can produce new tags and fields from existing ones. +For tags transforms, if `append` is set to `true`, it will append the transformation to the existing tag value, instead of overwriting it. + ### Configuration: ```toml @@ -14,10 +16,12 @@ The `regex` plugin transforms tag and field values with regex pattern. 
If `resul key = "resp_code" ## Regular expression to match on a tag value pattern = "^(\\d)\\d\\d$" - ## Pattern for constructing a new value (${1} represents first subgroup) + ## Matches of the pattern will be replaced with this string. Use ${1} + ## notation to use the text of the first submatch. replacement = "${1}xx" [[processors.regex.fields]] + ## Field to change key = "request" ## All the power of the Go regular expressions available here ## For example, named subgroups diff --git a/plugins/processors/regex/regex.go b/plugins/processors/regex/regex.go index f73ed06b6..47b53546f 100644 --- a/plugins/processors/regex/regex.go +++ b/plugins/processors/regex/regex.go @@ -18,6 +18,7 @@ type converter struct { Pattern string Replacement string ResultKey string + Append bool } const sampleConfig = ` @@ -27,14 +28,16 @@ const sampleConfig = ` # key = "resp_code" # ## Regular expression to match on a tag value # pattern = "^(\\d)\\d\\d$" - # ## Pattern for constructing a new value (${1} represents first subgroup) + # ## Matches of the pattern will be replaced with this string. Use ${1} + # ## notation to use the text of the first submatch. # replacement = "${1}xx" # [[processors.regex.fields]] + # ## Field to change # key = "request" # ## All the power of the Go regular expressions available here # ## For example, named subgroups - # pattern = "^/api(?P/[\\w/]+)\\S*" + # pattern = "^/api(?P/[\\w/]+)\\S*" # replacement = "${method}" # ## If result_key is present, a new field will be created # ## instead of changing existing field @@ -68,6 +71,11 @@ func (r *Regex) Apply(in ...telegraf.Metric) []telegraf.Metric { for _, converter := range r.Tags { if value, ok := metric.GetTag(converter.Key); ok { if key, newValue := r.convert(converter, value); newValue != "" { + if converter.Append { + if v, ok := metric.GetTag(key); ok { + newValue = v + newValue + } + } metric.AddTag(key, newValue) } } diff --git a/plugins/processors/regex/regex_test.go b/plugins/processors/regex/regex_test.go index f16ef7f5c..b0ddf47d0 100644 --- a/plugins/processors/regex/regex_test.go +++ b/plugins/processors/regex/regex_test.go @@ -108,6 +108,20 @@ func TestTagConversions(t *testing.T) { "resp_code": "2xx", }, }, + { + message: "Should append to existing tag", + converter: converter{ + Key: "verb", + Pattern: "^(.*)$", + Replacement: " (${1})", + ResultKey: "resp_code", + Append: true, + }, + expectedTags: map[string]string{ + "verb": "GET", + "resp_code": "200 (GET)", + }, + }, { message: "Should add new tag", converter: converter{ diff --git a/plugins/processors/registry.go b/plugins/processors/registry.go index 592c688f3..efade2966 100644 --- a/plugins/processors/registry.go +++ b/plugins/processors/registry.go @@ -3,9 +3,24 @@ package processors import "github.com/influxdata/telegraf" type Creator func() telegraf.Processor +type StreamingCreator func() telegraf.StreamingProcessor -var Processors = map[string]Creator{} +// all processors are streaming processors. 
+// telegraf.Processor processors are upgraded to telegraf.StreamingProcessor +var Processors = map[string]StreamingCreator{} +// Add adds a telegraf.Processor processor func Add(name string, creator Creator) { + Processors[name] = upgradeToStreamingProcessor(creator) +} + +// AddStreaming adds a telegraf.StreamingProcessor streaming processor +func AddStreaming(name string, creator StreamingCreator) { Processors[name] = creator } + +func upgradeToStreamingProcessor(oldCreator Creator) StreamingCreator { + return func() telegraf.StreamingProcessor { + return NewStreamingProcessorFromProcessor(oldCreator()) + } +} diff --git a/plugins/processors/rename/README.md b/plugins/processors/rename/README.md new file mode 100644 index 000000000..cc3c61a94 --- /dev/null +++ b/plugins/processors/rename/README.md @@ -0,0 +1,36 @@ +# Rename Processor Plugin + +The `rename` processor renames measurements, fields, and tags. + +### Configuration: + +```toml +[[processors.rename]] + ## Specify one sub-table per rename operation. + [[processors.rename.replace]] + measurement = "network_interface_throughput" + dest = "throughput" + + [[processors.rename.replace]] + tag = "hostname" + dest = "host" + + [[processors.rename.replace]] + field = "lower" + dest = "min" + + [[processors.rename.replace]] + field = "upper" + dest = "max" +``` + +### Tags: + +No tags are applied by this processor, though it can alter them by renaming. + +### Example processing: + +```diff +- network_interface_throughput,hostname=backend.example.com lower=10i,upper=1000i,mean=500i 1502489900000000000 ++ throughput,host=backend.example.com min=10i,max=1000i,mean=500i 1502489900000000000 +``` diff --git a/plugins/processors/rename/rename.go b/plugins/processors/rename/rename.go new file mode 100644 index 000000000..acb6d2ccc --- /dev/null +++ b/plugins/processors/rename/rename.go @@ -0,0 +1,69 @@ +package rename + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +const sampleConfig = ` +` + +type Replace struct { + Measurement string `toml:"measurement"` + Tag string `toml:"tag"` + Field string `toml:"field"` + Dest string `toml:"dest"` +} + +type Rename struct { + Replaces []Replace `toml:"replace"` +} + +func (r *Rename) SampleConfig() string { + return sampleConfig +} + +func (r *Rename) Description() string { + return "Rename measurements, tags, and fields that pass through this filter." 
+} + +func (r *Rename) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, point := range in { + for _, replace := range r.Replaces { + if replace.Dest == "" { + continue + } + + if replace.Measurement != "" { + if value := point.Name(); value == replace.Measurement { + point.SetName(replace.Dest) + } + continue + } + + if replace.Tag != "" { + if value, ok := point.GetTag(replace.Tag); ok { + point.RemoveTag(replace.Tag) + point.AddTag(replace.Dest, value) + } + continue + } + + if replace.Field != "" { + if value, ok := point.GetField(replace.Field); ok { + point.RemoveField(replace.Field) + point.AddField(replace.Dest, value) + } + continue + } + } + } + + return in +} + +func init() { + processors.Add("rename", func() telegraf.Processor { + return &Rename{} + }) +} diff --git a/plugins/processors/rename/rename_test.go b/plugins/processors/rename/rename_test.go new file mode 100644 index 000000000..1f8e0b7db --- /dev/null +++ b/plugins/processors/rename/rename_test.go @@ -0,0 +1,61 @@ +package rename + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func newMetric(name string, tags map[string]string, fields map[string]interface{}) telegraf.Metric { + if tags == nil { + tags = map[string]string{} + } + if fields == nil { + fields = map[string]interface{}{} + } + m, _ := metric.New(name, tags, fields, time.Now()) + return m +} + +func TestMeasurementRename(t *testing.T) { + r := Rename{ + Replaces: []Replace{ + {Measurement: "foo", Dest: "bar"}, + {Measurement: "baz", Dest: "quux"}, + }, + } + m1 := newMetric("foo", nil, nil) + m2 := newMetric("bar", nil, nil) + m3 := newMetric("baz", nil, nil) + results := r.Apply(m1, m2, m3) + assert.Equal(t, "bar", results[0].Name(), "Should change name from 'foo' to 'bar'") + assert.Equal(t, "bar", results[1].Name(), "Should not name from 'bar'") + assert.Equal(t, "quux", results[2].Name(), "Should change name from 'baz' to 'quux'") +} + +func TestTagRename(t *testing.T) { + r := Rename{ + Replaces: []Replace{ + {Tag: "hostname", Dest: "host"}, + }, + } + m := newMetric("foo", map[string]string{"hostname": "localhost", "region": "east-1"}, nil) + results := r.Apply(m) + + assert.Equal(t, map[string]string{"host": "localhost", "region": "east-1"}, results[0].Tags(), "should change tag 'hostname' to 'host'") +} + +func TestFieldRename(t *testing.T) { + r := Rename{ + Replaces: []Replace{ + {Field: "time_msec", Dest: "time"}, + }, + } + m := newMetric("foo", nil, map[string]interface{}{"time_msec": int64(1250), "snakes": true}) + results := r.Apply(m) + + assert.Equal(t, map[string]interface{}{"time": int64(1250), "snakes": true}, results[0].Fields(), "should change field 'time_msec' to 'time'") +} diff --git a/plugins/processors/s2geo/README.md b/plugins/processors/s2geo/README.md new file mode 100644 index 000000000..d48947fe6 --- /dev/null +++ b/plugins/processors/s2geo/README.md @@ -0,0 +1,30 @@ +# S2 Geo Processor Plugin + +Use the `s2geo` processor to add tag with S2 cell ID token of specified [cell level][cell levels]. +The tag is used in `experimental/geo` Flux package functions. +The `lat` and `lon` fields values should contain WGS-84 coordinates in decimal degrees. + +### Configuration + +```toml +[[processors.s2geo]] + ## The name of the lat and lon fields containing WGS-84 latitude and + ## longitude in decimal degrees. 
+ # lat_field = "lat" + # lon_field = "lon" + + ## New tag to create + # tag_key = "s2_cell_id" + + ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) + # cell_level = 9 +``` + +### Example + +```diff +- mta,area=llir,id=GO505_20_2704,status=1 lat=40.878738,lon=-72.517572 1560540094 ++ mta,area=llir,id=GO505_20_2704,status=1,s2_cell_id=89e8ed4 lat=40.878738,lon=-72.517572 1560540094 +``` + +[cell levels]: https://s2geometry.io/resources/s2cell_statistics.html diff --git a/plugins/processors/s2geo/s2geo.go b/plugins/processors/s2geo/s2geo.go new file mode 100644 index 000000000..5376a6657 --- /dev/null +++ b/plugins/processors/s2geo/s2geo.go @@ -0,0 +1,78 @@ +package geo + +import ( + "fmt" + + "github.com/golang/geo/s2" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +type Geo struct { + LatField string `toml:"lat_field"` + LonField string `toml:"lon_field"` + TagKey string `toml:"tag_key"` + CellLevel int `toml:"cell_level"` +} + +var SampleConfig = ` + ## The name of the lat and lon fields containing WGS-84 latitude and + ## longitude in decimal degrees. + # lat_field = "lat" + # lon_field = "lon" + + ## New tag to create + # tag_key = "s2_cell_id" + + ## Cell level (see https://s2geometry.io/resources/s2cell_statistics.html) + # cell_level = 9 +` + +func (g *Geo) SampleConfig() string { + return SampleConfig +} + +func (g *Geo) Description() string { + return "Add the S2 Cell ID as a tag based on latitude and longitude fields" +} + +func (g *Geo) Init() error { + if g.CellLevel < 0 || g.CellLevel > 30 { + return fmt.Errorf("invalid cell level %d", g.CellLevel) + } + return nil +} + +func (g *Geo) Apply(in ...telegraf.Metric) []telegraf.Metric { + for _, point := range in { + var latOk, lonOk bool + var lat, lon float64 + for _, field := range point.FieldList() { + switch field.Key { + case g.LatField: + lat, latOk = field.Value.(float64) + case g.LonField: + lon, lonOk = field.Value.(float64) + } + } + if latOk && lonOk { + cellID := s2.CellIDFromLatLng(s2.LatLngFromDegrees(lat, lon)) + if cellID.IsValid() { + value := cellID.Parent(g.CellLevel).ToToken() + point.AddTag(g.TagKey, value) + } + } + } + return in +} + +func init() { + processors.Add("s2geo", func() telegraf.Processor { + return &Geo{ + LatField: "lat", + LonField: "lon", + TagKey: "s2_cell_id", + CellLevel: 9, + } + }) +} diff --git a/plugins/processors/s2geo/s2geo_test.go b/plugins/processors/s2geo/s2geo_test.go new file mode 100644 index 000000000..b06a1a06d --- /dev/null +++ b/plugins/processors/s2geo/s2geo_test.go @@ -0,0 +1,55 @@ +package geo + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestGeo(t *testing.T) { + plugin := &Geo{ + LatField: "lat", + LonField: "lon", + TagKey: "s2_cell_id", + CellLevel: 11, + } + + pluginMostlyDefault := &Geo{ + CellLevel: 11, + } + + err := plugin.Init() + require.NoError(t, err) + + metric := testutil.MustMetric( + "mta", + map[string]string{}, + map[string]interface{}{ + "lat": 40.878738, + "lon": -72.517572, + }, + time.Unix(1578603600, 0), + ) + + expected := []telegraf.Metric{ + testutil.MustMetric( + "mta", + map[string]string{ + "s2_cell_id": "89e8ed4", + }, + map[string]interface{}{ + "lat": 40.878738, + "lon": -72.517572, + }, + time.Unix(1578603600, 0), + ), + } + + actual := plugin.Apply(metric) + testutil.RequireMetricsEqual(t, expected, actual) + actual = 
pluginMostlyDefault.Apply(metric) + testutil.RequireMetricsEqual(t, expected, actual) +} diff --git a/plugins/processors/streamingprocessor.go b/plugins/processors/streamingprocessor.go new file mode 100644 index 000000000..4078ac26c --- /dev/null +++ b/plugins/processors/streamingprocessor.go @@ -0,0 +1,49 @@ +package processors + +import ( + "github.com/influxdata/telegraf" +) + +// NewStreamingProcessorFromProcessor is a converter that turns a standard +// processor into a streaming processor +func NewStreamingProcessorFromProcessor(p telegraf.Processor) telegraf.StreamingProcessor { + sp := &streamingProcessor{ + processor: p, + } + return sp +} + +type streamingProcessor struct { + processor telegraf.Processor + acc telegraf.Accumulator +} + +func (sp *streamingProcessor) SampleConfig() string { + return sp.processor.SampleConfig() +} + +func (sp *streamingProcessor) Description() string { + return sp.processor.Description() +} + +func (sp *streamingProcessor) Start(acc telegraf.Accumulator) error { + sp.acc = acc + return nil +} + +func (sp *streamingProcessor) Add(m telegraf.Metric, acc telegraf.Accumulator) { + for _, m := range sp.processor.Apply(m) { + acc.AddMetric(m) + } +} + +func (sp *streamingProcessor) Stop() error { + return nil +} + +// Unwrap lets you retrieve the original telegraf.Processor from the +// StreamingProcessor. This is necessary because the toml Unmarshaller won't +// look inside composed types. +func (sp *streamingProcessor) Unwrap() telegraf.Processor { + return sp.processor +} diff --git a/plugins/processors/strings/README.md b/plugins/processors/strings/README.md new file mode 100644 index 000000000..a7aa0e2a5 --- /dev/null +++ b/plugins/processors/strings/README.md @@ -0,0 +1,149 @@ +# Strings Processor Plugin + +The `strings` plugin maps certain go string functions onto measurement, tag, and field values. Values can be modified in place or stored in another key. + +Implemented functions are: +- lowercase +- uppercase +- titlecase +- trim +- trim_left +- trim_right +- trim_prefix +- trim_suffix +- replace +- left +- base64decode + +Please note that in this implementation these are processed in the order that they appear above. + +Specify the `measurement`, `tag`, `tag_key`, `field`, or `field_key` that you want processed in each section and optionally a `dest` if you want the result stored in a new tag or field. You can specify lots of transformations on data with a single strings processor. + +If you'd like to apply the change to every `tag`, `tag_key`, `field`, `field_key`, or `measurement`, use the value `"*"` for each respective field. Note that the `dest` field will be ignored if `"*"` is used. + +If you'd like to apply multiple processings to the same `tag_key` or `field_key`, note the process order stated above. See [Example 2]() for an example. 
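As a rough illustration of how these sections compose, here is a minimal test-style Go sketch (it would live inside the plugin package, since the `converter` type is unexported); the metric, field names, and printed values are illustrative only, not part of the plugin:

```go
package strings

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
)

// ExampleStrings_Apply sketches the processor's behaviour: one field is
// lowercased in place, another is uppercased into a new destination field.
func ExampleStrings_Apply() {
	plugin := &Strings{
		Lowercase: []converter{
			{Field: "uri_stem"},
		},
		Uppercase: []converter{
			{Field: "cs-host", Dest: "cs-host_normalised"},
		},
	}

	// Illustrative metric; only string values are modified by the converters.
	m, _ := metric.New("iis_log",
		map[string]string{"verb": "GET"},
		map[string]interface{}{
			"uri_stem": "/API/HealthCheck",
			"cs-host":  "MIXEDCASE_host",
		},
		time.Now(),
	)

	out := plugin.Apply(m)
	fmt.Println(out[0].Fields()["uri_stem"])           // /api/healthcheck
	fmt.Println(out[0].Fields()["cs-host_normalised"]) // MIXEDCASE_HOST
}
```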
+
+### Configuration:
+
+```toml
+[[processors.strings]]
+  ## Convert a field value to lowercase and store in a new field
+  # [[processors.strings.lowercase]]
+  #   field = "uri_stem"
+  #   dest = "uri_stem_normalised"
+
+  ## Convert a tag value to uppercase
+  # [[processors.strings.uppercase]]
+  #   tag = "method"
+
+  ## Convert a field value to titlecase
+  # [[processors.strings.titlecase]]
+  #   field = "status"
+
+  ## Trim leading and trailing whitespace using the default cutset
+  # [[processors.strings.trim]]
+  #   field = "message"
+
+  ## Trim leading characters in cutset
+  # [[processors.strings.trim_left]]
+  #   field = "message"
+  #   cutset = "\t"
+
+  ## Trim trailing characters in cutset
+  # [[processors.strings.trim_right]]
+  #   field = "message"
+  #   cutset = "\r\n"
+
+  ## Trim the given prefix from the field
+  # [[processors.strings.trim_prefix]]
+  #   field = "my_value"
+  #   prefix = "my_"
+
+  ## Trim the given suffix from the field
+  # [[processors.strings.trim_suffix]]
+  #   field = "read_count"
+  #   suffix = "_count"
+
+  ## Replace all non-overlapping instances of old with new
+  # [[processors.strings.replace]]
+  #   measurement = "*"
+  #   old = ":"
+  #   new = "_"
+
+  ## Trims strings based on width
+  # [[processors.strings.left]]
+  #   field = "message"
+  #   width = 10
+
+  ## Decode a base64 encoded utf-8 string
+  # [[processors.strings.base64decode]]
+  #   field = "message"
+```
+
+#### Trim, TrimLeft, TrimRight
+
+The `trim`, `trim_left`, and `trim_right` functions take an optional parameter: `cutset`. This value is a string containing the characters to remove from the value.
+
+#### TrimPrefix, TrimSuffix
+
+The `trim_prefix` and `trim_suffix` functions remove the given `prefix` or
+`suffix` respectively from the string.
+
+#### Replace
+
+The `replace` function does a substring replacement across the entire
+string, which helps reconcile naming conventions between different input and
+output plugins. Typical uses are removing disallowed characters from field
+names or converting one separator style to another. It can also be used to
+strip unwanted characters from metrics. If the replacement would leave an
+empty string, the operation is skipped and the original value is kept.
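The empty-result guard is easy to reproduce in isolation; the standalone sketch below mirrors the logic wired up in `strings.go` later in this patch (the helper name and sample values are illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// replaceKeepNonEmpty mirrors the replace converter's guard: substitute every
// occurrence of old with repl, but keep the original value if the result
// would be an empty string.
func replaceKeepNonEmpty(s, old, repl string) string {
	replaced := strings.Replace(s, old, repl, -1)
	if replaced == "" {
		return s
	}
	return replaced
}

func main() {
	fmt.Println(replaceKeepNonEmpty("foo:bar:baz", "foo", "")) // :bar:baz
	fmt.Println(replaceKeepNonEmpty("foofoofoo", "foo", ""))   // foofoofoo (kept)
}
```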
+ +### Example +**Config** +```toml +[[processors.strings]] + [[processors.strings.lowercase]] + tag = "uri_stem" + + [[processors.strings.trim_prefix]] + tag = "uri_stem" + prefix = "/api/" + + [[processors.strings.uppercase]] + field = "cs-host" + dest = "cs-host_normalised" +``` + +**Input** +``` +iis_log,method=get,uri_stem=/API/HealthCheck cs-host="MIXEDCASE_host",http_version=1.1 1519652321000000000 +``` + +**Output** +``` +iis_log,method=get,uri_stem=healthcheck cs-host="MIXEDCASE_host",http_version=1.1,cs-host_normalised="MIXEDCASE_HOST" 1519652321000000000 +``` + +### Example 2 +**Config** +```toml +[[processors.strings]] + [[processors.strings.lowercase]] + tag_key = "URI-Stem" + + [[processors.strings.replace]] + tag_key = "uri-stem" + old = "-" + new = "_" +``` + +**Input** +``` +iis_log,URI-Stem=/API/HealthCheck http_version=1.1 1519652321000000000 +``` + +**Output** +``` +iis_log,uri_stem=/API/HealthCheck http_version=1.1 1519652321000000000 +``` diff --git a/plugins/processors/strings/strings.go b/plugins/processors/strings/strings.go new file mode 100644 index 000000000..1ac6c6101 --- /dev/null +++ b/plugins/processors/strings/strings.go @@ -0,0 +1,341 @@ +package strings + +import ( + "encoding/base64" + "strings" + "unicode" + "unicode/utf8" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +type Strings struct { + Lowercase []converter `toml:"lowercase"` + Uppercase []converter `toml:"uppercase"` + Titlecase []converter `toml:"titlecase"` + Trim []converter `toml:"trim"` + TrimLeft []converter `toml:"trim_left"` + TrimRight []converter `toml:"trim_right"` + TrimPrefix []converter `toml:"trim_prefix"` + TrimSuffix []converter `toml:"trim_suffix"` + Replace []converter `toml:"replace"` + Left []converter `toml:"left"` + Base64Decode []converter `toml:"base64decode"` + + converters []converter + init bool +} + +type ConvertFunc func(s string) string + +type converter struct { + Field string + FieldKey string + Tag string + TagKey string + Measurement string + Dest string + Cutset string + Suffix string + Prefix string + Old string + New string + Width int + + fn ConvertFunc +} + +const sampleConfig = ` + ## Convert a tag value to uppercase + # [[processors.strings.uppercase]] + # tag = "method" + + ## Convert a field value to lowercase and store in a new field + # [[processors.strings.lowercase]] + # field = "uri_stem" + # dest = "uri_stem_normalised" + + ## Convert a field value to titlecase + # [[processors.strings.titlecase]] + # field = "status" + + ## Trim leading and trailing whitespace using the default cutset + # [[processors.strings.trim]] + # field = "message" + + ## Trim leading characters in cutset + # [[processors.strings.trim_left]] + # field = "message" + # cutset = "\t" + + ## Trim trailing characters in cutset + # [[processors.strings.trim_right]] + # field = "message" + # cutset = "\r\n" + + ## Trim the given prefix from the field + # [[processors.strings.trim_prefix]] + # field = "my_value" + # prefix = "my_" + + ## Trim the given suffix from the field + # [[processors.strings.trim_suffix]] + # field = "read_count" + # suffix = "_count" + + ## Replace all non-overlapping instances of old with new + # [[processors.strings.replace]] + # measurement = "*" + # old = ":" + # new = "_" + + ## Trims strings based on width + # [[processors.strings.left]] + # field = "message" + # width = 10 + + ## Decode a base64 encoded utf-8 string + # [[processors.strings.base64decode]] + # field = "message" +` + +func (s *Strings) 
SampleConfig() string { + return sampleConfig +} + +func (s *Strings) Description() string { + return "Perform string processing on tags, fields, and measurements" +} + +func (c *converter) convertTag(metric telegraf.Metric) { + var tags map[string]string + if c.Tag == "*" { + tags = metric.Tags() + } else { + tags = make(map[string]string) + tv, ok := metric.GetTag(c.Tag) + if !ok { + return + } + tags[c.Tag] = tv + } + + for key, value := range tags { + dest := key + if c.Tag != "*" && c.Dest != "" { + dest = c.Dest + } + metric.AddTag(dest, c.fn(value)) + } +} + +func (c *converter) convertTagKey(metric telegraf.Metric) { + var tags map[string]string + if c.TagKey == "*" { + tags = metric.Tags() + } else { + tags = make(map[string]string) + tv, ok := metric.GetTag(c.TagKey) + if !ok { + return + } + tags[c.TagKey] = tv + } + + for key, value := range tags { + if k := c.fn(key); k != "" { + metric.RemoveTag(key) + metric.AddTag(k, value) + } + } +} + +func (c *converter) convertField(metric telegraf.Metric) { + var fields map[string]interface{} + if c.Field == "*" { + fields = metric.Fields() + } else { + fields = make(map[string]interface{}) + fv, ok := metric.GetField(c.Field) + if !ok { + return + } + fields[c.Field] = fv + } + + for key, value := range fields { + dest := key + if c.Field != "*" && c.Dest != "" { + dest = c.Dest + } + if fv, ok := value.(string); ok { + metric.AddField(dest, c.fn(fv)) + } + } +} + +func (c *converter) convertFieldKey(metric telegraf.Metric) { + var fields map[string]interface{} + if c.FieldKey == "*" { + fields = metric.Fields() + } else { + fields = make(map[string]interface{}) + fv, ok := metric.GetField(c.FieldKey) + if !ok { + return + } + fields[c.FieldKey] = fv + } + + for key, value := range fields { + if k := c.fn(key); k != "" { + metric.RemoveField(key) + metric.AddField(k, value) + } + } +} + +func (c *converter) convertMeasurement(metric telegraf.Metric) { + if metric.Name() != c.Measurement && c.Measurement != "*" { + return + } + + metric.SetName(c.fn(metric.Name())) +} + +func (c *converter) convert(metric telegraf.Metric) { + if c.Field != "" { + c.convertField(metric) + } + + if c.FieldKey != "" { + c.convertFieldKey(metric) + } + + if c.Tag != "" { + c.convertTag(metric) + } + + if c.TagKey != "" { + c.convertTagKey(metric) + } + + if c.Measurement != "" { + c.convertMeasurement(metric) + } +} + +func (s *Strings) initOnce() { + if s.init { + return + } + + s.converters = make([]converter, 0) + for _, c := range s.Lowercase { + c.fn = strings.ToLower + s.converters = append(s.converters, c) + } + for _, c := range s.Uppercase { + c.fn = strings.ToUpper + s.converters = append(s.converters, c) + } + for _, c := range s.Titlecase { + c.fn = strings.Title + s.converters = append(s.converters, c) + } + for _, c := range s.Trim { + c := c + if c.Cutset != "" { + c.fn = func(s string) string { return strings.Trim(s, c.Cutset) } + } else { + c.fn = func(s string) string { return strings.TrimFunc(s, unicode.IsSpace) } + } + s.converters = append(s.converters, c) + } + for _, c := range s.TrimLeft { + c := c + if c.Cutset != "" { + c.fn = func(s string) string { return strings.TrimLeft(s, c.Cutset) } + } else { + c.fn = func(s string) string { return strings.TrimLeftFunc(s, unicode.IsSpace) } + } + s.converters = append(s.converters, c) + } + for _, c := range s.TrimRight { + c := c + if c.Cutset != "" { + c.fn = func(s string) string { return strings.TrimRight(s, c.Cutset) } + } else { + c.fn = func(s string) string { return 
strings.TrimRightFunc(s, unicode.IsSpace) } + } + s.converters = append(s.converters, c) + } + for _, c := range s.TrimPrefix { + c := c + c.fn = func(s string) string { return strings.TrimPrefix(s, c.Prefix) } + s.converters = append(s.converters, c) + } + for _, c := range s.TrimSuffix { + c := c + c.fn = func(s string) string { return strings.TrimSuffix(s, c.Suffix) } + s.converters = append(s.converters, c) + } + for _, c := range s.Replace { + c := c + c.fn = func(s string) string { + newString := strings.Replace(s, c.Old, c.New, -1) + if newString == "" { + return s + } else { + return newString + } + } + s.converters = append(s.converters, c) + } + for _, c := range s.Left { + c := c + c.fn = func(s string) string { + if len(s) < c.Width { + return s + } else { + return s[:c.Width] + } + } + s.converters = append(s.converters, c) + } + for _, c := range s.Base64Decode { + c := c + c.fn = func(s string) string { + data, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return s + } + if utf8.Valid(data) { + return string(data) + } + return s + } + s.converters = append(s.converters, c) + } + + s.init = true +} + +func (s *Strings) Apply(in ...telegraf.Metric) []telegraf.Metric { + s.initOnce() + + for _, metric := range in { + for _, converter := range s.converters { + converter.convert(metric) + } + } + + return in +} + +func init() { + processors.Add("strings", func() telegraf.Processor { + return &Strings{} + }) +} diff --git a/plugins/processors/strings/strings_test.go b/plugins/processors/strings/strings_test.go new file mode 100644 index 000000000..2c1be510e --- /dev/null +++ b/plugins/processors/strings/strings_test.go @@ -0,0 +1,1049 @@ +package strings + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func newM1() telegraf.Metric { + m1, _ := metric.New("IIS_log", + map[string]string{ + "verb": "GET", + "s-computername": "MIXEDCASE_hostname", + }, + map[string]interface{}{ + "request": "/mixed/CASE/paTH/?from=-1D&to=now", + "whitespace": " whitespace\t", + }, + time.Now(), + ) + return m1 +} + +func newM2() telegraf.Metric { + m1, _ := metric.New("IIS_log", + map[string]string{ + "verb": "GET", + "S-ComputerName": "MIXEDCASE_hostname", + }, + map[string]interface{}{ + "Request": "/mixed/CASE/paTH/?from=-1D&to=now", + "req/sec": 5, + " whitespace ": " whitespace\t", + }, + time.Now(), + ) + return m1 +} + +func TestFieldConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "Should change existing field to lowercase", + plugin: &Strings{ + Lowercase: []converter{ + { + Field: "request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/case/path/?from=-1d&to=now", fv) + }, + }, + { + name: "Should change existing field to uppercase", + plugin: &Strings{ + Uppercase: []converter{ + { + Field: "request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/MIXED/CASE/PATH/?FROM=-1D&TO=NOW", fv) + }, + }, + { + name: "Should change existing field to titlecase", + plugin: &Strings{ + Titlecase: []converter{ + { + Field: "request", + }, + }, + }, + check: func(t *testing.T, actual 
telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/Mixed/CASE/PaTH/?From=-1D&To=Now", fv) + }, + }, + { + name: "Should add new lowercase field", + plugin: &Strings{ + Lowercase: []converter{ + { + Field: "request", + Dest: "lowercase_request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + + fv, ok = actual.GetField("lowercase_request") + require.True(t, ok) + require.Equal(t, "/mixed/case/path/?from=-1d&to=now", fv) + }, + }, + { + name: "Should trim from both sides", + plugin: &Strings{ + Trim: []converter{ + { + Field: "request", + Cutset: "/w", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "mixed/CASE/paTH/?from=-1D&to=no", fv) + }, + }, + { + name: "Should trim from both sides and make lowercase", + plugin: &Strings{ + Trim: []converter{ + { + Field: "request", + Cutset: "/w", + }, + }, + Lowercase: []converter{ + { + Field: "request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "mixed/case/path/?from=-1d&to=no", fv) + }, + }, + { + name: "Should trim from left side", + plugin: &Strings{ + TrimLeft: []converter{ + { + Field: "request", + Cutset: "/w", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim from right side", + plugin: &Strings{ + TrimRight: []converter{ + { + Field: "request", + Cutset: "/w", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=no", fv) + }, + }, + { + name: "Should trim prefix '/mixed'", + plugin: &Strings{ + TrimPrefix: []converter{ + { + Field: "request", + Prefix: "/mixed", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim suffix '-1D&to=now'", + plugin: &Strings{ + TrimSuffix: []converter{ + { + Field: "request", + Suffix: "-1D&to=now", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=", fv) + }, + }, + { + name: "Trim without cutset removes whitespace", + plugin: &Strings{ + Trim: []converter{ + { + Field: "whitespace", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace") + require.True(t, ok) + require.Equal(t, "whitespace", fv) + }, + }, + { + name: "Trim left without cutset removes whitespace", + plugin: &Strings{ + TrimLeft: []converter{ + { + Field: "whitespace", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace") + require.True(t, ok) + require.Equal(t, "whitespace\t", fv) + }, + }, + { + name: "Trim right without cutset removes whitespace", + plugin: &Strings{ + TrimRight: []converter{ + { + Field: "whitespace", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace") + require.True(t, ok) + 
require.Equal(t, " whitespace", fv) + }, + }, + { + name: "No change if field missing", + plugin: &Strings{ + Lowercase: []converter{ + { + Field: "xyzzy", + Suffix: "-1D&to=now", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM1()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + +func TestFieldKeyConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "Should change existing field key to lowercase", + plugin: &Strings{ + Lowercase: []converter{ + { + FieldKey: "Request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should change existing field key to uppercase", + plugin: &Strings{ + Uppercase: []converter{ + { + FieldKey: "Request", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("Request") + require.False(t, ok) + + fv, ok = actual.GetField("REQUEST") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim from both sides", + plugin: &Strings{ + Trim: []converter{ + { + FieldKey: "Request", + Cutset: "eR", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("quest") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim from both sides but not make lowercase", + plugin: &Strings{ + // Tag/field key multiple executions occur in the following order: (initOnce) + // Lowercase + // Uppercase + // Titlecase + // Trim + // TrimLeft + // TrimRight + // TrimPrefix + // TrimSuffix + // Replace + Lowercase: []converter{ + { + FieldKey: "Request", + }, + }, + Trim: []converter{ + { + FieldKey: "request", + Cutset: "tse", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("requ") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim from left side", + plugin: &Strings{ + TrimLeft: []converter{ + { + FieldKey: "req/sec", + Cutset: "req/", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("sec") + require.True(t, ok) + require.Equal(t, int64(5), fv) + }, + }, + { + name: "Should trim from right side", + plugin: &Strings{ + TrimRight: []converter{ + { + FieldKey: "req/sec", + Cutset: "req/", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("req/sec") + require.True(t, ok) + require.Equal(t, int64(5), fv) + }, + }, + { + name: "Should trim prefix 'req/'", + plugin: &Strings{ + TrimPrefix: []converter{ + { + FieldKey: "req/sec", + Prefix: "req/", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("sec") + require.True(t, ok) + require.Equal(t, int64(5), fv) + }, + }, + { + name: "Should trim suffix '/sec'", + plugin: &Strings{ + TrimSuffix: []converter{ + { + FieldKey: "req/sec", + Suffix: "/sec", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("req") + require.True(t, 
ok) + require.Equal(t, int64(5), fv) + }, + }, + { + name: "Trim without cutset removes whitespace", + plugin: &Strings{ + Trim: []converter{ + { + FieldKey: " whitespace ", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace") + require.True(t, ok) + require.Equal(t, " whitespace\t", fv) + }, + }, + { + name: "Trim left without cutset removes whitespace", + plugin: &Strings{ + TrimLeft: []converter{ + { + FieldKey: " whitespace ", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("whitespace ") + require.True(t, ok) + require.Equal(t, " whitespace\t", fv) + }, + }, + { + name: "Trim right without cutset removes whitespace", + plugin: &Strings{ + TrimRight: []converter{ + { + FieldKey: " whitespace ", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField(" whitespace") + require.True(t, ok) + require.Equal(t, " whitespace\t", fv) + }, + }, + { + name: "No change if field missing", + plugin: &Strings{ + Lowercase: []converter{ + { + FieldKey: "xyzzy", + Suffix: "-1D&to=now", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("Request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + { + name: "Should trim the existing field to 6 characters", + plugin: &Strings{ + Left: []converter{ + { + Field: "Request", + Width: 6, + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("Request") + require.True(t, ok) + require.Equal(t, "/mixed", fv) + }, + }, + { + name: "Should do nothing to the string", + plugin: &Strings{ + Left: []converter{ + { + Field: "Request", + Width: 600, + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + fv, ok := actual.GetField("Request") + require.True(t, ok) + require.Equal(t, "/mixed/CASE/paTH/?from=-1D&to=now", fv) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM2()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + +func TestTagConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "Should change existing tag to lowercase", + plugin: &Strings{ + Lowercase: []converter{ + { + Tag: "s-computername", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "mixedcase_hostname", tv) + }, + }, + { + name: "Should add new lowercase tag", + plugin: &Strings{ + Lowercase: []converter{ + { + Tag: "s-computername", + Dest: "s-computername_lowercase", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + + tv, ok = actual.GetTag("s-computername_lowercase") + require.True(t, ok) + require.Equal(t, "mixedcase_hostname", tv) + }, + }, + { + name: "Should add new uppercase tag", + plugin: &Strings{ + Uppercase: []converter{ + { + Tag: "s-computername", + Dest: "s-computername_uppercase", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + 
require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + + tv, ok = actual.GetTag("s-computername_uppercase") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_HOSTNAME", tv) + }, + }, + { + name: "Should add new titlecase tag", + plugin: &Strings{ + Titlecase: []converter{ + { + Tag: "s-computername", + Dest: "s-computername_titlecase", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + + tv, ok = actual.GetTag("s-computername_titlecase") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM1()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + +func TestTagKeyConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "Should change existing tag key to lowercase", + plugin: &Strings{ + Lowercase: []converter{ + { + Tag: "S-ComputerName", + TagKey: "S-ComputerName", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "mixedcase_hostname", tv) + }, + }, + { + name: "Should add new lowercase tag key", + plugin: &Strings{ + Lowercase: []converter{ + { + TagKey: "S-ComputerName", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("S-ComputerName") + require.False(t, ok) + + tv, ok = actual.GetTag("s-computername") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + }, + }, + { + name: "Should add new uppercase tag key", + plugin: &Strings{ + Uppercase: []converter{ + { + TagKey: "S-ComputerName", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + tv, ok := actual.GetTag("verb") + require.True(t, ok) + require.Equal(t, "GET", tv) + + tv, ok = actual.GetTag("S-ComputerName") + require.False(t, ok) + + tv, ok = actual.GetTag("S-COMPUTERNAME") + require.True(t, ok) + require.Equal(t, "MIXEDCASE_hostname", tv) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM2()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + +func TestMeasurementConversions(t *testing.T) { + tests := []struct { + name string + plugin *Strings + check func(t *testing.T, actual telegraf.Metric) + }{ + { + name: "lowercase measurement", + plugin: &Strings{ + Lowercase: []converter{ + { + Measurement: "IIS_log", + }, + }, + }, + check: func(t *testing.T, actual telegraf.Metric) { + name := actual.Name() + require.Equal(t, "iis_log", name) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + metrics := tt.plugin.Apply(newM1()) + require.Len(t, metrics, 1) + tt.check(t, metrics[0]) + }) + } +} + +func TestMultipleConversions(t *testing.T) { + plugin := &Strings{ + Lowercase: []converter{ + { + Tag: "s-computername", + }, + { + Field: "request", + }, + { + Field: "cs-host", + Dest: "cs-host_lowercase", + }, + }, + Uppercase: 
[]converter{ + { + Tag: "verb", + }, + }, + Titlecase: []converter{ + { + Field: "status", + }, + }, + Replace: []converter{ + { + Tag: "foo", + Old: "a", + New: "x", + }, + { + Tag: "bar", + Old: "b", + New: "y", + }, + }, + } + + m, _ := metric.New("IIS_log", + map[string]string{ + "verb": "GET", + "resp_code": "200", + "s-computername": "MIXEDCASE_hostname", + "foo": "a", + "bar": "b", + }, + map[string]interface{}{ + "request": "/mixed/CASE/paTH/?from=-1D&to=now", + "cs-host": "AAAbbb", + "ignore_number": int64(200), + "ignore_bool": true, + "status": "green", + }, + time.Now(), + ) + + processed := plugin.Apply(m) + + expectedFields := map[string]interface{}{ + "request": "/mixed/case/path/?from=-1d&to=now", + "ignore_number": int64(200), + "ignore_bool": true, + "cs-host": "AAAbbb", + "cs-host_lowercase": "aaabbb", + "status": "Green", + } + expectedTags := map[string]string{ + "verb": "GET", + "resp_code": "200", + "s-computername": "mixedcase_hostname", + "foo": "x", + "bar": "y", + } + + assert.Equal(t, expectedFields, processed[0].Fields()) + assert.Equal(t, expectedTags, processed[0].Tags()) +} + +func TestReadmeExample(t *testing.T) { + plugin := &Strings{ + Lowercase: []converter{ + { + Tag: "uri_stem", + }, + }, + TrimPrefix: []converter{ + { + Tag: "uri_stem", + Prefix: "/api/", + }, + }, + Uppercase: []converter{ + { + Field: "cs-host", + Dest: "cs-host_normalised", + }, + }, + } + + m, _ := metric.New("iis_log", + map[string]string{ + "verb": "get", + "uri_stem": "/API/HealthCheck", + }, + map[string]interface{}{ + "cs-host": "MIXEDCASE_host", + "referrer": "-", + "ident": "-", + "http_version": "1.1", + "agent": "UserAgent", + "resp_bytes": int64(270), + }, + time.Now(), + ) + + processed := plugin.Apply(m) + + expectedTags := map[string]string{ + "verb": "get", + "uri_stem": "healthcheck", + } + expectedFields := map[string]interface{}{ + "cs-host": "MIXEDCASE_host", + "cs-host_normalised": "MIXEDCASE_HOST", + "referrer": "-", + "ident": "-", + "http_version": "1.1", + "agent": "UserAgent", + "resp_bytes": int64(270), + } + + assert.Equal(t, expectedFields, processed[0].Fields()) + assert.Equal(t, expectedTags, processed[0].Tags()) +} + +func newMetric(name string) telegraf.Metric { + tags := map[string]string{} + fields := map[string]interface{}{} + m, _ := metric.New(name, tags, fields, time.Now()) + return m +} + +func TestMeasurementReplace(t *testing.T) { + plugin := &Strings{ + Replace: []converter{ + { + Old: "_", + New: "-", + Measurement: "*", + }, + }, + } + metrics := []telegraf.Metric{ + newMetric("foo:some_value:bar"), + newMetric("average:cpu:usage"), + newMetric("average_cpu_usage"), + } + results := plugin.Apply(metrics...) + assert.Equal(t, "foo:some-value:bar", results[0].Name(), "`_` was not changed to `-`") + assert.Equal(t, "average:cpu:usage", results[1].Name(), "Input name should have been unchanged") + assert.Equal(t, "average-cpu-usage", results[2].Name(), "All instances of `_` should have been changed to `-`") +} + +func TestMeasurementCharDeletion(t *testing.T) { + plugin := &Strings{ + Replace: []converter{ + { + Old: "foo", + New: "", + Measurement: "*", + }, + }, + } + metrics := []telegraf.Metric{ + newMetric("foo:bar:baz"), + newMetric("foofoofoo"), + newMetric("barbarbar"), + } + results := plugin.Apply(metrics...) 
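+	// The first metric only loses its leading "foo"; deleting "foo" from
+	// "foofoofoo" would leave an empty name, so the original is kept; the
+	// last metric contains no "foo" and is untouched.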
+ assert.Equal(t, ":bar:baz", results[0].Name(), "Should have deleted the initial `foo`") + assert.Equal(t, "foofoofoo", results[1].Name(), "Should have refused to delete the whole string") + assert.Equal(t, "barbarbar", results[2].Name(), "Should not have changed the input") +} + +func TestBase64Decode(t *testing.T) { + tests := []struct { + name string + plugin *Strings + metric []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "base64decode success", + plugin: &Strings{ + Base64Decode: []converter{ + { + Field: "message", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "aG93ZHk=", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "howdy", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "base64decode not valid base64 returns original string", + plugin: &Strings{ + Base64Decode: []converter{ + { + Field: "message", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "_not_base64_", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "_not_base64_", + }, + time.Unix(0, 0), + ), + }, + }, + { + name: "base64decode not valid utf-8 returns original string", + plugin: &Strings{ + Base64Decode: []converter{ + { + Field: "message", + }, + }, + }, + metric: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "//5oAG8AdwBkAHkA", + }, + time.Unix(0, 0), + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "message": "//5oAG8AdwBkAHkA", + }, + time.Unix(0, 0), + ), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actual := tt.plugin.Apply(tt.metric...) + testutil.RequireMetricsEqual(t, tt.expected, actual) + }) + } +} diff --git a/plugins/processors/tag_limit/README.md b/plugins/processors/tag_limit/README.md new file mode 100644 index 000000000..b287f0f8d --- /dev/null +++ b/plugins/processors/tag_limit/README.md @@ -0,0 +1,27 @@ +# Tag Limit Processor Plugin + +Use the `tag_limit` processor to ensure that only a certain number of tags are +preserved for any given metric, and to choose the tags to preserve when the +number of tags appended by the data source is over the limit. + +This can be useful when dealing with output systems (e.g. Stackdriver) that +impose hard limits on the number of tags/labels per metric or where high +levels of cardinality are computationally and/or financially expensive. 
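As a rough sketch of this behaviour, the standalone Go program below (metric names, tags, and values are illustrative) keeps at most two tags and always preserves `region` when trimming:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	taglimit "github.com/influxdata/telegraf/plugins/processors/tag_limit"
)

func main() {
	// Keep at most two tags, preferring "region" when over the limit.
	plugin := taglimit.TagLimit{
		Limit: 2,
		Keep:  []string{"region"},
	}

	// Illustrative metric with three tags.
	m, _ := metric.New("throughput",
		map[string]string{
			"region":      "us-east1",
			"environment": "qa",
			"month":       "Jun",
		},
		map[string]interface{}{"lower": int64(10)},
		time.Now(),
	)

	out := plugin.Apply(m)
	fmt.Println(len(out[0].Tags())) // 2
	_, ok := out[0].GetTag("region")
	fmt.Println(ok) // true: "region" survives the trim
}
```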
+ +### Configuration + +```toml +[[processors.tag_limit]] + ## Maximum number of tags to preserve + limit = 3 + + ## List of tags to preferentially preserve + keep = ["environment", "region"] +``` + +### Example + +```diff ++ throughput month=Jun,environment=qa,region=us-east1,lower=10i,upper=1000i,mean=500i 1560540094000000000 ++ throughput environment=qa,region=us-east1,lower=10i 1560540094000000000 +``` diff --git a/plugins/processors/tag_limit/tag_limit.go b/plugins/processors/tag_limit/tag_limit.go new file mode 100644 index 000000000..41353a8f8 --- /dev/null +++ b/plugins/processors/tag_limit/tag_limit.go @@ -0,0 +1,86 @@ +package taglimit + +import ( + "fmt" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" + "log" +) + +const sampleConfig = ` + ## Maximum number of tags to preserve + limit = 10 + + ## List of tags to preferentially preserve + keep = ["foo", "bar", "baz"] +` + +type TagLimit struct { + Limit int `toml:"limit"` + Keep []string `toml:"keep"` + init bool + keepTags map[string]string +} + +func (d *TagLimit) SampleConfig() string { + return sampleConfig +} + +func (d *TagLimit) Description() string { + return "Restricts the number of tags that can pass through this filter and chooses which tags to preserve when over the limit." +} + +func (d *TagLimit) initOnce() error { + if d.init { + return nil + } + if len(d.Keep) > d.Limit { + return fmt.Errorf("%d keep tags is greater than %d total tag limit", len(d.Keep), d.Limit) + } + d.keepTags = make(map[string]string) + // convert list of tags-to-keep to a map so we can do constant-time lookups + for _, tag_key := range d.Keep { + d.keepTags[tag_key] = "" + } + d.init = true + return nil +} + +func (d *TagLimit) Apply(in ...telegraf.Metric) []telegraf.Metric { + err := d.initOnce() + if err != nil { + log.Printf("E! 
[processors.tag_limit] could not create tag_limit processor: %v", err) + return in + } + for _, point := range in { + pointOriginalTags := point.TagList() + lenPointTags := len(pointOriginalTags) + if lenPointTags <= d.Limit { + continue + } + tagsToRemove := make([]string, lenPointTags-d.Limit) + removeIdx := 0 + // remove extraneous tags, stop once we're at the limit + for _, t := range pointOriginalTags { + if _, ok := d.keepTags[t.Key]; !ok { + tagsToRemove[removeIdx] = t.Key + removeIdx++ + lenPointTags-- + } + if lenPointTags <= d.Limit { + break + } + } + for _, t := range tagsToRemove { + point.RemoveTag(t) + } + } + + return in +} + +func init() { + processors.Add("tag_limit", func() telegraf.Processor { + return &TagLimit{} + }) +} diff --git a/plugins/processors/tag_limit/tag_limit_test.go b/plugins/processors/tag_limit/tag_limit_test.go new file mode 100644 index 000000000..9412d866b --- /dev/null +++ b/plugins/processors/tag_limit/tag_limit_test.go @@ -0,0 +1,86 @@ +package taglimit + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func MustMetric(name string, tags map[string]string, fields map[string]interface{}, metricTime time.Time) telegraf.Metric { + if tags == nil { + tags = map[string]string{} + } + if fields == nil { + fields = map[string]interface{}{} + } + m, _ := metric.New(name, tags, fields, metricTime) + return m +} + +func TestUnderLimit(t *testing.T) { + currentTime := time.Now() + + oneTags := make(map[string]string) + oneTags["foo"] = "bar" + + tenTags := make(map[string]string) + tenTags["a"] = "bar" + tenTags["b"] = "bar" + tenTags["c"] = "bar" + tenTags["d"] = "bar" + tenTags["e"] = "bar" + tenTags["f"] = "bar" + tenTags["g"] = "bar" + tenTags["h"] = "bar" + tenTags["i"] = "bar" + tenTags["j"] = "bar" + + tagLimitConfig := TagLimit{ + Limit: 10, + Keep: []string{"foo", "bar"}, + } + + m1 := MustMetric("foo", oneTags, nil, currentTime) + m2 := MustMetric("bar", tenTags, nil, currentTime) + limitApply := tagLimitConfig.Apply(m1, m2) + assert.Equal(t, oneTags, limitApply[0].Tags(), "one tag") + assert.Equal(t, tenTags, limitApply[1].Tags(), "ten tags") +} + +func TestTrim(t *testing.T) { + currentTime := time.Now() + + threeTags := make(map[string]string) + threeTags["a"] = "foo" + threeTags["b"] = "bar" + threeTags["z"] = "baz" + + tenTags := make(map[string]string) + tenTags["a"] = "foo" + tenTags["b"] = "bar" + tenTags["c"] = "baz" + tenTags["d"] = "abc" + tenTags["e"] = "def" + tenTags["f"] = "ghi" + tenTags["g"] = "jkl" + tenTags["h"] = "mno" + tenTags["i"] = "pqr" + tenTags["j"] = "stu" + + tagLimitConfig := TagLimit{ + Limit: 3, + Keep: []string{"a", "b"}, + } + + m1 := MustMetric("foo", threeTags, nil, currentTime) + m2 := MustMetric("bar", tenTags, nil, currentTime) + limitApply := tagLimitConfig.Apply(m1, m2) + assert.Equal(t, threeTags, limitApply[0].Tags(), "three tags") + trimmedTags := limitApply[1].Tags() + assert.Equal(t, 3, len(trimmedTags), "ten tags") + assert.Equal(t, "foo", trimmedTags["a"], "preserved: a") + assert.Equal(t, "bar", trimmedTags["b"], "preserved: b") +} diff --git a/plugins/processors/template/README.md b/plugins/processors/template/README.md new file mode 100644 index 000000000..348dae096 --- /dev/null +++ b/plugins/processors/template/README.md @@ -0,0 +1,59 @@ +# Template Processor + +The `template` processor applies a Go template to metrics to generate a new +tag. 
The primary use case of this plugin is to create a tag that can be used +for dynamic routing to multiple output plugins or using an output specific +routing option. + +The template has access to each metric's measurement name, tags, fields, and +timestamp using the [interface in `/template_metric.go`](template_metric.go). + +Read the full [Go Template Documentation][]. + +### Configuration + +```toml +[[processors.template]] + ## Tag to set with the output of the template. + tag = "topic" + + ## Go template used to create the tag value. In order to ease TOML + ## escaping requirements, you may wish to use single quotes around the + ## template string. + template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' +``` + +### Example + +Combine multiple tags to create a single tag: +```toml +[[processors.template]] + tag = "topic" + template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' +``` + +```diff +- cpu,level=debug,hostname=localhost time_idle=42 ++ cpu,level=debug,hostname=localhost,topic=localhost.debug time_idle=42 +``` + +Add measurement name as a tag: +```toml +[[processors.template]] + tag = "measurement" + template = '{{ .Name }}' +``` + +```diff +- cpu,hostname=localhost time_idle=42 ++ cpu,hostname=localhost,measurement=cpu time_idle=42 +``` + +Add the year as a tag, similar to the date processor: +```toml +[[processors.template]] + tag = "year" + template = '{{.Time.UTC.Year}}' +``` + +[Go Template Documentation]: https://golang.org/pkg/text/template/ diff --git a/plugins/processors/template/template.go b/plugins/processors/template/template.go new file mode 100644 index 000000000..f4470a07c --- /dev/null +++ b/plugins/processors/template/template.go @@ -0,0 +1,66 @@ +package template + +import ( + "strings" + "text/template" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +type TemplateProcessor struct { + Tag string `toml:"tag"` + Template string `toml:"template"` + Log telegraf.Logger `toml:"-"` + tmpl *template.Template +} + +const sampleConfig = ` + ## Tag to set with the output of the template. + tag = "topic" + + ## Go template used to create the tag value. In order to ease TOML + ## escaping requirements, you may wish to use single quotes around the + ## template string. 
+ template = '{{ .Tag "hostname" }}.{{ .Tag "level" }}' +` + +func (r *TemplateProcessor) SampleConfig() string { + return sampleConfig +} + +func (r *TemplateProcessor) Description() string { + return "Uses a Go template to create a new tag" +} + +func (r *TemplateProcessor) Apply(in ...telegraf.Metric) []telegraf.Metric { + // for each metric in "in" array + for _, metric := range in { + var b strings.Builder + newM := TemplateMetric{metric} + + // supply TemplateMetric and Template from configuration to Template.Execute + err := r.tmpl.Execute(&b, &newM) + if err != nil { + r.Log.Errorf("failed to execute template: %v", err) + continue + } + + metric.AddTag(r.Tag, b.String()) + } + return in +} + +func (r *TemplateProcessor) Init() error { + // create template + t, err := template.New("configured_template").Parse(r.Template) + + r.tmpl = t + return err +} + +func init() { + processors.Add("template", func() telegraf.Processor { + return &TemplateProcessor{} + }) +} diff --git a/plugins/processors/template/template_metric.go b/plugins/processors/template/template_metric.go new file mode 100644 index 000000000..e4a81bd1c --- /dev/null +++ b/plugins/processors/template/template_metric.go @@ -0,0 +1,29 @@ +package template + +import ( + "time" + + "github.com/influxdata/telegraf" +) + +type TemplateMetric struct { + metric telegraf.Metric +} + +func (m *TemplateMetric) Name() string { + return m.metric.Name() +} + +func (m *TemplateMetric) Tag(key string) string { + tagString, _ := m.metric.GetTag(key) + return tagString +} + +func (m *TemplateMetric) Field(key string) interface{} { + field, _ := m.metric.GetField(key) + return field +} + +func (m *TemplateMetric) Time() time.Time { + return m.metric.Time() +} diff --git a/plugins/processors/template/template_test.go b/plugins/processors/template/template_test.go new file mode 100644 index 000000000..f43d69795 --- /dev/null +++ b/plugins/processors/template/template_test.go @@ -0,0 +1,117 @@ +package template + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestName(t *testing.T) { + plugin := TemplateProcessor{ + Tag: "measurement", + Template: "{{ .Name }}", + } + + err := plugin.Init() + require.NoError(t, err) + + input := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + } + + actual := plugin.Apply(input...) 
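+	// The rendered template ({{ .Name }}) is written to the configured tag, so
+	// the metric gains a "measurement" tag whose value is the metric name.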
+ expected := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "measurement": "cpu", + }, + map[string]interface{}{ + "time_idle": 42, + }, + time.Unix(0, 0), + ), + } + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestTagTemplateConcatenate(t *testing.T) { + now := time.Now() + + // Create Template processor + tmp := TemplateProcessor{Tag: "topic", Template: `{{.Tag "hostname"}}.{{ .Tag "level" }}`} + // manually init + err := tmp.Init() + + if err != nil { + panic(err) + } + + // create metric for testing + input := []telegraf.Metric{testutil.MustMetric("Tags", map[string]string{"hostname": "localhost", "level": "debug"}, nil, now)} + + // act + actual := tmp.Apply(input[0]) + + // assert + expected := []telegraf.Metric{testutil.MustMetric("Tags", map[string]string{"hostname": "localhost", "level": "debug", "topic": "localhost.debug"}, nil, now)} + testutil.RequireMetricsEqual(t, expected, actual) +} + +func TestMetricMissingTagsIsNotLost(t *testing.T) { + now := time.Now() + + // Create Template processor + tmp := TemplateProcessor{Tag: "topic", Template: `{{.Tag "hostname"}}.{{ .Tag "level" }}`} + // manually init + err := tmp.Init() + + if err != nil { + panic(err) + } + + // create metrics for testing + m1 := testutil.MustMetric("Works", map[string]string{"hostname": "localhost", "level": "debug"}, nil, now) + m2 := testutil.MustMetric("Fails", map[string]string{"hostname": "localhost"}, nil, now) + + // act + actual := tmp.Apply(m1, m2) + + // assert + // make sure no metrics are lost when a template process fails + assert.Equal(t, 2, len(actual), "Number of metrics input should equal number of metrics output") +} + +func TestTagAndFieldConcatenate(t *testing.T) { + now := time.Now() + + // Create Template processor + tmp := TemplateProcessor{Tag: "LocalTemp", Template: `{{.Tag "location"}} is {{ .Field "temperature" }}`} + // manually init + err := tmp.Init() + + if err != nil { + panic(err) + } + + // create metric for testing + m1 := testutil.MustMetric("weather", map[string]string{"location": "us-midwest"}, map[string]interface{}{"temperature": "too warm"}, now) + + // act + actual := tmp.Apply(m1) + + // assert + expected := []telegraf.Metric{testutil.MustMetric("weather", map[string]string{"location": "us-midwest", "LocalTemp": "us-midwest is too warm"}, map[string]interface{}{"temperature": "too warm"}, now)} + testutil.RequireMetricsEqual(t, expected, actual) +} diff --git a/plugins/processors/topk/README.md b/plugins/processors/topk/README.md index 9c9e48af9..308d4f9f8 100644 --- a/plugins/processors/topk/README.md +++ b/plugins/processors/topk/README.md @@ -53,7 +53,7 @@ Note that depending on the amount of metrics on each computed bucket, more than # add_rank_fields = [] ## These settings provide a way to know what values the plugin is generating - ## when aggregating metrics. The 'add_agregate_field' setting allows to + ## when aggregating metrics. The 'add_aggregate_field' setting allows to ## specify for which fields the final aggregation value is required. If the ## list is non empty, then a field will be added to each every metric for ## each field present in this setting. This field will contain @@ -72,3 +72,36 @@ This processor does not add tags by default. But the setting `add_groupby_tag` w ### Fields: This processor does not add fields by default. 
But the settings `add_rank_fields` and `add_aggregation_fields` will add one or several fields if set to anything other than "" + + +### Example +**Config** +```toml +[[processors.topk]] + period = 20 + k = 3 + group_by = ["pid"] + fields = ["cpu_usage"] +``` + +**Output difference with topk** +```diff +< procstat,pid=2088,process_name=Xorg cpu_usage=7.296576662282613 1546473820000000000 +< procstat,pid=2780,process_name=ibus-engine-simple cpu_usage=0 1546473820000000000 +< procstat,pid=2554,process_name=gsd-sound cpu_usage=0 1546473820000000000 +< procstat,pid=3484,process_name=chrome cpu_usage=4.274300361942799 1546473820000000000 +< procstat,pid=2467,process_name=gnome-shell-calendar-server cpu_usage=0 1546473820000000000 +< procstat,pid=2525,process_name=gvfs-goa-volume-monitor cpu_usage=0 1546473820000000000 +< procstat,pid=2888,process_name=gnome-terminal-server cpu_usage=1.0224991500287577 1546473820000000000 +< procstat,pid=2454,process_name=ibus-x11 cpu_usage=0 1546473820000000000 +< procstat,pid=2564,process_name=gsd-xsettings cpu_usage=0 1546473820000000000 +< procstat,pid=12184,process_name=docker cpu_usage=0 1546473820000000000 +< procstat,pid=2432,process_name=pulseaudio cpu_usage=9.892858669796528 1546473820000000000 +--- +> procstat,pid=2432,process_name=pulseaudio cpu_usage=11.486933087507786 1546474120000000000 +> procstat,pid=2432,process_name=pulseaudio cpu_usage=10.056503212060552 1546474130000000000 +> procstat,pid=23620,process_name=chrome cpu_usage=2.098690278123081 1546474120000000000 +> procstat,pid=23620,process_name=chrome cpu_usage=17.52514619948493 1546474130000000000 +> procstat,pid=2088,process_name=Xorg cpu_usage=1.6016732172309973 1546474120000000000 +> procstat,pid=2088,process_name=Xorg cpu_usage=8.481040931533833 1546474130000000000 +``` diff --git a/plugins/processors/topk/topk.go b/plugins/processors/topk/topk.go index 8a52fa8d4..907ec1cc4 100644 --- a/plugins/processors/topk/topk.go +++ b/plugins/processors/topk/topk.go @@ -10,6 +10,7 @@ import ( "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/filter" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/metric" "github.com/influxdata/telegraf/plugins/processors" ) @@ -42,8 +43,8 @@ func New() *TopK { topk.Aggregation = "mean" topk.GroupBy = []string{"*"} topk.AddGroupByTag = "" - topk.AddRankFields = []string{""} - topk.AddAggregateFields = []string{""} + topk.AddRankFields = []string{} + topk.AddAggregateFields = []string{} // Initialize cache topk.Reset() @@ -76,12 +77,12 @@ var sampleConfig = ` ## tags. If this setting is different than "" the plugin will add a ## tag (which name will be the value of this setting) to each metric with ## the value of the calculated GroupBy tag. Useful for debugging - # add_groupby_tag = "" + # add_groupby_tag = "" ## These settings provide a way to know the position of each metric in ## the top k. The 'add_rank_field' setting allows to specify for which ## fields the position is required. If the list is non empty, then a field - ## will be added to each and every metric for each string present in this + ## will be added to each and every metric for each string present in this ## setting. This field will contain the ranking of the group that ## the metric belonged to when aggregated over that field. 
## The name of the field will be set to the name of the aggregation field, @@ -89,7 +90,7 @@ var sampleConfig = ` # add_rank_fields = [] ## These settings provide a way to know what values the plugin is generating - ## when aggregating metrics. The 'add_agregate_field' setting allows to + ## when aggregating metrics. The 'add_aggregate_field' setting allows to ## specify for which fields the final aggregation value is required. If the ## list is non empty, then a field will be added to each every metric for ## each field present in this setting. This field will contain @@ -202,14 +203,21 @@ func (t *TopK) Apply(in ...telegraf.Metric) []telegraf.Metric { if t.aggFieldSet == nil { t.aggFieldSet = make(map[string]bool) for _, f := range t.AddAggregateFields { - t.aggFieldSet[f] = true + if f != "" { + t.aggFieldSet[f] = true + } } } // Add the metrics received to our internal cache for _, m := range in { + // When tracking metrics this plugin could deadlock the input by + // holding undelivered metrics while the input waits for metrics to be + // delivered. Instead, treat all handled metrics as delivered and + // produced metrics as untracked in a similar way to aggregators. + m.Drop() - // Check if the metric has any of the fields over wich we are aggregating + // Check if the metric has any of the fields over which we are aggregating hasField := false for _, f := range t.Fields { if m.HasField(f) { @@ -273,19 +281,16 @@ func (t *TopK) push() []telegraf.Metric { // Get the top K metrics for each field and add them to the return value addedKeys := make(map[string]bool) - groupTag := t.AddGroupByTag for _, field := range t.Fields { // Sort the aggregations sortMetrics(aggregations, field, t.Bottomk) - // Create a one dimentional list with the top K metrics of each key + // Create a one dimensional list with the top K metrics of each key for i, ag := range aggregations[0:min(t.K, len(aggregations))] { - // Check whether of not we need to add fields of tags to the selected metrics - if len(t.aggFieldSet) != 0 || len(t.rankFieldSet) != 0 || groupTag != "" { + if len(t.aggFieldSet) != 0 || len(t.rankFieldSet) != 0 || t.AddGroupByTag != "" { for _, m := range t.cache[ag.groupbykey] { - // Add the aggregation final value if requested _, addAggField := t.aggFieldSet[field] if addAggField && m.HasField(field) { @@ -311,12 +316,20 @@ func (t *TopK) push() []telegraf.Metric { t.Reset() - return ret + result := make([]telegraf.Metric, 0, len(ret)) + for _, m := range ret { + copy, err := metric.New(m.Name(), m.Tags(), m.Fields(), m.Time(), m.Type()) + if err != nil { + continue + } + result = append(result, copy) + } + + return result } // Function that generates the aggregation functions func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metric, []string) map[string]float64, error) { - // This is a function aggregates a set of metrics using a given aggregation function var aggregator = func(ms []telegraf.Metric, fields []string, f func(map[string]float64, float64, string)) map[string]float64 { agg := make(map[string]float64) @@ -405,7 +418,7 @@ func (t *TopK) getAggregationFunction(aggOperation string) (func([]telegraf.Metr } // Divide by the number of recorded measurements collected for every field noMeasurementsFound := true // Canary to check if no field with values was found, so we can return nil - for k, _ := range mean { + for k := range mean { if meanCounters[k] == 0 { mean[k] = 0 continue diff --git a/plugins/processors/topk/topk_test.go 
b/plugins/processors/topk/topk_test.go index 2f5844448..928111b29 100644 --- a/plugins/processors/topk/topk_test.go +++ b/plugins/processors/topk/topk_test.go @@ -1,12 +1,12 @@ package topk import ( - "reflect" "testing" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" + "github.com/influxdata/telegraf/testutil" ) // Key, value pair that represents a telegraf.Metric Field @@ -35,7 +35,7 @@ type metricChange struct { newTags []tag // Tags that should be added to the metric runHash bool // Sometimes the metrics' HashID must be run so reflect.DeepEqual works - // This happens because telegraf.Metric mantains an internal cache of + // This happens because telegraf.Metric maintains an internal cache of // its hash value that is set when HashID() is called for the first time } @@ -95,7 +95,7 @@ func deepCopy(a []telegraf.Metric) []telegraf.Metric { func belongs(m telegraf.Metric, ms []telegraf.Metric) bool { for _, i := range ms { - if reflect.DeepEqual(i, m) { + if testutil.MetricEqual(i, m) { return true } } @@ -149,7 +149,7 @@ func TestTopkAggregatorsSmokeTests(t *testing.T) { aggregators := []string{"mean", "sum", "max", "min"} - //The answer is equal to the original set for these particual scenarios + //The answer is equal to the original set for these particular scenarios input := MetricsSet1 answer := MetricsSet1 @@ -178,11 +178,11 @@ func TestTopkMeanAddAggregateFields(t *testing.T) { // Generate the answer chng := fieldList(field{"a_topk_aggregate", float64(28.044)}) changeSet := map[int]metricChange{ - 0: metricChange{newFields: chng}, - 1: metricChange{newFields: chng}, - 2: metricChange{newFields: chng}, - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, + 0: {newFields: chng}, + 1: {newFields: chng}, + 2: {newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -208,11 +208,11 @@ func TestTopkSumAddAggregateFields(t *testing.T) { // Generate the answer chng := fieldList(field{"a_topk_aggregate", float64(140.22)}) changeSet := map[int]metricChange{ - 0: metricChange{newFields: chng}, - 1: metricChange{newFields: chng}, - 2: metricChange{newFields: chng}, - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, + 0: {newFields: chng}, + 1: {newFields: chng}, + 2: {newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -238,11 +238,11 @@ func TestTopkMaxAddAggregateFields(t *testing.T) { // Generate the answer chng := fieldList(field{"a_topk_aggregate", float64(50.5)}) changeSet := map[int]metricChange{ - 0: metricChange{newFields: chng}, - 1: metricChange{newFields: chng}, - 2: metricChange{newFields: chng}, - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, + 0: {newFields: chng}, + 1: {newFields: chng}, + 2: {newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -268,11 +268,11 @@ func TestTopkMinAddAggregateFields(t *testing.T) { // Generate the answer chng := fieldList(field{"a_topk_aggregate", float64(0.3)}) changeSet := map[int]metricChange{ - 0: metricChange{newFields: chng}, - 1: metricChange{newFields: chng}, - 2: metricChange{newFields: chng}, - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, + 0: {newFields: chng}, + 1: {newFields: chng}, + 2: {newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -297,10 +297,10 @@ func TestTopkGroupby1(t 
*testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 2: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(74.18)})}, - 3: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(72)})}, - 4: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})}, - 5: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})}, + 2: {newFields: fieldList(field{"value_topk_aggregate", float64(74.18)})}, + 3: {newFields: fieldList(field{"value_topk_aggregate", float64(72)})}, + 4: {newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})}, + 5: {newFields: fieldList(field{"value_topk_aggregate", float64(163.22)})}, } answer := generateAns(input, changeSet) @@ -326,11 +326,11 @@ func TestTopkGroupby2(t *testing.T) { chng2 := fieldList(field{"value_topk_aggregate", float64(72)}) chng3 := fieldList(field{"value_topk_aggregate", float64(81.61)}) changeSet := map[int]metricChange{ - 1: metricChange{newFields: chng1}, - 2: metricChange{newFields: chng1}, - 3: metricChange{newFields: chng2}, - 4: metricChange{newFields: chng3}, - 5: metricChange{newFields: chng3}, + 1: {newFields: chng1}, + 2: {newFields: chng1}, + 3: {newFields: chng2}, + 4: {newFields: chng3}, + 5: {newFields: chng3}, } answer := generateAns(input, changeSet) @@ -354,8 +354,8 @@ func TestTopkGroupby3(t *testing.T) { // Generate the answer chng := fieldList(field{"value_topk_aggregate", float64(75.3)}) changeSet := map[int]metricChange{ - 4: metricChange{newFields: chng}, - 5: metricChange{newFields: chng}, + 4: {newFields: chng}, + 5: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -381,10 +381,10 @@ func TestTopkGroupbyFields1(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 0: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})}, - 1: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})}, - 2: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})}, - 5: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(29.45)})}, + 0: {newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})}, + 1: {newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})}, + 2: {newFields: fieldList(field{"A_topk_aggregate", float64(39.01)})}, + 5: {newFields: fieldList(field{"A_topk_aggregate", float64(29.45)})}, } answer := generateAns(input, changeSet) @@ -409,10 +409,10 @@ func TestTopkGroupbyFields2(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 0: metricChange{newFields: fieldList(field{"C_topk_aggregate", float64(72.41)})}, - 2: metricChange{newFields: fieldList(field{"B_topk_aggregate", float64(60.96)})}, - 4: metricChange{newFields: fieldList(field{"B_topk_aggregate", float64(81.55)}, field{"C_topk_aggregate", float64(49.96)})}, - 5: metricChange{newFields: fieldList(field{"C_topk_aggregate", float64(49.96)})}, + 0: {newFields: fieldList(field{"C_topk_aggregate", float64(72.41)})}, + 2: {newFields: fieldList(field{"B_topk_aggregate", float64(60.96)})}, + 4: {newFields: fieldList(field{"B_topk_aggregate", float64(81.55)}, field{"C_topk_aggregate", float64(49.96)})}, + 5: {newFields: fieldList(field{"C_topk_aggregate", float64(49.96)})}, } answer := generateAns(input, changeSet) @@ -438,9 +438,9 @@ func TestTopkGroupbyMetricName1(t *testing.T) { // Generate the answer chng := fieldList(field{"value_topk_aggregate", float64(235.22000000000003)}) changeSet := 
map[int]metricChange{ - 3: metricChange{newFields: chng}, - 4: metricChange{newFields: chng}, - 5: metricChange{newFields: chng}, + 3: {newFields: chng}, + 4: {newFields: chng}, + 5: {newFields: chng}, } answer := generateAns(input, changeSet) @@ -465,10 +465,10 @@ func TestTopkGroupbyMetricName2(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 0: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})}, - 1: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})}, - 2: metricChange{newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})}, - 4: metricChange{newFields: fieldList(field{"value_topk_aggregate", float64(87.92)})}, + 0: {newFields: fieldList(field{"A_topk_aggregate", float64(95.36)})}, + 1: {newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})}, + 2: {newFields: fieldList(field{"A_topk_aggregate", float64(78.02)}, field{"value_topk_aggregate", float64(133.61)})}, + 4: {newFields: fieldList(field{"value_topk_aggregate", float64(87.92)})}, } answer := generateAns(input, changeSet) @@ -493,9 +493,9 @@ func TestTopkBottomk(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 0: metricChange{}, - 1: metricChange{}, - 3: metricChange{}, + 0: {}, + 1: {}, + 3: {}, } answer := generateAns(input, changeSet) @@ -520,10 +520,10 @@ func TestTopkGroupByKeyTag(t *testing.T) { // Generate the answer changeSet := map[int]metricChange{ - 2: metricChange{newTags: tagList(tag{"gbt", "metric1&tag1=TWO&tag3=SIX&"})}, - 3: metricChange{newTags: tagList(tag{"gbt", "metric2&tag1=ONE&tag3=THREE&"})}, - 4: metricChange{newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})}, - 5: metricChange{newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})}, + 2: {newTags: tagList(tag{"gbt", "metric1&tag1=TWO&tag3=SIX&"})}, + 3: {newTags: tagList(tag{"gbt", "metric2&tag1=ONE&tag3=THREE&"})}, + 4: {newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})}, + 5: {newTags: tagList(tag{"gbt", "metric2&tag1=TWO&tag3=SEVEN&"})}, } answer := generateAns(input, changeSet) diff --git a/plugins/processors/unpivot/README.md b/plugins/processors/unpivot/README.md new file mode 100644 index 000000000..beee6c276 --- /dev/null +++ b/plugins/processors/unpivot/README.md @@ -0,0 +1,26 @@ +# Unpivot Processor + +You can use the `unpivot` processor to rotate a multi field series into single valued metrics. This transformation often results in data that is more easy to aggregate across fields. + +To perform the reverse operation use the [pivot] processor. + +### Configuration + +```toml +[[processors.unpivot]] + ## Tag to use for the name. + tag_key = "name" + ## Field to use for the name of the value. 
+ value_key = "value" +``` + +### Example + +```diff +- cpu,cpu=cpu0 time_idle=42i,time_user=43i ++ cpu,cpu=cpu0,name=time_idle value=42i ++ cpu,cpu=cpu0,name=time_user value=43i +``` + +[pivot]: /plugins/processors/pivot/README.md + diff --git a/plugins/processors/unpivot/unpivot.go b/plugins/processors/unpivot/unpivot.go new file mode 100644 index 000000000..4a081a428 --- /dev/null +++ b/plugins/processors/unpivot/unpivot.go @@ -0,0 +1,71 @@ +package unpivot + +import ( + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/processors" +) + +const ( + description = "Rotate multi field metric into several single field metrics" + sampleConfig = ` + ## Tag to use for the name. + tag_key = "name" + ## Field to use for the name of the value. + value_key = "value" +` +) + +type Unpivot struct { + TagKey string `toml:"tag_key"` + ValueKey string `toml:"value_key"` +} + +func (p *Unpivot) SampleConfig() string { + return sampleConfig +} + +func (p *Unpivot) Description() string { + return description +} + +func copyWithoutFields(metric telegraf.Metric) telegraf.Metric { + m := metric.Copy() + + fieldKeys := make([]string, 0, len(m.FieldList())) + for _, field := range m.FieldList() { + fieldKeys = append(fieldKeys, field.Key) + } + + for _, fk := range fieldKeys { + m.RemoveField(fk) + } + + return m +} + +func (p *Unpivot) Apply(metrics ...telegraf.Metric) []telegraf.Metric { + fieldCount := 0 + for _, m := range metrics { + fieldCount += len(m.FieldList()) + } + + results := make([]telegraf.Metric, 0, fieldCount) + + for _, m := range metrics { + base := copyWithoutFields(m) + for _, field := range m.FieldList() { + newMetric := base.Copy() + newMetric.AddField(p.ValueKey, field.Value) + newMetric.AddTag(p.TagKey, field.Key) + results = append(results, newMetric) + } + m.Accept() + } + return results +} + +func init() { + processors.Add("unpivot", func() telegraf.Processor { + return &Unpivot{} + }) +} diff --git a/plugins/processors/unpivot/unpivot_test.go b/plugins/processors/unpivot/unpivot_test.go new file mode 100644 index 000000000..a3a538503 --- /dev/null +++ b/plugins/processors/unpivot/unpivot_test.go @@ -0,0 +1,90 @@ +package unpivot + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" +) + +func TestUnpivot(t *testing.T) { + now := time.Now() + tests := []struct { + name string + unpivot *Unpivot + metrics []telegraf.Metric + expected []telegraf.Metric + }{ + { + name: "simple", + unpivot: &Unpivot{ + TagKey: "name", + ValueKey: "value", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "idle_time": int64(42), + }, + now, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_time", + }, + map[string]interface{}{ + "value": int64(42), + }, + now, + ), + }, + }, + { + name: "multi fields", + unpivot: &Unpivot{ + TagKey: "name", + ValueKey: "value", + }, + metrics: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{}, + map[string]interface{}{ + "idle_time": int64(42), + "idle_user": int64(43), + }, + now, + ), + }, + expected: []telegraf.Metric{ + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_time", + }, + map[string]interface{}{ + "value": int64(42), + }, + now, + ), + testutil.MustMetric("cpu", + map[string]string{ + "name": "idle_user", + }, + map[string]interface{}{ + "value": int64(43), + }, + now, + ), + }, + }, + } + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + actual := tt.unpivot.Apply(tt.metrics...) + testutil.RequireMetricsEqual(t, tt.expected, actual, testutil.SortMetrics()) + }) + } +} diff --git a/plugins/serializers/EXAMPLE_README.md b/plugins/serializers/EXAMPLE_README.md new file mode 100644 index 000000000..11965c07f --- /dev/null +++ b/plugins/serializers/EXAMPLE_README.md @@ -0,0 +1,46 @@ +# Example + +This description explains at a high level what the serializer does and +provides links to where additional information about the format can be found. + +### Configuration + +This section contains the sample configuration for the serializer. Since a +serializer does not have a standalone plugin, use the `file` +or `http` outputs as the base config. + +```toml +[[outputs.file]] + files = ["stdout"] + + ## Describe variables using the standard SampleConfig style. + ## https://github.com/influxdata/telegraf/wiki/SampleConfig + example_option = "example_value" + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "example" +``` + +#### example_option + +If an option requires a more expansive explanation than can be included inline +in the sample configuration, it may be described here. + +### Metrics + +The optional Metrics section contains details about how the serializer converts +Telegraf metrics into output. + +### Example + +The optional Example section can show an example conversion to the output +format using InfluxDB Line Protocol as the reference format. + +For line-delimited text formats, a diff may be appropriate: +```diff +- cpu,host=localhost,source=example.org value=42 ++ cpu|host=localhost|source=example.org|value=42 +``` diff --git a/plugins/serializers/carbon2/README.md b/plugins/serializers/carbon2/README.md new file mode 100644 index 000000000..e88b18cf0 --- /dev/null +++ b/plugins/serializers/carbon2/README.md @@ -0,0 +1,49 @@ +# Carbon2 + +The `carbon2` serializer translates the Telegraf metric format to the [Carbon2 format](http://metrics20.org/implementations/). + +### Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "carbon2" +``` + +Standard form: +``` +metric=name field=field_1 host=foo 30 1234567890 +metric=name field=field_2 host=foo 4 1234567890 +metric=name field=field_N host=foo 59 1234567890 +``` + +### Metrics + +The serializer converts the metrics by creating `intrinsic_tags` using the combination of metric name and fields. So, if one Telegraf metric has 4 fields, the `carbon2` output will be 4 separate metrics. There will be a `metric` tag that represents the name of the metric and a `field` tag to represent the field.
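As a rough illustration of that field-per-line expansion, here is a minimal Go sketch (not part of the patch itself) that drives the serializer directly; it assumes the `plugins/serializers/carbon2` package added in this diff, and the order of the emitted lines follows Go map iteration, so it is not guaranteed:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/carbon2"
)

func main() {
	// One Telegraf metric with two numeric fields and one tag.
	m, err := metric.New(
		"weather",
		map[string]string{"location": "us-midwest"},
		map[string]interface{}{"temperature": 82.0, "wind": 100.0},
		time.Unix(1234567890, 0),
	)
	if err != nil {
		panic(err)
	}

	s, err := carbon2.NewSerializer()
	if err != nil {
		panic(err)
	}

	// Each numeric field becomes its own carbon2 line, roughly:
	//   metric=weather field=temperature location=us-midwest 82 1234567890
	//   metric=weather field=wind location=us-midwest 100 1234567890
	out, err := s.Serialize(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```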
+ +### Example + +If we take the following InfluxDB Line Protocol: + +``` +weather,location=us-midwest,season=summer temperature=82,wind=100 1234567890 +``` + +after serializing in Carbon2, the result would be: + +``` +metric=weather field=temperature location=us-midwest season=summer 82 1234567890 +metric=weather field=wind location=us-midwest season=summer 100 1234567890 +``` + +### Fields and Tags with spaces +When a field key or tag key/value have spaces, spaces will be replaced with `_`. + +### Tags with empty values +When a tag's value is empty, it will be replaced with `null` diff --git a/plugins/serializers/carbon2/carbon2.go b/plugins/serializers/carbon2/carbon2.go new file mode 100644 index 000000000..fc11de062 --- /dev/null +++ b/plugins/serializers/carbon2/carbon2.go @@ -0,0 +1,67 @@ +package carbon2 + +import ( + "bytes" + "fmt" + "github.com/influxdata/telegraf" + "strconv" + "strings" +) + +type serializer struct { +} + +func NewSerializer() (*serializer, error) { + s := &serializer{} + return s, nil +} + +func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { + return s.createObject(metric), nil +} + +func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + var batch bytes.Buffer + for _, metric := range metrics { + batch.Write(s.createObject(metric)) + } + return batch.Bytes(), nil +} + +func (s *serializer) createObject(metric telegraf.Metric) []byte { + var m bytes.Buffer + for fieldName, fieldValue := range metric.Fields() { + if isNumeric(fieldValue) { + m.WriteString("metric=") + m.WriteString(strings.Replace(metric.Name(), " ", "_", -1)) + m.WriteString(" field=") + m.WriteString(strings.Replace(fieldName, " ", "_", -1)) + m.WriteString(" ") + for _, tag := range metric.TagList() { + m.WriteString(strings.Replace(tag.Key, " ", "_", -1)) + m.WriteString("=") + value := tag.Value + if len(value) == 0 { + value = "null" + } + m.WriteString(strings.Replace(value, " ", "_", -1)) + m.WriteString(" ") + } + m.WriteString(" ") + m.WriteString(fmt.Sprintf("%v", fieldValue)) + m.WriteString(" ") + m.WriteString(strconv.FormatInt(metric.Time().Unix(), 10)) + m.WriteString("\n") + } + } + return m.Bytes() +} + +func isNumeric(v interface{}) bool { + switch v.(type) { + case string: + return false + default: + return true + } +} diff --git a/plugins/serializers/carbon2/carbon2_test.go b/plugins/serializers/carbon2/carbon2_test.go new file mode 100644 index 000000000..f335342d5 --- /dev/null +++ b/plugins/serializers/carbon2/carbon2_test.go @@ -0,0 +1,138 @@ +package carbon2 + +import ( + "fmt" + "github.com/stretchr/testify/require" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func MustMetric(v telegraf.Metric, err error) telegraf.Metric { + if err != nil { + panic(err) + } + return v +} + +func TestSerializeMetricFloat(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := []byte(fmt.Sprintf(`metric=cpu field=usage_idle cpu=cpu0 91.5 %d`, now.Unix()) + "\n") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricWithEmptyStringTag(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "", + } + fields := 
map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := []byte(fmt.Sprintf(`metric=cpu field=usage_idle cpu=null 91.5 %d`, now.Unix()) + "\n") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeWithSpaces(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu 0": "cpu 0", + } + fields := map[string]interface{}{ + "usage_idle 1": float64(91.5), + } + m, err := metric.New("cpu metric", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := []byte(fmt.Sprintf(`metric=cpu_metric field=usage_idle_1 cpu_0=cpu_0 91.5 %d`, now.Unix()) + "\n") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := []byte(fmt.Sprintf(`metric=cpu field=usage_idle cpu=cpu0 90 %d`, now.Unix()) + "\n") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricString(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": "foobar", + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := []byte("") + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeBatch(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m, m} + s, _ := NewSerializer() + buf, err := s.SerializeBatch(metrics) + require.NoError(t, err) + expS := []byte(`metric=cpu field=value 42 0 +metric=cpu field=value 42 0 +`) + assert.Equal(t, string(expS), string(buf)) +} diff --git a/plugins/serializers/graphite/README.md b/plugins/serializers/graphite/README.md new file mode 100644 index 000000000..f6fd0c2cc --- /dev/null +++ b/plugins/serializers/graphite/README.md @@ -0,0 +1,67 @@ +# Graphite + +The Graphite data format is translated from Telegraf Metrics using either the +template pattern or tag support method. You can select between the two +methods using the [`graphite_tag_support`](#graphite-tag-support) option. When set, the tag support +method is used, otherwise the [Template Pattern](templates) is used. + +### Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "graphite" + + ## Prefix added to each graphite bucket + prefix = "telegraf" + ## Graphite template pattern + template = "host.tags.measurement.field" + + ## Graphite templates patterns + ## 1. Template for cpu + ## 2. Template for disk* + ## 3. 
Default template # templates = [ # "cpu tags.measurement.host.field", # "disk* measurement.field", # "host.measurement.tags.field" #] + + ## Support Graphite tags, recommended to enable when using Graphite 1.1 or later. + # graphite_tag_support = false + ## Character for separating metric name and field for Graphite tags + # graphite_separator = "." +``` + +#### graphite_tag_support + +When the `graphite_tag_support` option is enabled, the template pattern is not +used. Instead, tags are encoded using +[Graphite tag support](http://graphite.readthedocs.io/en/latest/tags.html) +added in Graphite 1.1. The `metric_path` is a combination of the optional +`prefix` option, measurement name, and field name. + +The tag `name` is reserved by Graphite; any conflicting tags will be encoded as `_name`. + +**Example Conversion**: +``` +cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758 +=> +cpu.usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 +cpu.usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 +``` +With the `graphite_separator` option set to "_": +``` +cpu,cpu=cpu-total,dc=us-east-1,host=tars usage_idle=98.09,usage_user=0.89 1455320660004257758 +=> +cpu_usage_user;cpu=cpu-total;dc=us-east-1;host=tars 0.89 1455320690 +cpu_usage_idle;cpu=cpu-total;dc=us-east-1;host=tars 98.09 1455320690 +``` + +[templates]: /docs/TEMPLATE_PATTERN.md diff --git a/plugins/serializers/graphite/graphite.go b/plugins/serializers/graphite/graphite.go index d02b0e26b..e580409fe 100644 --- a/plugins/serializers/graphite/graphite.go +++ b/plugins/serializers/graphite/graphite.go @@ -10,13 +10,14 @@ import ( "strings" "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/filter" ) const DEFAULT_TEMPLATE = "host.tags.measurement.field" var ( allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-:._=\p{L}]`) - hypenChars = strings.NewReplacer( + hyphenChars = strings.NewReplacer( "/", "-", "@", "-", "*", "-", @@ -29,10 +30,17 @@ var ( fieldDeleter = strings.NewReplacer(".FIELDNAME", "", "FIELDNAME.", "") ) +type GraphiteTemplate struct { + Filter filter.Filter + Value string +} + type GraphiteSerializer struct { Prefix string Template string TagSupport bool + Separator string + Templates []*GraphiteTemplate } func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { @@ -48,7 +56,7 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { if fieldValue == "" { continue } - bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, fieldName) + bucket := SerializeBucketNameWithTags(metric.Name(), metric.Tags(), s.Prefix, s.Separator, fieldName) metricString := fmt.Sprintf("%s %s %d\n", // insert "field" section of template bucket, @@ -59,7 +67,15 @@ func (s *GraphiteSerializer) Serialize(metric telegraf.Metric) ([]byte, error) { out = append(out, point...) } default: - bucket := SerializeBucketName(metric.Name(), metric.Tags(), s.Template, s.Prefix) + template := s.Template + for _, graphiteTemplate := range s.Templates { + if graphiteTemplate.Filter.Match(metric.Name()) { + template = graphiteTemplate.Value + break + } + } + + bucket := SerializeBucketName(metric.Name(), metric.Tags(), template, s.Prefix) if bucket == "" { return out, nil } @@ -185,6 +201,45 @@ func SerializeBucketName( return prefix + "."
+ strings.Join(out, ".") } +func InitGraphiteTemplates(templates []string) ([]*GraphiteTemplate, string, error) { + var graphiteTemplates []*GraphiteTemplate + defaultTemplate := "" + + for i, t := range templates { + parts := strings.Fields(t) + + if len(parts) == 0 { + return nil, "", fmt.Errorf("missing template at position: %d", i) + } + if len(parts) == 1 { + if parts[0] == "" { + return nil, "", fmt.Errorf("missing template at position: %d", i) + } else { + // Override default template + defaultTemplate = t + continue + } + } + + if len(parts) > 2 { + return nil, "", fmt.Errorf("invalid template format: '%s'", t) + } + + tFilter, err := filter.Compile([]string{parts[0]}) + + if err != nil { + return nil, "", err + } + + graphiteTemplates = append(graphiteTemplates, &GraphiteTemplate{ + Filter: tFilter, + Value: parts[1], + }) + } + + return graphiteTemplates, defaultTemplate, nil +} + // SerializeBucketNameWithTags will take the given measurement name and tags and // produce a graphite bucket. It will use the Graphite11Serializer. // http://graphite.readthedocs.io/en/latest/tags.html @@ -192,6 +247,7 @@ func SerializeBucketNameWithTags( measurement string, tags map[string]string, prefix string, + separator string, field string, ) string { var out string @@ -205,13 +261,13 @@ func SerializeBucketNameWithTags( sort.Strings(tagsCopy) if prefix != "" { - out = prefix + "." + out = prefix + separator } out += measurement if field != "value" { - out += "." + field + out += separator + field } out = sanitize(out) @@ -254,8 +310,8 @@ func buildTags(tags map[string]string) string { } func sanitize(value string) string { - // Apply special hypenation rules to preserve backwards compatibility - value = hypenChars.Replace(value) + // Apply special hyphenation rules to preserve backwards compatibility + value = hyphenChars.Replace(value) // Apply rule to drop some chars to preserve backwards compatibility value = dropChars.Replace(value) // Replace any remaining illegal chars diff --git a/plugins/serializers/graphite/graphite_test.go b/plugins/serializers/graphite/graphite_test.go index e72ed7a30..b6fcad696 100644 --- a/plugins/serializers/graphite/graphite_test.go +++ b/plugins/serializers/graphite/graphite_test.go @@ -102,6 +102,7 @@ func TestSerializeMetricNoHostWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -144,6 +145,97 @@ func TestSerializeMetricHost(t *testing.T) { assert.Equal(t, expS, mS) } +func TestSerializeMetricHostWithMultipleTemplates(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + "usage_busy": float64(8.5), + } + m1, err := metric.New("cpu", tags, fields, now) + m2, err := metric.New("new_cpu", tags, fields, now) + assert.NoError(t, err) + + templates, defaultTemplate, err := InitGraphiteTemplates([]string{ + "cp* tags.measurement.host.field", + "new_cpu tags.host.measurement.field", + }) + assert.NoError(t, err) + assert.Equal(t, defaultTemplate, "") + + s := GraphiteSerializer{ + Templates: templates, + } + + buf, _ := s.Serialize(m1) + buf2, _ := s.Serialize(m2) + + buf = append(buf, buf2...) 
+ + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_busy 8.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_busy 8.5 %d", now.Unix()), + } + sort.Strings(mS) + sort.Strings(expS) + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricHostWithMultipleTemplatesWithDefault(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "host": "localhost", + "cpu": "cpu0", + "datacenter": "us-west-2", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + "usage_busy": float64(8.5), + } + m1, err := metric.New("cpu", tags, fields, now) + m2, err := metric.New("new_cpu", tags, fields, now) + assert.NoError(t, err) + + templates, defaultTemplate, err := InitGraphiteTemplates([]string{ + "cp* tags.measurement.host.field", + "tags.host.measurement.field", + }) + assert.NoError(t, err) + assert.Equal(t, defaultTemplate, "tags.host.measurement.field") + + s := GraphiteSerializer{ + Templates: templates, + Template: defaultTemplate, + } + + buf, _ := s.Serialize(m1) + buf2, _ := s.Serialize(m2) + + buf = append(buf, buf2...) + + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{ + fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.cpu.localhost.usage_busy 8.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_idle 91.5 %d", now.Unix()), + fmt.Sprintf("cpu0.us-west-2.localhost.new_cpu.usage_busy 8.5 %d", now.Unix()), + } + sort.Strings(mS) + sort.Strings(expS) + assert.Equal(t, expS, mS) +} + func TestSerializeMetricHostWithTagSupport(t *testing.T) { now := time.Now() tags := map[string]string{ @@ -160,6 +252,7 @@ func TestSerializeMetricHostWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -214,6 +307,7 @@ func TestSerializeValueFieldWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -289,6 +383,7 @@ func TestSerializeValueStringWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -342,6 +437,7 @@ func TestSerializeValueBooleanWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -414,6 +510,7 @@ func TestSerializeFieldWithSpacesWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -467,6 +564,7 @@ func TestSerializeTagWithSpacesWithTagSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), "\n") @@ -577,6 +675,7 @@ func TestSerializeMetricPrefixWithTagSupport(t *testing.T) { s := GraphiteSerializer{ Prefix: "prefix", TagSupport: true, + Separator: ".", } buf, _ := s.Serialize(m) mS := strings.Split(strings.TrimSpace(string(buf)), 
"\n") @@ -882,6 +981,7 @@ func TestCleanWithTagsSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -942,6 +1042,7 @@ func TestSerializeBatchWithTagsSupport(t *testing.T) { s := GraphiteSerializer{ TagSupport: true, + Separator: ".", } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/plugins/serializers/influx/README.md b/plugins/serializers/influx/README.md new file mode 100644 index 000000000..d97fd42c8 --- /dev/null +++ b/plugins/serializers/influx/README.md @@ -0,0 +1,34 @@ +# Influx + +The `influx` data format outputs metrics into [InfluxDB Line Protocol][line +protocol]. This is the recommended format unless another format is required +for interoperability. + +### Configuration +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "influx" + + ## Maximum line length in bytes. Useful only for debugging. + influx_max_line_bytes = 0 + + ## When true, fields will be output in ascending lexical order. Enabling + ## this option will result in decreased performance and is only recommended + ## when you need predictable ordering while debugging. + influx_sort_fields = false + + ## When true, Telegraf will output unsigned integers as unsigned values, + ## i.e.: `42u`. You will need a version of InfluxDB supporting unsigned + ## integer values. Enabling this option will result in field type errors if + ## existing data has been written. + influx_uint_support = false +``` + +[line protocol]: https://docs.influxdata.com/influxdb/latest/write_protocols/line_protocol_tutorial/ diff --git a/plugins/serializers/influx/escape.go b/plugins/serializers/influx/escape.go index 27caa6bb3..9320eb7fa 100644 --- a/plugins/serializers/influx/escape.go +++ b/plugins/serializers/influx/escape.go @@ -29,10 +29,6 @@ var ( ) stringFieldEscaper = strings.NewReplacer( - "\t", `\t`, - "\n", `\n`, - "\f", `\f`, - "\r", `\r`, `"`, `\"`, `\`, `\\`, ) diff --git a/plugins/serializers/influx/influx.go b/plugins/serializers/influx/influx.go index f052c9c93..a675add4b 100644 --- a/plugins/serializers/influx/influx.go +++ b/plugins/serializers/influx/influx.go @@ -27,30 +27,34 @@ const ( UintSupport FieldTypeSupport = 1 << iota ) -// MetricError is an error causing a metric to be unserializable. +var ( + NeedMoreSpace = "need more space" + InvalidName = "invalid name" + NoFields = "no serializable fields" +) + +// MetricError is an error causing an entire metric to be unserializable. type MetricError struct { - s string + series string + reason string } func (e MetricError) Error() string { - return e.s + if e.series != "" { + return fmt.Sprintf("%q: %s", e.series, e.reason) + } + return e.reason } // FieldError is an error causing a field to be unserializable. type FieldError struct { - s string + reason string } func (e FieldError) Error() string { - return e.s + return e.reason } -var ( - ErrNeedMoreSpace = &MetricError{"need more space"} - ErrInvalidName = &MetricError{"invalid name"} - ErrNoFields = &MetricError{"no serializable fields"} -) - // Serializer is a serializer for line protocol. 
type Serializer struct { maxLineBytes int @@ -102,17 +106,23 @@ func (s *Serializer) Serialize(m telegraf.Metric) ([]byte, error) { return out, nil } +// SerializeBatch writes the slice of metrics and returns a byte slice of the +// results. The returned byte slice may contain multiple lines of data. func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { - var batch bytes.Buffer + s.buf.Reset() for _, m := range metrics { - _, err := s.Write(&batch, m) + _, err := s.Write(&s.buf, m) if err != nil { + if _, ok := err.(*MetricError); ok { + continue + } return nil, err } } - return batch.Bytes(), nil + out := make([]byte, s.buf.Len()) + copy(out, s.buf.Bytes()) + return out, nil } - func (s *Serializer) Write(w io.Writer, m telegraf.Metric) (int, error) { err := s.writeMetric(w, m) return s.bytesWritten, err @@ -135,7 +145,7 @@ func (s *Serializer) buildHeader(m telegraf.Metric) error { name := nameEscape(m.Name()) if name == "" { - return ErrInvalidName + return s.newMetricError(InvalidName) } s.header = append(s.header, name...) @@ -222,9 +232,10 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { } if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes { - // Need at least one field per line + // Need at least one field per line, this metric cannot be fit + // into the max line bytes. if firstField { - return ErrNeedMoreSpace + return s.newMetricError(NeedMoreSpace) } err = s.write(w, s.footer) @@ -232,21 +243,13 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { return err } + pairsLen = 0 + firstField = true bytesNeeded = len(s.header) + len(s.pair) + len(s.footer) - if s.maxLineBytes > 0 && bytesNeeded > s.maxLineBytes { - return ErrNeedMoreSpace + if bytesNeeded > s.maxLineBytes { + return s.newMetricError(NeedMoreSpace) } - - err = s.write(w, s.header) - if err != nil { - return err - } - - s.write(w, s.pair) - pairsLen += len(s.pair) - firstField = false - continue } if firstField { @@ -261,18 +264,28 @@ func (s *Serializer) writeMetric(w io.Writer, m telegraf.Metric) error { } } - s.write(w, s.pair) + err = s.write(w, s.pair) + if err != nil { + return err + } pairsLen += len(s.pair) firstField = false } if firstField { - return ErrNoFields + return s.newMetricError(NoFields) } return s.write(w, s.footer) +} +func (s *Serializer) newMetricError(reason string) *MetricError { + if len(s.header) != 0 { + series := bytes.TrimRight(s.header, " ") + return &MetricError{series: string(series), reason: reason} + } + return &MetricError{reason: reason} } func (s *Serializer) appendFieldValue(buf []byte, value interface{}) ([]byte, error) { diff --git a/plugins/serializers/influx/influx_test.go b/plugins/serializers/influx/influx_test.go index 74bffe5e4..eae3d755c 100644 --- a/plugins/serializers/influx/influx_test.go +++ b/plugins/serializers/influx/influx_test.go @@ -23,7 +23,7 @@ var tests = []struct { typeSupport FieldTypeSupport input telegraf.Metric output []byte - err error + errReason string }{ { name: "minimal", @@ -98,7 +98,7 @@ var tests = []struct { time.Unix(0, 0), ), ), - err: ErrNoFields, + errReason: NoFields, }, { name: "float Inf", @@ -275,6 +275,24 @@ var tests = []struct { ), output: []byte("cpu abc=123i 1519194109000000042\ncpu def=456i 1519194109000000042\n"), }, + { + name: "split_fields_overflow", + maxBytes: 43, + input: MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "abc": 123, + "def": 456, + "ghi": 789, + "jkl": 123, + }, + time.Unix(1519194109, 42), + 
), + ), + output: []byte("cpu abc=123i,def=456i 1519194109000000042\ncpu ghi=789i,jkl=123i 1519194109000000042\n"), + }, { name: "name newline", input: MustMetric( @@ -317,7 +335,7 @@ var tests = []struct { time.Unix(0, 0), ), ), - output: []byte("cpu value=\"x\\ny\" 0\n"), + output: []byte("cpu value=\"x\ny\" 0\n"), }, { name: "need more space", @@ -333,8 +351,8 @@ var tests = []struct { time.Unix(1519194109, 42), ), ), - output: nil, - err: ErrNeedMoreSpace, + output: nil, + errReason: NeedMoreSpace, }, { name: "no fields", @@ -346,7 +364,7 @@ var tests = []struct { time.Unix(0, 0), ), ), - err: ErrNoFields, + errReason: NoFields, }, { name: "procstat", @@ -367,7 +385,6 @@ var tests = []struct { "cpu_time_nice": float64(0), "cpu_time_soft_irq": float64(0), "cpu_time_steal": float64(0), - "cpu_time_stolen": float64(0), "cpu_time_system": float64(0), "cpu_time_user": float64(0.02), "cpu_usage": float64(0), @@ -415,7 +432,7 @@ var tests = []struct { time.Unix(0, 1517620624000000000), ), ), - output: []byte("procstat,exe=bash,process_name=bash cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_stolen=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"), + output: []byte("procstat,exe=bash,process_name=bash 
cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=0,cpu_time_user=0.02,cpu_usage=0,involuntary_context_switches=2i,memory_data=1576960i,memory_locked=0i,memory_rss=5103616i,memory_stack=139264i,memory_swap=0i,memory_vms=21659648i,nice_priority=20i,num_fds=4i,num_threads=1i,pid=29417i,read_bytes=0i,read_count=259i,realtime_priority=0i,rlimit_cpu_time_hard=2147483647i,rlimit_cpu_time_soft=2147483647i,rlimit_file_locks_hard=2147483647i,rlimit_file_locks_soft=2147483647i,rlimit_memory_data_hard=2147483647i,rlimit_memory_data_soft=2147483647i,rlimit_memory_locked_hard=65536i,rlimit_memory_locked_soft=65536i,rlimit_memory_rss_hard=2147483647i,rlimit_memory_rss_soft=2147483647i,rlimit_memory_stack_hard=2147483647i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=2147483647i,rlimit_memory_vms_soft=2147483647i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=4096i,rlimit_num_fds_soft=1024i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=78994i,rlimit_signals_pending_soft=78994i,signals_pending=0i,voluntary_context_switches=42i,write_bytes=106496i,write_count=35i 1517620624000000000\n"), }, } @@ -427,7 +444,10 @@ func TestSerializer(t *testing.T) { serializer.SetFieldSortOrder(SortFields) serializer.SetFieldTypeSupport(tt.typeSupport) output, err := serializer.Serialize(tt.input) - require.Equal(t, tt.err, err) + if tt.errReason != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.errReason) + } require.Equal(t, string(tt.output), string(output)) }) } diff --git a/plugins/serializers/influx/reader.go b/plugins/serializers/influx/reader.go index 4a755c88d..55b6c2b41 100644 --- a/plugins/serializers/influx/reader.go +++ b/plugins/serializers/influx/reader.go @@ -2,7 +2,6 @@ package influx import ( "bytes" - "fmt" "io" "log" @@ -54,18 +53,13 @@ func (r *reader) Read(p []byte) (int, error) { r.offset += 1 if err != nil { r.buf.Reset() - switch err.(type) { - case *MetricError: - // Since we are serializing an array of metrics, don't fail - // the entire batch just because of one unserializable metric. - log.Printf( - "D! [serializers.influx] could not serialize metric %q: %v; discarding metric", - metric.Name(), err) + if _, ok := err.(*MetricError); ok { continue - default: - fmt.Println(err) - return 0, err } + // Since we are serializing multiple metrics, don't fail the + // the entire batch just because of one unserializable metric. + log.Printf("E! 
[serializers.influx] could not serialize metric: %v; discarding metric", err) + continue } break } diff --git a/plugins/serializers/influx/reader_test.go b/plugins/serializers/influx/reader_test.go index 642b71b1c..7aaf3fccf 100644 --- a/plugins/serializers/influx/reader_test.go +++ b/plugins/serializers/influx/reader_test.go @@ -189,3 +189,92 @@ func TestZeroLengthBufferNoError(t *testing.T) { require.NoError(t, err) require.Equal(t, 0, n) } + +func BenchmarkReader(b *testing.B) { + m := MustMetric( + metric.New( + "procstat", + map[string]string{ + "exe": "bash", + "process_name": "bash", + }, + map[string]interface{}{ + "cpu_time": 0, + "cpu_time_guest": float64(0), + "cpu_time_guest_nice": float64(0), + "cpu_time_idle": float64(0), + "cpu_time_iowait": float64(0), + "cpu_time_irq": float64(0), + "cpu_time_nice": float64(0), + "cpu_time_soft_irq": float64(0), + "cpu_time_steal": float64(0), + "cpu_time_system": float64(0), + "cpu_time_user": float64(0.02), + "cpu_usage": float64(0), + "involuntary_context_switches": 2, + "memory_data": 1576960, + "memory_locked": 0, + "memory_rss": 5103616, + "memory_stack": 139264, + "memory_swap": 0, + "memory_vms": 21659648, + "nice_priority": 20, + "num_fds": 4, + "num_threads": 1, + "pid": 29417, + "read_bytes": 0, + "read_count": 259, + "realtime_priority": 0, + "rlimit_cpu_time_hard": 2147483647, + "rlimit_cpu_time_soft": 2147483647, + "rlimit_file_locks_hard": 2147483647, + "rlimit_file_locks_soft": 2147483647, + "rlimit_memory_data_hard": 2147483647, + "rlimit_memory_data_soft": 2147483647, + "rlimit_memory_locked_hard": 65536, + "rlimit_memory_locked_soft": 65536, + "rlimit_memory_rss_hard": 2147483647, + "rlimit_memory_rss_soft": 2147483647, + "rlimit_memory_stack_hard": 2147483647, + "rlimit_memory_stack_soft": 8388608, + "rlimit_memory_vms_hard": 2147483647, + "rlimit_memory_vms_soft": 2147483647, + "rlimit_nice_priority_hard": 0, + "rlimit_nice_priority_soft": 0, + "rlimit_num_fds_hard": 4096, + "rlimit_num_fds_soft": 1024, + "rlimit_realtime_priority_hard": 0, + "rlimit_realtime_priority_soft": 0, + "rlimit_signals_pending_hard": 78994, + "rlimit_signals_pending_soft": 78994, + "signals_pending": 0, + "voluntary_context_switches": 42, + "write_bytes": 106496, + "write_count": 35, + }, + time.Unix(0, 1517620624000000000), + ), + ) + + metrics := make([]telegraf.Metric, 1000, 1000) + for i := range metrics { + metrics[i] = m + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + readbuf := make([]byte, 4096, 4096) + serializer := NewSerializer() + reader := NewReader(metrics, serializer) + for { + _, err := reader.Read(readbuf) + if err == io.EOF { + break + } + + if err != nil { + panic(err.Error()) + } + } + } +} diff --git a/plugins/serializers/json/README.md b/plugins/serializers/json/README.md new file mode 100644 index 000000000..08bb9d4f7 --- /dev/null +++ b/plugins/serializers/json/README.md @@ -0,0 +1,77 @@ +# JSON + +The `json` output data format converts metrics into JSON documents. + +### Configuration + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["stdout", "/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "json" + + ## The resolution to use for the metric timestamp. Must be a duration string + ## such as "1ns", "1us", "1ms", "10ms", "1s". 
Durations are truncated to + ## the power of 10 less than the specified units. + json_timestamp_units = "1s" +``` + +### Examples: + +Standard form: +```json +{ + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 +} +``` + +When an output plugin needs to emit multiple metrics at one time, it may use +the batch format. The use of batch format is determined by the plugin, +reference the documentation for the specific plugin. +```json +{ + "metrics": [ + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + }, + { + "fields": { + "field_1": 30, + "field_2": 4, + "field_N": 59, + "n_images": 660 + }, + "name": "docker", + "tags": { + "host": "raynor" + }, + "timestamp": 1458229140 + } + ] +} +``` diff --git a/plugins/serializers/json/json.go b/plugins/serializers/json/json.go index bfb84f9a7..e2d7af330 100644 --- a/plugins/serializers/json/json.go +++ b/plugins/serializers/json/json.go @@ -2,6 +2,7 @@ package json import ( "encoding/json" + "math" "time" "github.com/influxdata/telegraf" @@ -49,8 +50,26 @@ func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { func (s *serializer) createObject(metric telegraf.Metric) map[string]interface{} { m := make(map[string]interface{}, 4) - m["tags"] = metric.Tags() - m["fields"] = metric.Fields() + + tags := make(map[string]string, len(metric.TagList())) + for _, tag := range metric.TagList() { + tags[tag.Key] = tag.Value + } + m["tags"] = tags + + fields := make(map[string]interface{}, len(metric.FieldList())) + for _, field := range metric.FieldList() { + switch fv := field.Value.(type) { + case float64: + // JSON does not support these special values + if math.IsNaN(fv) || math.IsInf(fv, 0) { + continue + } + } + fields[field.Key] = field.Value + } + m["fields"] = fields + m["name"] = metric.Name() m["timestamp"] = metric.Time().UnixNano() / int64(s.TimestampUnits) return m diff --git a/plugins/serializers/json/json_test.go b/plugins/serializers/json/json_test.go index 82990b747..9ea304c88 100644 --- a/plugins/serializers/json/json_test.go +++ b/plugins/serializers/json/json_test.go @@ -2,14 +2,15 @@ package json import ( "fmt" + "math" "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func MustMetric(v telegraf.Metric, err error) telegraf.Metric { @@ -193,3 +194,42 @@ func TestSerializeBatch(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte(`{"metrics":[{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0},{"fields":{"value":42},"name":"cpu","tags":{},"timestamp":0}]}`), buf) } + +func TestSerializeBatchSkipInf(t *testing.T) { + metrics := []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "inf": math.Inf(1), + "time_idle": 42, + }, + time.Unix(0, 0), + ), + } + + s, err := NewSerializer(0) + require.NoError(t, err) + buf, err := s.SerializeBatch(metrics) + require.NoError(t, err) + require.Equal(t, []byte(`{"metrics":[{"fields":{"time_idle":42},"name":"cpu","tags":{},"timestamp":0}]}`), buf) +} + +func TestSerializeBatchSkipInfAllFields(t *testing.T) { + metrics := 
[]telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "inf": math.Inf(1), + }, + time.Unix(0, 0), + ), + } + + s, err := NewSerializer(0) + require.NoError(t, err) + buf, err := s.SerializeBatch(metrics) + require.NoError(t, err) + require.Equal(t, []byte(`{"metrics":[{"fields":{},"name":"cpu","tags":{},"timestamp":0}]}`), buf) +} diff --git a/plugins/serializers/nowmetric/README.md b/plugins/serializers/nowmetric/README.md new file mode 100644 index 000000000..c1bc22cbe --- /dev/null +++ b/plugins/serializers/nowmetric/README.md @@ -0,0 +1,83 @@ +# ServiceNow Metrics serializer + +The ServiceNow Metrics serializer outputs metrics in the [ServiceNow Operational Intelligence format][ServiceNow-format]. + +It can be used to write to a file using the file output, or for sending metrics to a MID Server with Enable REST endpoint activated using the standard Telegraf HTTP output. +If you're using the HTTP output, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric. + +[ServiceNow-format]: https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/reference/mid-POST-metrics.html + + +An example event looks like: +```javascript +[{ + "metric_type": "Disk C: % Free Space", + "resource": "C:\\", + "node": "lnux100", + "value": 50, + "timestamp": 1473183012000, + "ci2metric_id": { + "node": "lnux100" + }, + "source": "Telegraf" +}] +``` +## Using with the HTTP output + +To send this data to a ServiceNow MID Server with the Web Server extension activated, you can use the HTTP output. There are some custom headers that you need to add to manage the MID Web Server authorization; here's a sample config for an HTTP output: + +```toml +[[outputs.http]] + ## URL is the address to send metrics to + url = "http://:9082/api/mid/sa/metrics" + + ## Timeout for HTTP message + # timeout = "5s" + + ## HTTP method, one of: "POST" or "PUT" + method = "POST" + + ## HTTP Basic Auth credentials + username = 'evt.integration' + password = 'P@$$w0rd!' + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "nowmetric" + + ## Additional HTTP headers + [outputs.http.headers] + # # Should be set manually to "application/json" for json data_format + Content-Type = "application/json" + Accept = "application/json" +``` + +Starting with the London release, you also need to explicitly create an event rule to allow binding of metric events to host CIs. + +https://docs.servicenow.com/bundle/london-it-operations-management/page/product/event-management/task/event-rule-bind-metrics-to-host.html + +## Using with the File output + +You can use the file output to write the payload to a file. +In this case, just add the following section to your telegraf config file: + +```toml +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["C:/Telegraf/metrics.out"] + + ## Data format to output. 
+ ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "nowmetric" +``` diff --git a/plugins/serializers/nowmetric/nowmetric.go b/plugins/serializers/nowmetric/nowmetric.go new file mode 100644 index 000000000..c9d0b9463 --- /dev/null +++ b/plugins/serializers/nowmetric/nowmetric.go @@ -0,0 +1,137 @@ +package nowmetric + +import ( + "bytes" + "encoding/json" + "fmt" + "time" + + "github.com/influxdata/telegraf" +) + +type serializer struct { + TimestampUnits time.Duration +} + +/* +Example for the JSON generated and pushed to the MID +{ + "metric_type":"cpu_usage_system", + "resource":"", + "node":"ASGARD", + "value": 0.89, + "timestamp":1487365430, + "ci2metric_id":{"node":"ASGARD"}, + "source":"Telegraf" +} +*/ + +type OIMetric struct { + Metric string `json:"metric_type"` + Resource string `json:"resource"` + Node string `json:"node"` + Value interface{} `json:"value"` + Timestamp int64 `json:"timestamp"` + CiMapping map[string]string `json:"ci2metric_id"` + Source string `json:"source"` +} + +type OIMetrics []OIMetric + +func NewSerializer() (*serializer, error) { + s := &serializer{} + return s, nil +} + +func (s *serializer) Serialize(metric telegraf.Metric) (out []byte, err error) { + serialized, err := s.createObject(metric) + if err != nil { + return []byte{}, nil + } + return serialized, err +} + +func (s *serializer) SerializeBatch(metrics []telegraf.Metric) (out []byte, err error) { + objects := make([]byte, 0) + for _, metric := range metrics { + m, err := s.createObject(metric) + if err != nil { + return nil, fmt.Errorf("D! [serializer.nowmetric] Dropping invalid metric: %s", metric.Name()) + } else if m != nil { + objects = append(objects, m...) + } + } + replaced := bytes.Replace(objects, []byte("]["), []byte(","), -1) + return replaced, nil +} + +func (s *serializer) createObject(metric telegraf.Metric) ([]byte, error) { + /* ServiceNow Operational Intelligence supports an array of JSON objects. + ** Following elements accepted in the request body: + ** metric_type: The name of the metric + ** resource: Information about the resource for which metric data is being collected. In the example below, C:\ is the resource for which metric data is collected + ** node: IP, FQDN, name of the CI, or host + ** value: Value of the metric + ** timestamp: Epoch timestamp of the metric in milliseconds + ** ci2metric_id: List of key-value pairs to identify the CI. 
+ ** source: Data source monitoring the metric type + */ + var allmetrics OIMetrics + var oimetric OIMetric + + oimetric.Source = "Telegraf" + + // Process Tags to extract node & resource name info + for _, tag := range metric.TagList() { + if tag.Key == "" || tag.Value == "" { + continue + } + + if tag.Key == "objectname" { + oimetric.Resource = tag.Value + } + + if tag.Key == "host" { + oimetric.Node = tag.Value + } + } + + // Format timestamp to UNIX epoch + oimetric.Timestamp = (metric.Time().UnixNano() / int64(time.Millisecond)) + + // Loop of fields value pair and build datapoint for each of them + for _, field := range metric.FieldList() { + if !verifyValue(field.Value) { + // Ignore String + continue + } + + if field.Key == "" { + // Ignore Empty Key + continue + } + + oimetric.Metric = field.Key + oimetric.Value = field.Value + + if oimetric.Node != "" { + cimapping := map[string]string{} + cimapping["node"] = oimetric.Node + oimetric.CiMapping = cimapping + } + + allmetrics = append(allmetrics, oimetric) + } + + metricsJson, err := json.Marshal(allmetrics) + + return metricsJson, err +} + +func verifyValue(v interface{}) bool { + switch v.(type) { + case string: + return false + } + return true +} diff --git a/plugins/serializers/nowmetric/nowmetric_test.go b/plugins/serializers/nowmetric/nowmetric_test.go new file mode 100644 index 000000000..e49b81c2d --- /dev/null +++ b/plugins/serializers/nowmetric/nowmetric_test.go @@ -0,0 +1,189 @@ +package nowmetric + +import ( + "fmt" + "sort" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func MustMetric(v telegraf.Metric, err error) telegraf.Metric { + if err != nil { + panic(err) + } + return v +} + +func TestSerializeMetricFloat(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":91.5,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerialize_TimestampUnits(t *testing.T) { + tests := []struct { + name string + timestampUnits time.Duration + expected string + }{ + { + name: "1ms", + timestampUnits: 1 * time.Millisecond, + expected: `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":1525478795123,"ci2metric_id":null,"source":"Telegraf"}]`, + }, + { + name: "10ms", + timestampUnits: 10 * time.Millisecond, + expected: `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":1525478795123,"ci2metric_id":null,"source":"Telegraf"}]`, + }, + { + name: "15ms is reduced to 10ms", + timestampUnits: 15 * time.Millisecond, + expected: `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":1525478795123,"ci2metric_id":null,"source":"Telegraf"}]`, + }, + { + name: "65ms is reduced to 10ms", + timestampUnits: 65 * time.Millisecond, + expected: `[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":1525478795123,"ci2metric_id":null,"source":"Telegraf"}]`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + 
map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(1525478795, 123456789), + ), + ) + s, _ := NewSerializer() + actual, err := s.Serialize(m) + require.NoError(t, err) + require.Equal(t, tt.expected, string(actual)) + }) + } +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricString(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": "foobar", + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + assert.Equal(t, "null", string(buf)) +} + +func TestSerializeMultiFields(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + "usage_total": 8559615, + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + // Sort for predictable field order + sort.Slice(m.FieldList(), func(i, j int) bool { + return m.FieldList()[i].Key < m.FieldList()[j].Key + }) + + s, _ := NewSerializer() + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := []byte(fmt.Sprintf(`[{"metric_type":"usage_idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"},{"metric_type":"usage_total","resource":"","node":"","value":8559615,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)), (now.UnixNano() / int64(time.Millisecond)))) + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricWithEscapes(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu tag": "cpu0", + } + fields := map[string]interface{}{ + "U,age=Idle": int64(90), + } + m, err := metric.New("My CPU", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer() + buf, err := s.Serialize(m) + assert.NoError(t, err) + + expS := []byte(fmt.Sprintf(`[{"metric_type":"U,age=Idle","resource":"","node":"","value":90,"timestamp":%d,"ci2metric_id":null,"source":"Telegraf"}]`, (now.UnixNano() / int64(time.Millisecond)))) + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeBatch(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m, m} + s, _ := NewSerializer() + buf, err := s.SerializeBatch(metrics) + require.NoError(t, err) + require.Equal(t, []byte(`[{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"},{"metric_type":"value","resource":"","node":"","value":42,"timestamp":0,"ci2metric_id":null,"source":"Telegraf"}]`), buf) +} diff --git a/plugins/serializers/prometheus/README.md b/plugins/serializers/prometheus/README.md new file mode 100644 index 000000000..9a0cdfea2 --- 
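A minimal usage sketch of the nowmetric serializer (illustrative only), assuming the `metric.New` constructor and `NewSerializer` signature exercised by the tests above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/nowmetric"
)

func main() {
	// The serializer maps the "host" tag to the ServiceNow "node" field and
	// emits the timestamp in milliseconds since the epoch.
	m, err := metric.New(
		"cpu",
		map[string]string{"host": "lnux100"},
		map[string]interface{}{"usage_idle": 91.5},
		time.Unix(1473183012, 0),
	)
	if err != nil {
		panic(err)
	}

	s, err := nowmetric.NewSerializer()
	if err != nil {
		panic(err)
	}

	out, err := s.Serialize(m)
	if err != nil {
		panic(err)
	}

	// Expected shape (one array element per numeric field):
	// [{"metric_type":"usage_idle","resource":"","node":"lnux100","value":91.5,
	//   "timestamp":1473183012000,"ci2metric_id":{"node":"lnux100"},"source":"Telegraf"}]
	fmt.Println(string(out))
}
```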
/dev/null +++ b/plugins/serializers/prometheus/README.md @@ -0,0 +1,68 @@ +# Prometheus + +The `prometheus` data format converts metrics into the Prometheus text +exposition format. When used with the `prometheus` input, the input should be +use the `metric_version = 2` option in order to properly round trip metrics. + +**Warning**: When generating histogram and summary types, output may +not be correct if the metric spans multiple batches. This issue can be +somewhat, but not fully, mitigated by using outputs that support writing in +"batch format". When using histogram and summary types, it is recommended to +use only the `prometheus_client` output. + +## Configuration + +```toml +[[outputs.file]] + files = ["stdout"] + use_batch_format = true + + ## Include the metric timestamp on each sample. + prometheus_export_timestamp = false + + ## Sort prometheus metric families and metric samples. Useful for + ## debugging. + prometheus_sort_metrics = false + + ## Output string fields as metric labels; when false string fields are + ## discarded. + prometheus_string_as_label = false + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "prometheus" +``` + +### Example + +**Example Input** +``` +cpu,cpu=cpu0 time_guest=8022.6,time_system=26145.98,time_user=92512.89 1574317740000000000 +cpu,cpu=cpu1 time_guest=8097.88,time_system=25223.35,time_user=96519.58 1574317740000000000 +cpu,cpu=cpu2 time_guest=7386.28,time_system=24870.37,time_user=95631.59 1574317740000000000 +cpu,cpu=cpu3 time_guest=7434.19,time_system=24843.71,time_user=93753.88 1574317740000000000 +``` + +**Example Output** +``` +# HELP cpu_time_guest Telegraf collected metric +# TYPE cpu_time_guest counter +cpu_time_guest{cpu="cpu0"} 9582.54 +cpu_time_guest{cpu="cpu1"} 9660.88 +cpu_time_guest{cpu="cpu2"} 8946.45 +cpu_time_guest{cpu="cpu3"} 9002.31 +# HELP cpu_time_system Telegraf collected metric +# TYPE cpu_time_system counter +cpu_time_system{cpu="cpu0"} 28675.47 +cpu_time_system{cpu="cpu1"} 27779.34 +cpu_time_system{cpu="cpu2"} 27406.18 +cpu_time_system{cpu="cpu3"} 27404.97 +# HELP cpu_time_user Telegraf collected metric +# TYPE cpu_time_user counter +cpu_time_user{cpu="cpu0"} 99551.84 +cpu_time_user{cpu="cpu1"} 103468.52 +cpu_time_user{cpu="cpu2"} 102591.45 +cpu_time_user{cpu="cpu3"} 100717.05 +``` diff --git a/plugins/serializers/prometheus/collection.go b/plugins/serializers/prometheus/collection.go new file mode 100644 index 000000000..10e85de07 --- /dev/null +++ b/plugins/serializers/prometheus/collection.go @@ -0,0 +1,489 @@ +package prometheus + +import ( + "hash/fnv" + "sort" + "strconv" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/telegraf" + dto "github.com/prometheus/client_model/go" +) + +const helpString = "Telegraf collected metric" + +type TimeFunc func() time.Time + +type MetricFamily struct { + Name string + Type telegraf.ValueType +} + +type Metric struct { + Labels []LabelPair + Time time.Time + AddTime time.Time + Scaler *Scaler + Histogram *Histogram + Summary *Summary +} + +type LabelPair struct { + Name string + Value string +} + +type Scaler struct { + Value float64 +} + +type Bucket struct { + Bound float64 + Count uint64 +} + +type Quantile struct { + Quantile float64 + Value float64 +} + +type Histogram struct { + Buckets []Bucket + Count uint64 + Sum float64 +} + +func (h *Histogram) merge(b 
Bucket) { + for i := range h.Buckets { + if h.Buckets[i].Bound == b.Bound { + h.Buckets[i].Count = b.Count + return + } + } + h.Buckets = append(h.Buckets, b) +} + +type Summary struct { + Quantiles []Quantile + Count uint64 + Sum float64 +} + +func (s *Summary) merge(q Quantile) { + for i := range s.Quantiles { + if s.Quantiles[i].Quantile == q.Quantile { + s.Quantiles[i].Value = q.Value + return + } + } + s.Quantiles = append(s.Quantiles, q) +} + +type MetricKey uint64 + +func MakeMetricKey(labels []LabelPair) MetricKey { + h := fnv.New64a() + for _, label := range labels { + h.Write([]byte(label.Name)) + h.Write([]byte("\x00")) + h.Write([]byte(label.Value)) + h.Write([]byte("\x00")) + } + return MetricKey(h.Sum64()) +} + +type Entry struct { + Family MetricFamily + Metrics map[MetricKey]*Metric +} + +type Collection struct { + Entries map[MetricFamily]Entry + config FormatConfig +} + +func NewCollection(config FormatConfig) *Collection { + cache := &Collection{ + Entries: make(map[MetricFamily]Entry), + config: config, + } + return cache +} + +func hasLabel(name string, labels []LabelPair) bool { + for _, label := range labels { + if name == label.Name { + return true + } + } + return false +} + +func (c *Collection) createLabels(metric telegraf.Metric) []LabelPair { + labels := make([]LabelPair, 0, len(metric.TagList())) + for _, tag := range metric.TagList() { + // Ignore special tags for histogram and summary types. + switch metric.Type() { + case telegraf.Histogram: + if tag.Key == "le" { + continue + } + case telegraf.Summary: + if tag.Key == "quantile" { + continue + } + } + + name, ok := SanitizeLabelName(tag.Key) + if !ok { + continue + } + + labels = append(labels, LabelPair{Name: name, Value: tag.Value}) + } + + if c.config.StringHandling != StringAsLabel { + return labels + } + + addedFieldLabel := false + for _, field := range metric.FieldList() { + value, ok := field.Value.(string) + if !ok { + continue + } + + name, ok := SanitizeLabelName(field.Key) + if !ok { + continue + } + + // If there is a tag with the same name as the string field, discard + // the field and use the tag instead. + if hasLabel(name, labels) { + continue + } + + labels = append(labels, LabelPair{Name: name, Value: value}) + addedFieldLabel = true + + } + + if addedFieldLabel { + sort.Slice(labels, func(i, j int) bool { + return labels[i].Name < labels[j].Name + }) + } + + return labels +} + +func (c *Collection) Add(metric telegraf.Metric, now time.Time) { + labels := c.createLabels(metric) + for _, field := range metric.FieldList() { + metricName := MetricName(metric.Name(), field.Key, metric.Type()) + metricName, ok := SanitizeMetricName(metricName) + if !ok { + continue + } + + family := MetricFamily{ + Name: metricName, + Type: metric.Type(), + } + + entry, ok := c.Entries[family] + if !ok { + entry = Entry{ + Family: family, + Metrics: make(map[MetricKey]*Metric), + } + c.Entries[family] = entry + + } + + metricKey := MakeMetricKey(labels) + + m, ok := entry.Metrics[metricKey] + if ok { + // A batch of metrics can contain multiple values for a single + // Prometheus sample. If this metric is older than the existing + // sample then we can skip over it. 
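+			// Sample identity is the FNV-64a hash of the sanitized label set
+			// (see MakeMetricKey above), so a newer sample with the same
+			// labels overwrites the stored one, while an older one is
+			// skipped here.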
+ if metric.Time().Before(m.Time) { + continue + } + } + + switch metric.Type() { + case telegraf.Counter: + fallthrough + case telegraf.Gauge: + fallthrough + case telegraf.Untyped: + value, ok := SampleValue(field.Value) + if !ok { + continue + } + + m = &Metric{ + Labels: labels, + Time: metric.Time(), + AddTime: now, + Scaler: &Scaler{Value: value}, + } + + entry.Metrics[metricKey] = m + case telegraf.Histogram: + if m == nil { + m = &Metric{ + Labels: labels, + Time: metric.Time(), + AddTime: now, + Histogram: &Histogram{}, + } + } + switch { + case strings.HasSuffix(field.Key, "_bucket"): + le, ok := metric.GetTag("le") + if !ok { + continue + } + bound, err := strconv.ParseFloat(le, 64) + if err != nil { + continue + } + + count, ok := SampleCount(field.Value) + if !ok { + continue + } + + m.Histogram.merge(Bucket{ + Bound: bound, + Count: count, + }) + case strings.HasSuffix(field.Key, "_sum"): + sum, ok := SampleSum(field.Value) + if !ok { + continue + } + + m.Histogram.Sum = sum + case strings.HasSuffix(field.Key, "_count"): + count, ok := SampleCount(field.Value) + if !ok { + continue + } + + m.Histogram.Count = count + default: + continue + } + + entry.Metrics[metricKey] = m + case telegraf.Summary: + if m == nil { + m = &Metric{ + Labels: labels, + Time: metric.Time(), + AddTime: now, + Summary: &Summary{}, + } + } + switch { + case strings.HasSuffix(field.Key, "_sum"): + sum, ok := SampleSum(field.Value) + if !ok { + continue + } + + m.Summary.Sum = sum + case strings.HasSuffix(field.Key, "_count"): + count, ok := SampleCount(field.Value) + if !ok { + continue + } + + m.Summary.Count = count + default: + quantileTag, ok := metric.GetTag("quantile") + if !ok { + continue + } + quantile, err := strconv.ParseFloat(quantileTag, 64) + if err != nil { + continue + } + + value, ok := SampleValue(field.Value) + if !ok { + continue + } + + m.Summary.merge(Quantile{ + Quantile: quantile, + Value: value, + }) + } + + entry.Metrics[metricKey] = m + } + } +} + +func (c *Collection) Expire(now time.Time, age time.Duration) { + expireTime := now.Add(-age) + for _, entry := range c.Entries { + for key, metric := range entry.Metrics { + if metric.AddTime.Before(expireTime) { + delete(entry.Metrics, key) + if len(entry.Metrics) == 0 { + delete(c.Entries, entry.Family) + } + } + } + } +} + +func (c *Collection) GetEntries(order MetricSortOrder) []Entry { + entries := make([]Entry, 0, len(c.Entries)) + for _, entry := range c.Entries { + entries = append(entries, entry) + } + + switch order { + case SortMetrics: + sort.Slice(entries, func(i, j int) bool { + lhs := entries[i].Family + rhs := entries[j].Family + if lhs.Name != rhs.Name { + return lhs.Name < rhs.Name + } + + return lhs.Type < rhs.Type + }) + } + return entries +} + +func (c *Collection) GetMetrics(entry Entry, order MetricSortOrder) []*Metric { + metrics := make([]*Metric, 0, len(entry.Metrics)) + for _, metric := range entry.Metrics { + metrics = append(metrics, metric) + } + + switch order { + case SortMetrics: + sort.Slice(metrics, func(i, j int) bool { + lhs := metrics[i].Labels + rhs := metrics[j].Labels + if len(lhs) != len(rhs) { + return len(lhs) < len(rhs) + } + + for index := range lhs { + l := lhs[index] + r := rhs[index] + + if l.Name != r.Name { + return l.Name < r.Name + } + + if l.Value != r.Value { + return l.Value < r.Value + } + } + + return false + }) + } + + return metrics +} + +func (c *Collection) GetProto() []*dto.MetricFamily { + result := make([]*dto.MetricFamily, 0, len(c.Entries)) + + for _, entry := 
range c.GetEntries(c.config.MetricSortOrder) { + mf := &dto.MetricFamily{ + Name: proto.String(entry.Family.Name), + Help: proto.String(helpString), + Type: MetricType(entry.Family.Type), + } + + for _, metric := range c.GetMetrics(entry, c.config.MetricSortOrder) { + l := make([]*dto.LabelPair, 0, len(metric.Labels)) + for _, label := range metric.Labels { + l = append(l, &dto.LabelPair{ + Name: proto.String(label.Name), + Value: proto.String(label.Value), + }) + } + + m := &dto.Metric{ + Label: l, + } + + if c.config.TimestampExport == ExportTimestamp { + m.TimestampMs = proto.Int64(metric.Time.UnixNano() / int64(time.Millisecond)) + } + + switch entry.Family.Type { + case telegraf.Gauge: + m.Gauge = &dto.Gauge{Value: proto.Float64(metric.Scaler.Value)} + case telegraf.Counter: + m.Counter = &dto.Counter{Value: proto.Float64(metric.Scaler.Value)} + case telegraf.Untyped: + m.Untyped = &dto.Untyped{Value: proto.Float64(metric.Scaler.Value)} + case telegraf.Histogram: + buckets := make([]*dto.Bucket, 0, len(metric.Histogram.Buckets)) + for _, bucket := range metric.Histogram.Buckets { + buckets = append(buckets, &dto.Bucket{ + UpperBound: proto.Float64(bucket.Bound), + CumulativeCount: proto.Uint64(bucket.Count), + }) + } + + if len(buckets) == 0 { + continue + } + + m.Histogram = &dto.Histogram{ + Bucket: buckets, + SampleCount: proto.Uint64(metric.Histogram.Count), + SampleSum: proto.Float64(metric.Histogram.Sum), + } + case telegraf.Summary: + quantiles := make([]*dto.Quantile, 0, len(metric.Summary.Quantiles)) + for _, quantile := range metric.Summary.Quantiles { + quantiles = append(quantiles, &dto.Quantile{ + Quantile: proto.Float64(quantile.Quantile), + Value: proto.Float64(quantile.Value), + }) + } + + if len(quantiles) == 0 { + continue + } + + m.Summary = &dto.Summary{ + Quantile: quantiles, + SampleCount: proto.Uint64(metric.Summary.Count), + SampleSum: proto.Float64(metric.Summary.Sum), + } + default: + panic("unknown telegraf.ValueType") + } + + mf.Metric = append(mf.Metric, m) + } + + if len(mf.Metric) != 0 { + result = append(result, mf) + } + } + + return result +} diff --git a/plugins/serializers/prometheus/collection_test.go b/plugins/serializers/prometheus/collection_test.go new file mode 100644 index 000000000..d2c5f5d09 --- /dev/null +++ b/plugins/serializers/prometheus/collection_test.go @@ -0,0 +1,427 @@ +package prometheus + +import ( + "math" + "testing" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + dto "github.com/prometheus/client_model/go" + "github.com/stretchr/testify/require" +) + +type Input struct { + metric telegraf.Metric + addtime time.Time +} + +func TestCollectionExpire(t *testing.T) { + tests := []struct { + name string + now time.Time + age time.Duration + input []Input + expected []*dto.MetricFamily + }{ + { + name: "not expired", + now: time.Unix(1, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("cpu_time_idle"), + Help: proto.String(helpString), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Untyped: &dto.Untyped{Value: proto.Float64(42.0)}, + }, + }, + }, + }, + }, + { + name: "update metric expiration", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + 
metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(12, 0), + ), + addtime: time.Unix(12, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("cpu_time_idle"), + Help: proto.String(helpString), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Untyped: &dto.Untyped{Value: proto.Float64(43.0)}, + }, + }, + }, + }, + }, + { + name: "update metric expiration descending order", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(12, 0), + ), + addtime: time.Unix(12, 0), + }, { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("cpu_time_idle"), + Help: proto.String(helpString), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Untyped: &dto.Untyped{Value: proto.Float64(42.0)}, + }, + }, + }, + }, + }, + { + name: "expired single metric in metric family", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{}, + }, + { + name: "expired one metric in metric family", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_guest": 42.0, + }, + time.Unix(15, 0), + ), + addtime: time.Unix(15, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("cpu_time_guest"), + Help: proto.String(helpString), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Untyped: &dto.Untyped{Value: proto.Float64(42.0)}, + }, + }, + }, + }, + }, + { + name: "histogram bucket updates", + now: time.Unix(0, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 10.0, + "http_request_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 1.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + // Next interval + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 20.0, + 
"http_request_duration_seconds_count": 4, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 2.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("http_request_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(4), + SampleSum: proto.Float64(20.0), + Bucket: []*dto.Bucket{ + { + UpperBound: proto.Float64(0.05), + CumulativeCount: proto.Uint64(2), + }, + { + UpperBound: proto.Float64(math.Inf(1)), + CumulativeCount: proto.Uint64(2), + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "summary quantile updates", + now: time.Unix(0, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.0, + "rpc_duration_seconds_count": 1, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 1.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + // Updated Summary + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 2.0, + "rpc_duration_seconds_count": 2, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, { + metric: testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 2.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + addtime: time.Unix(0, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("rpc_duration_seconds"), + Help: proto.String(helpString), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(2), + SampleSum: proto.Float64(2.0), + Quantile: []*dto.Quantile{ + { + Quantile: proto.Float64(0.01), + Value: proto.Float64(2), + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "expire based on add time", + now: time.Unix(20, 0), + age: 10 * time.Second, + input: []Input{ + { + metric: testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + addtime: time.Unix(15, 0), + }, + }, + expected: []*dto.MetricFamily{ + { + Name: proto.String("cpu_time_idle"), + Help: proto.String(helpString), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{}, + Untyped: &dto.Untyped{Value: proto.Float64(42.0)}, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := NewCollection(FormatConfig{}) + for _, item := range tt.input { + c.Add(item.metric, item.addtime) + } + c.Expire(tt.now, tt.age) + + actual := c.GetProto() + + require.Equal(t, tt.expected, actual) + }) + } +} 
diff --git a/plugins/serializers/prometheus/convert.go b/plugins/serializers/prometheus/convert.go new file mode 100644 index 000000000..131ac31b8 --- /dev/null +++ b/plugins/serializers/prometheus/convert.go @@ -0,0 +1,215 @@ +package prometheus + +import ( + "strings" + "unicode" + + "github.com/influxdata/telegraf" + dto "github.com/prometheus/client_model/go" +) + +type Table struct { + First *unicode.RangeTable + Rest *unicode.RangeTable +} + +var MetricNameTable = Table{ + First: &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x003A, 0x003A, 1}, // : + {0x0041, 0x005A, 1}, // A-Z + {0x005F, 0x005F, 1}, // _ + {0x0061, 0x007A, 1}, // a-z + }, + LatinOffset: 4, + }, + Rest: &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x0030, 0x003A, 1}, // 0-: + {0x0041, 0x005A, 1}, // A-Z + {0x005F, 0x005F, 1}, // _ + {0x0061, 0x007A, 1}, // a-z + }, + LatinOffset: 4, + }, +} + +var LabelNameTable = Table{ + First: &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x0041, 0x005A, 1}, // A-Z + {0x005F, 0x005F, 1}, // _ + {0x0061, 0x007A, 1}, // a-z + }, + LatinOffset: 3, + }, + Rest: &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x0030, 0x0039, 1}, // 0-9 + {0x0041, 0x005A, 1}, // A-Z + {0x005F, 0x005F, 1}, // _ + {0x0061, 0x007A, 1}, // a-z + }, + LatinOffset: 4, + }, +} + +func isValid(name string, table Table) bool { + if name == "" { + return false + } + + for i, r := range name { + switch { + case i == 0: + if !unicode.In(r, table.First) { + return false + } + default: + if !unicode.In(r, table.Rest) { + return false + } + } + } + + return true +} + +// Sanitize checks if the name is valid according to the table. If not, it +// attempts to replaces invalid runes with an underscore to create a valid +// name. +func sanitize(name string, table Table) (string, bool) { + if isValid(name, table) { + return name, true + } + + var b strings.Builder + + for i, r := range name { + switch { + case i == 0: + if unicode.In(r, table.First) { + b.WriteRune(r) + } + default: + if unicode.In(r, table.Rest) { + b.WriteRune(r) + } else { + b.WriteString("_") + } + } + } + + name = strings.Trim(b.String(), "_") + if name == "" { + return "", false + } + + return name, true +} + +// SanitizeMetricName checks if the name is a valid Prometheus metric name. If +// not, it attempts to replaces invalid runes with an underscore to create a +// valid name. +func SanitizeMetricName(name string) (string, bool) { + return sanitize(name, MetricNameTable) +} + +// SanitizeLabelName checks if the name is a valid Prometheus label name. If +// not, it attempts to replaces invalid runes with an underscore to create a +// valid name. +func SanitizeLabelName(name string) (string, bool) { + return sanitize(name, LabelNameTable) +} + +// MetricName returns the Prometheus metric name. 
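+// For histogram and summary fields, the _bucket, _sum, and _count suffixes are
+// trimmed first. Fields from the "prometheus" measurement keep the bare field
+// key; everything else becomes "<measurement>_<fieldKey>", e.g. measurement
+// "cpu" with field "time_idle" yields "cpu_time_idle".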
+func MetricName(measurement, fieldKey string, valueType telegraf.ValueType) string { + switch valueType { + case telegraf.Histogram, telegraf.Summary: + switch { + case strings.HasSuffix(fieldKey, "_bucket"): + fieldKey = strings.TrimSuffix(fieldKey, "_bucket") + case strings.HasSuffix(fieldKey, "_sum"): + fieldKey = strings.TrimSuffix(fieldKey, "_sum") + case strings.HasSuffix(fieldKey, "_count"): + fieldKey = strings.TrimSuffix(fieldKey, "_count") + } + } + + if measurement == "prometheus" { + return fieldKey + } + return measurement + "_" + fieldKey +} + +func MetricType(valueType telegraf.ValueType) *dto.MetricType { + switch valueType { + case telegraf.Counter: + return dto.MetricType_COUNTER.Enum() + case telegraf.Gauge: + return dto.MetricType_GAUGE.Enum() + case telegraf.Summary: + return dto.MetricType_SUMMARY.Enum() + case telegraf.Untyped: + return dto.MetricType_UNTYPED.Enum() + case telegraf.Histogram: + return dto.MetricType_HISTOGRAM.Enum() + default: + panic("unknown telegraf.ValueType") + } +} + +// SampleValue converts a field value into a value suitable for a simple sample value. +func SampleValue(value interface{}) (float64, bool) { + switch v := value.(type) { + case float64: + return v, true + case int64: + return float64(v), true + case uint64: + return float64(v), true + case bool: + if v { + return 1.0, true + } + return 0.0, true + default: + return 0, false + } +} + +// SampleCount converts a field value into a count suitable for a metric family +// of the Histogram or Summary type. +func SampleCount(value interface{}) (uint64, bool) { + switch v := value.(type) { + case float64: + if v < 0 { + return 0, false + } + return uint64(v), true + case int64: + if v < 0 { + return 0, false + } + return uint64(v), true + case uint64: + return v, true + default: + return 0, false + } +} + +// SampleSum converts a field value into a sum suitable for a metric family +// of the Histogram or Summary type. +func SampleSum(value interface{}) (float64, bool) { + switch v := value.(type) { + case float64: + return v, true + case int64: + return float64(v), true + case uint64: + return float64(v), true + default: + return 0, false + } +} diff --git a/plugins/serializers/prometheus/prometheus.go b/plugins/serializers/prometheus/prometheus.go new file mode 100644 index 000000000..9e5df5882 --- /dev/null +++ b/plugins/serializers/prometheus/prometheus.go @@ -0,0 +1,70 @@ +package prometheus + +import ( + "bytes" + "time" + + "github.com/influxdata/telegraf" + "github.com/prometheus/common/expfmt" +) + +// TimestampExport controls if the output contains timestamps. +type TimestampExport int + +const ( + NoExportTimestamp TimestampExport = iota + ExportTimestamp +) + +// MetricSortOrder controls if the output is sorted. +type MetricSortOrder int + +const ( + NoSortMetrics MetricSortOrder = iota + SortMetrics +) + +// StringHandling defines how to process string fields. 
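+// DiscardStrings drops string fields entirely; StringAsLabel converts them
+// into labels on the sample, with a same-named tag taking precedence over the
+// field value.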
+type StringHandling int + +const ( + DiscardStrings StringHandling = iota + StringAsLabel +) + +type FormatConfig struct { + TimestampExport TimestampExport + MetricSortOrder MetricSortOrder + StringHandling StringHandling +} + +type Serializer struct { + config FormatConfig +} + +func NewSerializer(config FormatConfig) (*Serializer, error) { + s := &Serializer{config: config} + return s, nil +} + +func (s *Serializer) Serialize(metric telegraf.Metric) ([]byte, error) { + return s.SerializeBatch([]telegraf.Metric{metric}) +} + +func (s *Serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + coll := NewCollection(s.config) + for _, metric := range metrics { + coll.Add(metric, time.Now()) + } + + var buf bytes.Buffer + for _, mf := range coll.GetProto() { + enc := expfmt.NewEncoder(&buf, expfmt.FmtText) + err := enc.Encode(mf) + if err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} diff --git a/plugins/serializers/prometheus/prometheus_test.go b/plugins/serializers/prometheus/prometheus_test.go new file mode 100644 index 000000000..ff082f7b2 --- /dev/null +++ b/plugins/serializers/prometheus/prometheus_test.go @@ -0,0 +1,667 @@ +package prometheus + +import ( + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/testutil" + "github.com/stretchr/testify/require" +) + +func TestSerialize(t *testing.T) { + tests := []struct { + name string + config FormatConfig + metric telegraf.Metric + expected []byte + }{ + { + name: "simple", + metric: testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 +`), + }, + { + name: "prometheus input untyped", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Untyped, + ), + expected: []byte(` +# HELP http_requests_total Telegraf collected metric +# TYPE http_requests_total untyped +http_requests_total{code="400",method="post"} 3 +`), + }, + { + name: "prometheus input counter", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Counter, + ), + expected: []byte(` +# HELP http_requests_total Telegraf collected metric +# TYPE http_requests_total counter +http_requests_total{code="400",method="post"} 3 +`), + }, + { + name: "prometheus input gauge", + metric: testutil.MustMetric( + "prometheus", + map[string]string{ + "code": "400", + "method": "post", + }, + map[string]interface{}{ + "http_requests_total": 3.0, + }, + time.Unix(0, 0), + telegraf.Gauge, + ), + expected: []byte(` +# HELP http_requests_total Telegraf collected metric +# TYPE http_requests_total gauge +http_requests_total{code="400",method="post"} 3 +`), + }, + { + name: "prometheus input histogram no buckets", + metric: testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 53423, + "http_request_duration_seconds_count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + expected: []byte(` +`), + }, + { + name: "prometheus input histogram only bucket", + metric: testutil.MustMetric( + "prometheus", + 
map[string]string{ + "le": "0.5", + }, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 129389.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + expected: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="+Inf"} 0 +http_request_duration_seconds_sum 0 +http_request_duration_seconds_count 0 +`), + }, + { + name: "simple with timestamp", + config: FormatConfig{ + TimestampExport: ExportTimestamp, + }, + metric: testutil.MustMetric( + "cpu", + map[string]string{ + "host": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(1574279268, 0), + ), + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="example.org"} 42 1574279268000 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := NewSerializer(FormatConfig{ + MetricSortOrder: SortMetrics, + TimestampExport: tt.config.TimestampExport, + StringHandling: tt.config.StringHandling, + }) + require.NoError(t, err) + actual, err := s.Serialize(tt.metric) + require.NoError(t, err) + + require.Equal(t, strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(actual))) + }) + } +} + +func TestSerializeBatch(t *testing.T) { + tests := []struct { + name string + config FormatConfig + metrics []telegraf.Metric + expected []byte + }{ + { + name: "simple", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "one.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "two.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="one.example.org"} 42 +cpu_time_idle{host="two.example.org"} 42 +`), + }, + { + name: "multiple metric families", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host": "one.example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + "time_guest": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_guest Telegraf collected metric +# TYPE cpu_time_guest untyped +cpu_time_guest{host="one.example.org"} 42 +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host="one.example.org"} 42 +`), + }, + { + name: "histogram", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "http_request_duration_seconds_sum": 53423, + "http_request_duration_seconds_count": 144320, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.05"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 24054.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.1"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 33444.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "0.2"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 100392.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + 
"prometheus", + map[string]string{"le": "0.5"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 129389.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "1.0"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 133988.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"le": "+Inf"}, + map[string]interface{}{ + "http_request_duration_seconds_bucket": 144320.0, + }, + time.Unix(0, 0), + telegraf.Histogram, + ), + }, + expected: []byte(` +# HELP http_request_duration_seconds Telegraf collected metric +# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{le="0.05"} 24054 +http_request_duration_seconds_bucket{le="0.1"} 33444 +http_request_duration_seconds_bucket{le="0.2"} 100392 +http_request_duration_seconds_bucket{le="0.5"} 129389 +http_request_duration_seconds_bucket{le="1"} 133988 +http_request_duration_seconds_bucket{le="+Inf"} 144320 +http_request_duration_seconds_sum 53423 +http_request_duration_seconds_count 144320 +`), + }, + { + name: "", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "prometheus", + map[string]string{}, + map[string]interface{}{ + "rpc_duration_seconds_sum": 1.7560473e+07, + "rpc_duration_seconds_count": 2693, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.01"}, + map[string]interface{}{ + "rpc_duration_seconds": 3102.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.05"}, + map[string]interface{}{ + "rpc_duration_seconds": 3272.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.5"}, + map[string]interface{}{ + "rpc_duration_seconds": 4773.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.9"}, + map[string]interface{}{ + "rpc_duration_seconds": 9001.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + testutil.MustMetric( + "prometheus", + map[string]string{"quantile": "0.99"}, + map[string]interface{}{ + "rpc_duration_seconds": 76656.0, + }, + time.Unix(0, 0), + telegraf.Summary, + ), + }, + expected: []byte(` +# HELP rpc_duration_seconds Telegraf collected metric +# TYPE rpc_duration_seconds summary +rpc_duration_seconds{quantile="0.01"} 3102 +rpc_duration_seconds{quantile="0.05"} 3272 +rpc_duration_seconds{quantile="0.5"} 4773 +rpc_duration_seconds{quantile="0.9"} 9001 +rpc_duration_seconds{quantile="0.99"} 76656 +rpc_duration_seconds_sum 1.7560473e+07 +rpc_duration_seconds_count 2693 +`), + }, + { + name: "newer sample", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 43.0, + }, + time.Unix(1, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle 43 +`), + }, + { + name: "colons are not replaced in metric name from measurement", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu::xyzzy", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu::xyzzy_time_idle Telegraf collected metric +# TYPE 
cpu::xyzzy_time_idle untyped +cpu::xyzzy_time_idle 42 +`), + }, + { + name: "colons are not replaced in metric name from field", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time:idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time:idle Telegraf collected metric +# TYPE cpu_time:idle untyped +cpu_time:idle 42 +`), + }, + { + name: "invalid label", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host-name": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "colons are replaced in label name", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "host:name": "example.org", + }, + map[string]interface{}{ + "time_idle": 42.0, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "discard strings", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu0", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle 42 +`), + }, + { + name: "string as label", + config: FormatConfig{ + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu0", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{cpu="cpu0"} 42 +`), + }, + { + name: "string as label duplicate tag", + config: FormatConfig{ + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_idle": 42.0, + "cpu": "cpu1", + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{cpu="cpu0"} 42 +`), + }, + { + name: "replace characters when using string as label", + config: FormatConfig{ + StringHandling: StringAsLabel, + }, + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{ + "host:name": "example.org", + "time_idle": 42.0, + }, + time.Unix(1574279268, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_idle Telegraf collected metric +# TYPE cpu_time_idle untyped +cpu_time_idle{host_name="example.org"} 42 +`), + }, + { + name: "multiple fields grouping", + metrics: []telegraf.Metric{ + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu0", + }, + map[string]interface{}{ + "time_guest": 8106.04, + "time_system": 26271.4, + "time_user": 92904.33, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu1", + }, + map[string]interface{}{ + "time_guest": 8181.63, + "time_system": 25351.49, + "time_user": 96912.57, + }, + time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu2", + }, + map[string]interface{}{ + "time_guest": 7470.04, + "time_system": 24998.43, + "time_user": 96034.08, + }, + 
time.Unix(0, 0), + ), + testutil.MustMetric( + "cpu", + map[string]string{ + "cpu": "cpu3", + }, + map[string]interface{}{ + "time_guest": 7517.95, + "time_system": 24970.82, + "time_user": 94148, + }, + time.Unix(0, 0), + ), + }, + expected: []byte(` +# HELP cpu_time_guest Telegraf collected metric +# TYPE cpu_time_guest untyped +cpu_time_guest{cpu="cpu0"} 8106.04 +cpu_time_guest{cpu="cpu1"} 8181.63 +cpu_time_guest{cpu="cpu2"} 7470.04 +cpu_time_guest{cpu="cpu3"} 7517.95 +# HELP cpu_time_system Telegraf collected metric +# TYPE cpu_time_system untyped +cpu_time_system{cpu="cpu0"} 26271.4 +cpu_time_system{cpu="cpu1"} 25351.49 +cpu_time_system{cpu="cpu2"} 24998.43 +cpu_time_system{cpu="cpu3"} 24970.82 +# HELP cpu_time_user Telegraf collected metric +# TYPE cpu_time_user untyped +cpu_time_user{cpu="cpu0"} 92904.33 +cpu_time_user{cpu="cpu1"} 96912.57 +cpu_time_user{cpu="cpu2"} 96034.08 +cpu_time_user{cpu="cpu3"} 94148 +`), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := NewSerializer(FormatConfig{ + MetricSortOrder: SortMetrics, + TimestampExport: tt.config.TimestampExport, + StringHandling: tt.config.StringHandling, + }) + require.NoError(t, err) + actual, err := s.SerializeBatch(tt.metrics) + require.NoError(t, err) + + require.Equal(t, + strings.TrimSpace(string(tt.expected)), + strings.TrimSpace(string(actual))) + }) + } +} diff --git a/plugins/serializers/registry.go b/plugins/serializers/registry.go index 277d33206..e5065a93c 100644 --- a/plugins/serializers/registry.go +++ b/plugins/serializers/registry.go @@ -5,10 +5,14 @@ import ( "time" "github.com/influxdata/telegraf" - + "github.com/influxdata/telegraf/plugins/serializers/carbon2" "github.com/influxdata/telegraf/plugins/serializers/graphite" "github.com/influxdata/telegraf/plugins/serializers/influx" "github.com/influxdata/telegraf/plugins/serializers/json" + "github.com/influxdata/telegraf/plugins/serializers/nowmetric" + "github.com/influxdata/telegraf/plugins/serializers/prometheus" + "github.com/influxdata/telegraf/plugins/serializers/splunkmetric" + "github.com/influxdata/telegraf/plugins/serializers/wavefront" ) // SerializerOutput is an interface for output plugins that are able to @@ -20,10 +24,16 @@ type SerializerOutput interface { // Serializer is an interface defining functions that a serializer plugin must // satisfy. +// +// Implementations of this interface should be reentrant but are not required +// to be thread-safe. type Serializer interface { // Serialize takes a single telegraf metric and turns it into a byte buffer. // separate metrics should be separated by a newline, and there should be // a newline at the end of the buffer. + // + // New plugins should use SerializeBatch instead to allow for non-line + // delimited metrics. Serialize(metric telegraf.Metric) ([]byte, error) // SerializeBatch takes an array of telegraf metric and serializes it into @@ -35,31 +45,61 @@ type Serializer interface { // Config is a struct that covers the data types needed for all serializer types, // and can be used to instantiate _any_ of the serializers. type Config struct { - // Dataformat can be one of: influx, graphite, or json - DataFormat string + // Dataformat can be one of the serializer types listed in NewSerializer. 
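+	// Currently: influx, graphite, json, splunkmetric, nowmetric, carbon2,
+	// wavefront, or prometheus.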
+ DataFormat string `toml:"data_format"` // Support tags in graphite protocol - GraphiteTagSupport bool + GraphiteTagSupport bool `toml:"graphite_tag_support"` + + // Character for separating metric name and field for Graphite tags + GraphiteSeparator string `toml:"graphite_separator"` // Maximum line length in bytes; influx format only - InfluxMaxLineBytes int + InfluxMaxLineBytes int `toml:"influx_max_line_bytes"` // Sort field keys, set to true only when debugging as it less performant // than unsorted fields; influx format only - InfluxSortFields bool + InfluxSortFields bool `toml:"influx_sort_fields"` // Support unsigned integer output; influx format only - InfluxUintSupport bool + InfluxUintSupport bool `toml:"influx_uint_support"` // Prefix to add to all measurements, only supports Graphite - Prefix string + Prefix string `toml:"prefix"` // Template for converting telegraf metrics into Graphite // only supports Graphite - Template string + Template string `toml:"template"` + + // Templates same Template, but multiple + Templates []string `toml:"templates"` // Timestamp units to use for JSON formatted output - TimestampUnits time.Duration + TimestampUnits time.Duration `toml:"timestamp_units"` + + // Include HEC routing fields for splunkmetric output + HecRouting bool `toml:"hec_routing"` + + // Enable Splunk MultiMetric output (Splunk 8.0+) + SplunkmetricMultiMetric bool `toml:"splunkmetric_multi_metric"` + + // Point tags to use as the source name for Wavefront (if none found, host will be used). + WavefrontSourceOverride []string `toml:"wavefront_source_override"` + + // Use Strict rules to sanitize metric and tag names from invalid characters for Wavefront + // When enabled forward slash (/) and comma (,) will be accepted + WavefrontUseStrict bool `toml:"wavefront_use_strict"` + + // Include the metric timestamp on each sample. + PrometheusExportTimestamp bool `toml:"prometheus_export_timestamp"` + + // Sort prometheus metric families and metric samples. Useful for + // debugging. + PrometheusSortMetrics bool `toml:"prometheus_sort_metrics"` + + // Output string fields as metric labels; when false string fields are + // discarded. + PrometheusStringAsLabel bool `toml:"prometheus_string_as_label"` } // NewSerializer a Serializer interface based on the given config. 
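As a sketch of how an output plugin wires these options together (illustrative only; the `testutil.MustMetric` helper stands in for a real metric, and the expected output mirrors the timestamped example in the prometheus serializer tests):

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/plugins/serializers"
	"github.com/influxdata/telegraf/testutil"
)

func main() {
	// Ask the registry for the prometheus serializer, keeping string fields
	// as labels and exporting a timestamp on every sample.
	cfg := &serializers.Config{
		DataFormat:                "prometheus",
		PrometheusStringAsLabel:   true,
		PrometheusExportTimestamp: true,
	}

	s, err := serializers.NewSerializer(cfg)
	if err != nil {
		panic(err)
	}

	m := testutil.MustMetric(
		"cpu",
		map[string]string{"host": "example.org"},
		map[string]interface{}{"time_idle": 42.0},
		time.Unix(1574279268, 0),
	)

	out, err := s.SerializeBatch([]telegraf.Metric{m})
	if err != nil {
		panic(err)
	}

	// Text exposition format, e.g.:
	// # HELP cpu_time_idle Telegraf collected metric
	// # TYPE cpu_time_idle untyped
	// cpu_time_idle{host="example.org"} 42 1574279268000
	fmt.Print(string(out))
}
```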
@@ -70,19 +110,68 @@ func NewSerializer(config *Config) (Serializer, error) { case "influx": serializer, err = NewInfluxSerializerConfig(config) case "graphite": - serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport) + serializer, err = NewGraphiteSerializer(config.Prefix, config.Template, config.GraphiteTagSupport, config.GraphiteSeparator, config.Templates) case "json": serializer, err = NewJsonSerializer(config.TimestampUnits) + case "splunkmetric": + serializer, err = NewSplunkmetricSerializer(config.HecRouting, config.SplunkmetricMultiMetric) + case "nowmetric": + serializer, err = NewNowSerializer() + case "carbon2": + serializer, err = NewCarbon2Serializer() + case "wavefront": + serializer, err = NewWavefrontSerializer(config.Prefix, config.WavefrontUseStrict, config.WavefrontSourceOverride) + case "prometheus": + serializer, err = NewPrometheusSerializer(config) default: err = fmt.Errorf("Invalid data format: %s", config.DataFormat) } return serializer, err } +func NewPrometheusSerializer(config *Config) (Serializer, error) { + exportTimestamp := prometheus.NoExportTimestamp + if config.PrometheusExportTimestamp { + exportTimestamp = prometheus.ExportTimestamp + } + + sortMetrics := prometheus.NoSortMetrics + if config.PrometheusExportTimestamp { + sortMetrics = prometheus.SortMetrics + } + + stringAsLabels := prometheus.DiscardStrings + if config.PrometheusStringAsLabel { + stringAsLabels = prometheus.StringAsLabel + } + + return prometheus.NewSerializer(prometheus.FormatConfig{ + TimestampExport: exportTimestamp, + MetricSortOrder: sortMetrics, + StringHandling: stringAsLabels, + }) +} + +func NewWavefrontSerializer(prefix string, useStrict bool, sourceOverride []string) (Serializer, error) { + return wavefront.NewSerializer(prefix, useStrict, sourceOverride) +} + func NewJsonSerializer(timestampUnits time.Duration) (Serializer, error) { return json.NewSerializer(timestampUnits) } +func NewCarbon2Serializer() (Serializer, error) { + return carbon2.NewSerializer() +} + +func NewSplunkmetricSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (Serializer, error) { + return splunkmetric.NewSerializer(splunkmetric_hec_routing, splunkmetric_multimetric) +} + +func NewNowSerializer() (Serializer, error) { + return nowmetric.NewSerializer() +} + func NewInfluxSerializerConfig(config *Config) (Serializer, error) { var sort influx.FieldSortOrder if config.InfluxSortFields { @@ -105,10 +194,26 @@ func NewInfluxSerializer() (Serializer, error) { return influx.NewSerializer(), nil } -func NewGraphiteSerializer(prefix, template string, tag_support bool) (Serializer, error) { +func NewGraphiteSerializer(prefix, template string, tag_support bool, separator string, templates []string) (Serializer, error) { + graphiteTemplates, defaultTemplate, err := graphite.InitGraphiteTemplates(templates) + + if err != nil { + return nil, err + } + + if defaultTemplate != "" { + template = defaultTemplate + } + + if separator == "" { + separator = "." 
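+		// An empty separator falls back to the conventional graphite dot.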
+ } + return &graphite.GraphiteSerializer{ Prefix: prefix, Template: template, TagSupport: tag_support, + Separator: separator, + Templates: graphiteTemplates, }, nil } diff --git a/plugins/serializers/splunkmetric/README.md b/plugins/serializers/splunkmetric/README.md new file mode 100644 index 000000000..ba2170d9c --- /dev/null +++ b/plugins/serializers/splunkmetric/README.md @@ -0,0 +1,186 @@ +# Splunk Metrics serializer + +The Splunk Metrics serializer outputs metrics in the [Splunk metric HEC JSON format][splunk-format]. + +It can be used to write to a file using the file output, or for sending metrics to a HEC using the standard telegraf HTTP output. +If you're using the HTTP output, this serializer knows how to batch the metrics so you don't end up with an HTTP POST per metric. + +[splunk-format]: http://dev.splunk.com/view/event-collector/SP-CAAAFDN#json + +An example event looks like: +```javascript +{ + "time": 1529708430, + "event": "metric", + "host": "patas-mbp", + "fields": { + "_value": 0.6, + "cpu": "cpu0", + "dc": "mobile", + "metric_name": "cpu.usage_user", + "user": "ronnocol" + } +} +``` +In the above snippet, the following keys are dimensions: +* cpu +* dc +* user + +## Using Multimetric output + +Starting with Splunk Enterprise and Splunk Cloud 8.0, you can now send multiple metric values in one payload. This means, for example, that +you can send all of your CPU stats in one JSON struct, an example event looks like: + +```javascript +{ + "time": 1572469920, + "event": "metric", + "host": "mono.local", + "fields": { + "class": "osx", + "cpu": "cpu0", + "metric_name:telegraf.cpu.usage_guest": 0, + "metric_name:telegraf.cpu.usage_guest_nice": 0, + "metric_name:telegraf.cpu.usage_idle": 65.1, + "metric_name:telegraf.cpu.usage_iowait": 0, + "metric_name:telegraf.cpu.usage_irq": 0, + "metric_name:telegraf.cpu.usage_nice": 0, + "metric_name:telegraf.cpu.usage_softirq": 0, + "metric_name:telegraf.cpu.usage_steal": 0, + "metric_name:telegraf.cpu.usage_system": 10.2, + "metric_name:telegraf.cpu.usage_user": 24.7, + } +} +``` +In order to enable this mode, there's a new option `splunkmetric_multimetric` that you set in the appropriate output module you plan on using. + +## Using with the HTTP output + +To send this data to a Splunk HEC, you can use the HTTP output, there are some custom headers that you need to add +to manage the HEC authorization, here's a sample config for an HTTP output: + +```toml +[[outputs.http]] + ## URL is the address to send metrics to + url = "https://localhost:8088/services/collector" + + ## Timeout for HTTP message + # timeout = "5s" + + ## HTTP method, one of: "POST" or "PUT" + # method = "POST" + + ## HTTP Basic Auth credentials + # username = "username" + # password = "pa$$word" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Data format to output. 
+ ## Each data format has it's own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "splunkmetric" + ## Provides time, index, source overrides for the HEC + splunkmetric_hec_routing = true + # splunkmetric_multimetric = true + + ## Additional HTTP headers + [outputs.http.headers] + # Should be set manually to "application/json" for json data_format + Content-Type = "application/json" + Authorization = "Splunk xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" + X-Splunk-Request-Channel = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +``` + +## Overrides +You can override the default values for the HEC token you are using by adding additional tags to the config file. + +The following aspects of the token can be overridden with tags: +* index +* source + +You can either use `[global_tags]` or using a more advanced configuration as documented [here](https://github.com/influxdata/telegraf/blob/master/docs/CONFIGURATION.md). + +Such as this example which overrides the index just on the cpu metric: +```toml +[[inputs.cpu]] + percpu = false + totalcpu = true + [inputs.cpu.tags] + index = "cpu_metrics" +``` + +## Using with the File output + +You can use the file output when running telegraf on a machine with a Splunk forwarder. + +A sample event when `hec_routing` is false (or unset) looks like: +```javascript +{ + "_value": 0.6, + "cpu": "cpu0", + "dc": "mobile", + "metric_name": "cpu.usage_user", + "user": "ronnocol", + "time": 1529708430 +} +``` +Data formatted in this manner can be ingested with a simple `props.conf` file that +looks like this: + +```ini +[telegraf] +category = Metrics +description = Telegraf Metrics +pulldown_type = 1 +DATETIME_CONFIG = +NO_BINARY_CHECK = true +SHOULD_LINEMERGE = true +disabled = false +INDEXED_EXTRACTIONS = json +KV_MODE = none +TIMESTAMP_FIELDS = time +``` + +An example configuration of a file based output is: + +```toml + # Send telegraf metrics to file(s) +[[outputs.file]] + ## Files to write to, "stdout" is a specially handled file. + files = ["/tmp/metrics.out"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "splunkmetric" + splunkmetric_hec_routing = false + splunkmetric_multimetric = true +``` + +## Non-numeric metric values + +Splunk supports only numeric field values, so serializer would silently drop metrics with the string values. For some cases it is possible to workaround using ENUM processor. 
Example, provided below doing this for the `docker_container_health.health_status` metric: + +```toml +# splunkmetric does not support sting values +[[processors.enum]] + namepass = ["docker_container_health"] + [[processors.enum.mapping]] + ## Name of the field to map + field = "health_status" + [processors.enum.mapping.value_mappings] + starting = 0 + healthy = 1 + unhealthy = 2 + none = 3 +``` + diff --git a/plugins/serializers/splunkmetric/splunkmetric.go b/plugins/serializers/splunkmetric/splunkmetric.go new file mode 100644 index 000000000..801d0d69e --- /dev/null +++ b/plugins/serializers/splunkmetric/splunkmetric.go @@ -0,0 +1,228 @@ +package splunkmetric + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/influxdata/telegraf" +) + +type serializer struct { + HecRouting bool + SplunkmetricMultiMetric bool +} + +type CommonTags struct { + Time float64 + Host string + Index string + Source string + Fields map[string]interface{} +} + +type HECTimeSeries struct { + Time float64 `json:"time"` + Event string `json:"event"` + Host string `json:"host,omitempty"` + Index string `json:"index,omitempty"` + Source string `json:"source,omitempty"` + Fields map[string]interface{} `json:"fields"` +} + +// NewSerializer Setup our new serializer +func NewSerializer(splunkmetric_hec_routing bool, splunkmetric_multimetric bool) (*serializer, error) { + /* Define output params */ + s := &serializer{ + HecRouting: splunkmetric_hec_routing, + SplunkmetricMultiMetric: splunkmetric_multimetric, + } + return s, nil +} + +func (s *serializer) Serialize(metric telegraf.Metric) ([]byte, error) { + + m, err := s.createObject(metric) + if err != nil { + return nil, fmt.Errorf("D! [serializer.splunkmetric] Dropping invalid metric: %s", metric.Name()) + } + + return m, nil +} + +func (s *serializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + + var serialized []byte + + for _, metric := range metrics { + m, err := s.createObject(metric) + if err != nil { + return nil, fmt.Errorf("D! [serializer.splunkmetric] Dropping invalid metric: %s", metric.Name()) + } else if m != nil { + serialized = append(serialized, m...) + } + } + + return serialized, nil +} + +func (s *serializer) createMulti(metric telegraf.Metric, dataGroup HECTimeSeries, commonTags CommonTags) (metricGroup []byte, err error) { + /* When splunkmetric_multimetric is true, then we can write out multiple name=value pairs as part of the same + ** event payload. This only works when the time, host, and dimensions are the same for every name=value pair + ** in the timeseries data. + ** + ** The format for multimetric data is 'metric_name:nameOfMetric = valueOfMetric' + */ + var metricJSON []byte + + // Set the event data from the commonTags above. + dataGroup.Event = "metric" + dataGroup.Time = commonTags.Time + dataGroup.Host = commonTags.Host + dataGroup.Index = commonTags.Index + dataGroup.Source = commonTags.Source + dataGroup.Fields = commonTags.Fields + + // Stuff the metric data into the structure. + for _, field := range metric.FieldList() { + value, valid := verifyValue(field.Value) + + if !valid { + log.Printf("D! Can not parse value: %v for key: %v", field.Value, field.Key) + continue + } + + dataGroup.Fields["metric_name:"+metric.Name()+"."+field.Key] = value + } + + // Manage the rest of the event details based upon HEC routing rules + switch s.HecRouting { + case true: + // Output the data as a fields array and host,index,time,source overrides for the HEC. 
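+		// Marshaling the whole HECTimeSeries struct keeps time, host, index, and
+		// source as top-level JSON keys, which is what the HEC uses for routing.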
+ metricJSON, err = json.Marshal(dataGroup) + default: + // Just output the data and the time, useful for file based outputs + dataGroup.Fields["time"] = dataGroup.Time + metricJSON, err = json.Marshal(dataGroup.Fields) + } + if err != nil { + return nil, err + } + // Let the JSON fall through to the return below + metricGroup = metricJSON + + return metricGroup, nil +} + +func (s *serializer) createSingle(metric telegraf.Metric, dataGroup HECTimeSeries, commonTags CommonTags) (metricGroup []byte, err error) { + /* The default mode is to generate one JSON entity per metric (required for pre-8.0 Splunks) + ** + ** The format for single metric is 'nameOfMetric = valueOfMetric' + */ + + var metricJSON []byte + + for _, field := range metric.FieldList() { + + value, valid := verifyValue(field.Value) + + if !valid { + log.Printf("D! Can not parse value: %v for key: %v", field.Value, field.Key) + continue + } + + dataGroup.Event = "metric" + + dataGroup.Time = commonTags.Time + + // Apply the common tags from above to every record. + dataGroup.Host = commonTags.Host + dataGroup.Index = commonTags.Index + dataGroup.Source = commonTags.Source + dataGroup.Fields = commonTags.Fields + + dataGroup.Fields["metric_name"] = metric.Name() + "." + field.Key + dataGroup.Fields["_value"] = value + + switch s.HecRouting { + case true: + // Output the data as a fields array and host,index,time,source overrides for the HEC. + metricJSON, err = json.Marshal(dataGroup) + default: + // Just output the data and the time, useful for file based outputs + dataGroup.Fields["time"] = dataGroup.Time + metricJSON, err = json.Marshal(dataGroup.Fields) + } + + metricGroup = append(metricGroup, metricJSON...) + + if err != nil { + return nil, err + } + } + + return metricGroup, nil +} + +func (s *serializer) createObject(metric telegraf.Metric) (metricGroup []byte, err error) { + + /* Splunk supports one metric json object, and does _not_ support an array of JSON objects. + ** Splunk has the following required names for the metric store: + ** metric_name: The name of the metric + ** _value: The value for the metric + ** time: The timestamp for the metric + ** All other index fields become dimensions. + */ + + dataGroup := HECTimeSeries{} + + // The tags are common to all events in this timeseries + commonTags := CommonTags{} + + commonTags.Fields = map[string]interface{}{} + + // Break tags out into key(n)=value(t) pairs + for n, t := range metric.Tags() { + if n == "host" { + commonTags.Host = t + } else if n == "index" { + commonTags.Index = t + } else if n == "source" { + commonTags.Source = t + } else { + commonTags.Fields[n] = t + } + } + commonTags.Time = float64(metric.Time().UnixNano()) / float64(1000000000) + switch s.SplunkmetricMultiMetric { + case true: + metricGroup, _ = s.createMulti(metric, dataGroup, commonTags) + default: + metricGroup, _ = s.createSingle(metric, dataGroup, commonTags) + } + + // Return the metric group regardless of if it's multimetric or single metric. 
+ return metricGroup, nil +} + +func verifyValue(v interface{}) (value interface{}, valid bool) { + switch v.(type) { + case string: + valid = false + value = v + case bool: + if v == bool(true) { + // Store 1 for a "true" value + valid = true + value = 1 + } else { + // Otherwise store 0 + valid = true + value = 0 + } + default: + valid = true + value = v + } + return value, valid +} diff --git a/plugins/serializers/splunkmetric/splunkmetric_test.go b/plugins/serializers/splunkmetric/splunkmetric_test.go new file mode 100644 index 000000000..5ce5265d8 --- /dev/null +++ b/plugins/serializers/splunkmetric/splunkmetric_test.go @@ -0,0 +1,265 @@ +package splunkmetric + +import ( + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/stretchr/testify/assert" +) + +func MustMetric(v telegraf.Metric, err error) telegraf.Metric { + if err != nil { + panic(err) + } + return v +} + +func TestSerializeMetricFloat(t *testing.T) { + // Test sub-second time + now := time.Unix(1529875740, 819000000) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(false, false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := `{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":1529875740.819}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricFloatHec(t *testing.T) { + // Test sub-second time + now := time.Unix(1529875740, 819000000) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(true, false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + expS := `{"time":1529875740.819,"event":"metric","fields":{"_value":91.5,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(false, false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := `{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricIntHec(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "usage_idle": int64(90), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(true, false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := `{"time":0,"event":"metric","fields":{"_value":90,"cpu":"cpu0","metric_name":"cpu.usage_idle"}}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricBool(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "container-name": "telegraf-test", + } + fields := map[string]interface{}{ + "oomkiller": bool(true), + } + m, err := metric.New("docker", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(false, false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := 
`{"_value":1,"container-name":"telegraf-test","metric_name":"docker.oomkiller","time":0}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricBoolHec(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "container-name": "telegraf-test", + } + fields := map[string]interface{}{ + "oomkiller": bool(false), + } + m, err := metric.New("docker", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(true, false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := `{"time":0,"event":"metric","fields":{"_value":0,"container-name":"telegraf-test","metric_name":"docker.oomkiller"}}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMetricString(t *testing.T) { + now := time.Unix(0, 0) + tags := map[string]string{ + "cpu": "cpu0", + } + fields := map[string]interface{}{ + "processorType": "ARMv7 Processor rev 4 (v7l)", + "usage_idle": int64(5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s, _ := NewSerializer(false, false) + var buf []byte + buf, err = s.Serialize(m) + assert.NoError(t, err) + + expS := `{"_value":5,"cpu":"cpu0","metric_name":"cpu.usage_idle","time":0}` + assert.Equal(t, string(expS), string(buf)) + assert.NoError(t, err) +} + +func TestSerializeBatch(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + ) + n := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 92.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m, n} + s, _ := NewSerializer(false, false) + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + expS := `{"_value":42,"metric_name":"cpu.value","time":0}{"_value":92,"metric_name":"cpu.value","time":0}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMulti(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "user": 42.0, + "system": 8.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m} + s, _ := NewSerializer(false, true) + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + expS := `{"metric_name:cpu.system":8,"metric_name:cpu.user":42,"time":0}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeBatchHec(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 42.0, + }, + time.Unix(0, 0), + ), + ) + n := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "value": 92.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m, n} + s, _ := NewSerializer(true, false) + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + expS := `{"time":0,"event":"metric","fields":{"_value":42,"metric_name":"cpu.value"}}{"time":0,"event":"metric","fields":{"_value":92,"metric_name":"cpu.value"}}` + assert.Equal(t, string(expS), string(buf)) +} + +func TestSerializeMultiHec(t *testing.T) { + m := MustMetric( + metric.New( + "cpu", + map[string]string{}, + map[string]interface{}{ + "usage": 42.0, + "system": 8.0, + }, + time.Unix(0, 0), + ), + ) + + metrics := []telegraf.Metric{m} + s, _ := NewSerializer(true, true) + buf, err := s.SerializeBatch(metrics) + assert.NoError(t, err) + + expS := `{"time":0,"event":"metric","fields":{"metric_name:cpu.system":8,"metric_name:cpu.usage":42}}` + assert.Equal(t, string(expS), 
string(buf)) +} diff --git a/plugins/serializers/wavefront/README.md b/plugins/serializers/wavefront/README.md new file mode 100644 index 000000000..3b72d95b4 --- /dev/null +++ b/plugins/serializers/wavefront/README.md @@ -0,0 +1,47 @@ +# Wavefront + +The `wavefront` serializer translates the Telegraf metric format to the [Wavefront Data Format](https://docs.wavefront.com/wavefront_data_format.html). + +### Configuration + +```toml +[[outputs.file]] + files = ["stdout"] + + ## Use Strict rules to sanitize metric and tag names from invalid characters + ## When enabled forward slash (/) and comma (,) will be accepted + # wavefront_use_strict = false + + ## point tags to use as the source name for Wavefront (if none found, host will be used) + # wavefront_source_override = ["hostname", "address", "agent_host", "node_host"] + + ## Data format to output. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md + data_format = "wavefront" +``` + +### Metrics + +A Wavefront metric is equivalent to a single field value of a Telegraf measurement. +The Wavefront metric name will be: `.` +If a prefix is specified it will be honored. +Only boolean and numeric metrics will be serialized, all other types will generate +an error. + +### Example + +The following Telegraf metric + +``` +cpu,cpu=cpu0,host=testHost user=12,idle=88,system=0 1234567890 +``` + +will serialize into the following Wavefront metrics + +``` +"cpu.user" 12.000000 1234567890 source="testHost" "cpu"="cpu0" +"cpu.idle" 88.000000 1234567890 source="testHost" "cpu"="cpu0" +"cpu.system" 0.000000 1234567890 source="testHost" "cpu"="cpu0" +``` diff --git a/plugins/serializers/wavefront/wavefront.go b/plugins/serializers/wavefront/wavefront.go new file mode 100755 index 000000000..67fa1ae3a --- /dev/null +++ b/plugins/serializers/wavefront/wavefront.go @@ -0,0 +1,218 @@ +package wavefront + +import ( + "log" + "strconv" + "strings" + "sync" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/plugins/outputs/wavefront" +) + +// WavefrontSerializer : WavefrontSerializer struct +type WavefrontSerializer struct { + Prefix string + UseStrict bool + SourceOverride []string + scratch buffer + mu sync.Mutex // buffer mutex +} + +// catch many of the invalid chars that could appear in a metric or tag name +var sanitizedChars = strings.NewReplacer( + "!", "-", "@", "-", "#", "-", "$", "-", "%", "-", "^", "-", "&", "-", + "*", "-", "(", "-", ")", "-", "+", "-", "`", "-", "'", "-", "\"", "-", + "[", "-", "]", "-", "{", "-", "}", "-", ":", "-", ";", "-", "<", "-", + ">", "-", ",", "-", "?", "-", "/", "-", "\\", "-", "|", "-", " ", "-", + "=", "-", +) + +// catch many of the invalid chars that could appear in a metric or tag name +var strictSanitizedChars = strings.NewReplacer( + "!", "-", "@", "-", "#", "-", "$", "-", "%", "-", "^", "-", "&", "-", + "*", "-", "(", "-", ")", "-", "+", "-", "`", "-", "'", "-", "\"", "-", + "[", "-", "]", "-", "{", "-", "}", "-", ":", "-", ";", "-", "<", "-", + ">", "-", "?", "-", "\\", "-", "|", "-", " ", "-", "=", "-", +) + +var tagValueReplacer = strings.NewReplacer("\"", "\\\"", "*", "-") + +var pathReplacer = strings.NewReplacer("_", ".") + +func NewSerializer(prefix string, useStrict bool, sourceOverride []string) (*WavefrontSerializer, error) { + s := &WavefrontSerializer{ + Prefix: prefix, + UseStrict: useStrict, + SourceOverride: sourceOverride, + } + return s, nil +} + 
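As a quick orientation for the new wavefront serializer, the following standalone sketch exercises only the API visible in this diff (`NewSerializer`, `Serialize`, and `metric.New` as used in the tests). It is not part of the patch, and the prefix, tags, and source-override list are illustrative assumptions:

```go
package main

import (
	"fmt"
	"time"

	"github.com/influxdata/telegraf/metric"
	"github.com/influxdata/telegraf/plugins/serializers/wavefront"
)

func main() {
	// Prefix, strict-sanitization flag, and source-override tag list,
	// mirroring the arguments NewWavefrontSerializer passes through.
	s, err := wavefront.NewSerializer("telegraf.", false, []string{"hostname"})
	if err != nil {
		panic(err)
	}

	// "_" in the field name becomes "." in the Wavefront metric name
	// (see pathReplacer above), and the host tag becomes the point source.
	m, err := metric.New("cpu",
		map[string]string{"cpu": "cpu0", "host": "host1"},
		map[string]interface{}{"usage_idle": 91.5},
		time.Unix(1554172967, 0),
	)
	if err != nil {
		panic(err)
	}

	out, err := s.Serialize(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected to look roughly like:
	// "telegraf.cpu.usage.idle" 91.500000 1554172967 source="host1" "cpu"="cpu0"
}
```

Because `Serialize` and `SerializeBatch` guard the shared scratch buffer with a mutex, a single serializer instance can also be shared across goroutines.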
+func (s *WavefrontSerializer) serialize(buf *buffer, m telegraf.Metric) { + const metricSeparator = "." + + for fieldName, value := range m.Fields() { + var name string + + if fieldName == "value" { + name = s.Prefix + m.Name() + } else { + name = s.Prefix + m.Name() + metricSeparator + fieldName + } + + if s.UseStrict { + name = strictSanitizedChars.Replace(name) + } else { + name = sanitizedChars.Replace(name) + } + + name = pathReplacer.Replace(name) + + metricValue, valid := buildValue(value, name) + if !valid { + // bad value continue to next metric + continue + } + source, tags := buildTags(m.Tags(), s) + metric := wavefront.MetricPoint{ + Metric: name, + Timestamp: m.Time().Unix(), + Value: metricValue, + Source: source, + Tags: tags, + } + formatMetricPoint(&s.scratch, &metric, s) + } +} + +// Serialize : Serialize based on Wavefront format +func (s *WavefrontSerializer) Serialize(m telegraf.Metric) ([]byte, error) { + s.mu.Lock() + s.scratch.Reset() + s.serialize(&s.scratch, m) + out := s.scratch.Copy() + s.mu.Unlock() + return out, nil +} + +func (s *WavefrontSerializer) SerializeBatch(metrics []telegraf.Metric) ([]byte, error) { + s.mu.Lock() + s.scratch.Reset() + for _, m := range metrics { + s.serialize(&s.scratch, m) + } + out := s.scratch.Copy() + s.mu.Unlock() + return out, nil +} + +func findSourceTag(mTags map[string]string, s *WavefrontSerializer) string { + if src, ok := mTags["source"]; ok { + delete(mTags, "source") + return src + } + for _, src := range s.SourceOverride { + if source, ok := mTags[src]; ok { + delete(mTags, src) + mTags["telegraf_host"] = mTags["host"] + return source + } + } + return mTags["host"] +} + +func buildTags(mTags map[string]string, s *WavefrontSerializer) (string, map[string]string) { + // Remove all empty tags. + for k, v := range mTags { + if v == "" { + delete(mTags, k) + } + } + source := findSourceTag(mTags, s) + delete(mTags, "host") + return tagValueReplacer.Replace(source), mTags +} + +func buildValue(v interface{}, name string) (val float64, valid bool) { + switch p := v.(type) { + case bool: + if p { + return 1, true + } + return 0, true + case int64: + return float64(p), true + case uint64: + return float64(p), true + case float64: + return p, true + case string: + // return false but don't log + return 0, false + default: + // log a debug message + log.Printf("D! Serializer [wavefront] unexpected type: %T, with value: %v, for :%s\n", + v, v, name) + return 0, false + } +} + +func formatMetricPoint(b *buffer, metricPoint *wavefront.MetricPoint, s *WavefrontSerializer) []byte { + b.WriteChar('"') + b.WriteString(metricPoint.Metric) + b.WriteString(`" `) + b.WriteFloat64(metricPoint.Value) + b.WriteChar(' ') + b.WriteUint64(uint64(metricPoint.Timestamp)) + b.WriteString(` source="`) + b.WriteString(metricPoint.Source) + b.WriteChar('"') + + for k, v := range metricPoint.Tags { + b.WriteString(` "`) + if s.UseStrict { + b.WriteString(strictSanitizedChars.Replace(k)) + } else { + b.WriteString(sanitizedChars.Replace(k)) + } + b.WriteString(`"="`) + b.WriteString(tagValueReplacer.Replace(v)) + b.WriteChar('"') + } + + b.WriteChar('\n') + + return *b +} + +type buffer []byte + +func (b *buffer) Reset() { *b = (*b)[:0] } + +func (b *buffer) Copy() []byte { + p := make([]byte, len(*b)) + copy(p, *b) + return p +} + +func (b *buffer) WriteString(s string) { + *b = append(*b, s...) 
+} + +// This is named WriteChar instead of WriteByte because the 'stdmethods' check +// of 'go vet' wants WriteByte to have the signature: +// +// func (b *buffer) WriteByte(c byte) error { ... } +// +func (b *buffer) WriteChar(c byte) { + *b = append(*b, c) +} + +func (b *buffer) WriteUint64(val uint64) { + *b = strconv.AppendUint(*b, val, 10) +} + +func (b *buffer) WriteFloat64(val float64) { + *b = strconv.AppendFloat(*b, val, 'f', 6, 64) +} diff --git a/plugins/serializers/wavefront/wavefront_test.go b/plugins/serializers/wavefront/wavefront_test.go new file mode 100755 index 000000000..548326e70 --- /dev/null +++ b/plugins/serializers/wavefront/wavefront_test.go @@ -0,0 +1,340 @@ +package wavefront + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" + "github.com/influxdata/telegraf/plugins/outputs/wavefront" + "github.com/stretchr/testify/assert" +) + +func TestBuildTags(t *testing.T) { + var tagTests = []struct { + ptIn map[string]string + outTags map[string]string + outSource string + }{ + { + map[string]string{"one": "two", "three": "four", "host": "testHost"}, + map[string]string{"one": "two", "three": "four"}, + "testHost", + }, + { + map[string]string{"aaa": "bbb", "host": "testHost"}, + map[string]string{"aaa": "bbb"}, + "testHost", + }, + { + map[string]string{"bbb": "789", "aaa": "123", "host": "testHost"}, + map[string]string{"aaa": "123", "bbb": "789"}, + "testHost", + }, + { + map[string]string{"host": "aaa", "dc": "bbb"}, + map[string]string{"dc": "bbb"}, + "aaa", + }, + { + map[string]string{"instanceid": "i-0123456789", "host": "aaa", "dc": "bbb"}, + map[string]string{"dc": "bbb", "telegraf_host": "aaa"}, + "i-0123456789", + }, + { + map[string]string{"instance-id": "i-0123456789", "host": "aaa", "dc": "bbb"}, + map[string]string{"dc": "bbb", "telegraf_host": "aaa"}, + "i-0123456789", + }, + { + map[string]string{"instanceid": "i-0123456789", "host": "aaa", "hostname": "ccc", "dc": "bbb"}, + map[string]string{"dc": "bbb", "hostname": "ccc", "telegraf_host": "aaa"}, + "i-0123456789", + }, + { + map[string]string{"instanceid": "i-0123456789", "host": "aaa", "snmp_host": "ccc", "dc": "bbb"}, + map[string]string{"dc": "bbb", "snmp_host": "ccc", "telegraf_host": "aaa"}, + "i-0123456789", + }, + { + map[string]string{"host": "aaa", "snmp_host": "ccc", "dc": "bbb"}, + map[string]string{"dc": "bbb", "telegraf_host": "aaa"}, + "ccc", + }, + } + s := WavefrontSerializer{SourceOverride: []string{"instanceid", "instance-id", "hostname", "snmp_host", "node_host"}} + + for _, tt := range tagTests { + source, tags := buildTags(tt.ptIn, &s) + if !reflect.DeepEqual(tags, tt.outTags) { + t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outTags, tags) + } + if source != tt.outSource { + t.Errorf("\nexpected\t%s\nreceived\t%s\n", tt.outSource, source) + } + } +} + +func TestBuildTagsHostTag(t *testing.T) { + var tagTests = []struct { + ptIn map[string]string + outTags map[string]string + outSource string + }{ + { + map[string]string{"one": "two", "host": "testHost", "snmp_host": "snmpHost"}, + map[string]string{"telegraf_host": "testHost", "one": "two"}, + "snmpHost", + }, + } + s := WavefrontSerializer{SourceOverride: []string{"snmp_host"}} + + for _, tt := range tagTests { + source, tags := buildTags(tt.ptIn, &s) + if !reflect.DeepEqual(tags, tt.outTags) { + t.Errorf("\nexpected\t%+v\nreceived\t%+v\n", tt.outTags, tags) + } + if source != tt.outSource { + 
t.Errorf("\nexpected\t%s\nreceived\t%s\n", tt.outSource, source) + } + } +} + +func TestFormatMetricPoint(t *testing.T) { + var pointTests = []struct { + ptIn *wavefront.MetricPoint + out string + }{ + { + &wavefront.MetricPoint{ + Metric: "cpu.idle", + Value: 1, + Timestamp: 1554172967, + Source: "testHost", + Tags: map[string]string{"aaa": "bbb"}, + }, + "\"cpu.idle\" 1.000000 1554172967 source=\"testHost\" \"aaa\"=\"bbb\"\n", + }, + { + &wavefront.MetricPoint{ + Metric: "cpu.idle", + Value: 1, + Timestamp: 1554172967, + Source: "testHost", + Tags: map[string]string{"sp&c!al/chars,": "get*replaced"}, + }, + "\"cpu.idle\" 1.000000 1554172967 source=\"testHost\" \"sp-c-al-chars-\"=\"get-replaced\"\n", + }, + } + + s := WavefrontSerializer{} + + for _, pt := range pointTests { + bout := formatMetricPoint(new(buffer), pt.ptIn, &s) + sout := string(bout[:]) + if sout != pt.out { + t.Errorf("\nexpected\t%s\nreceived\t%s\n", pt.out, sout) + } + } +} + +func TestUseStrict(t *testing.T) { + var pointTests = []struct { + ptIn *wavefront.MetricPoint + out string + }{ + { + &wavefront.MetricPoint{ + Metric: "cpu.idle", + Value: 1, + Timestamp: 1554172967, + Source: "testHost", + Tags: map[string]string{"sp&c!al/chars,": "get*replaced"}, + }, + "\"cpu.idle\" 1.000000 1554172967 source=\"testHost\" \"sp-c-al/chars,\"=\"get-replaced\"\n", + }, + } + + s := WavefrontSerializer{UseStrict: true} + + for _, pt := range pointTests { + bout := formatMetricPoint(new(buffer), pt.ptIn, &s) + sout := string(bout[:]) + if sout != pt.out { + t.Errorf("\nexpected\t%s\nreceived\t%s\n", pt.out, sout) + } + } +} + +func TestSerializeMetricFloat(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "usage_idle": float64(91.5), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 91.500000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricInt(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "usage_idle": int64(91), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricBoolTrue(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "usage_idle": true, + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 1.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricBoolFalse(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := 
map[string]interface{}{ + "usage_idle": false, + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu.usage.idle\" 0.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricFieldValue(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "value": int64(91), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"cpu\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func TestSerializeMetricPrefix(t *testing.T) { + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + fields := map[string]interface{}{ + "usage_idle": int64(91), + } + m, err := metric.New("cpu", tags, fields, now) + assert.NoError(t, err) + + s := WavefrontSerializer{Prefix: "telegraf."} + buf, _ := s.Serialize(m) + mS := strings.Split(strings.TrimSpace(string(buf)), "\n") + assert.NoError(t, err) + + expS := []string{fmt.Sprintf("\"telegraf.cpu.usage.idle\" 91.000000 %d source=\"realHost\" \"cpu\"=\"cpu0\"", now.UnixNano()/1000000000)} + assert.Equal(t, expS, mS) +} + +func benchmarkMetrics(b *testing.B) [4]telegraf.Metric { + b.Helper() + now := time.Now() + tags := map[string]string{ + "cpu": "cpu0", + "host": "realHost", + } + newMetric := func(v interface{}) telegraf.Metric { + fields := map[string]interface{}{ + "usage_idle": v, + } + m, err := metric.New("cpu", tags, fields, now) + if err != nil { + b.Fatal(err) + } + return m + } + return [4]telegraf.Metric{ + newMetric(91.5), + newMetric(91), + newMetric(true), + newMetric(false), + } +} + +func BenchmarkSerialize(b *testing.B) { + var s WavefrontSerializer + metrics := benchmarkMetrics(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.Serialize(metrics[i%len(metrics)]) + } +} + +func BenchmarkSerializeBatch(b *testing.B) { + var s WavefrontSerializer + m := benchmarkMetrics(b) + metrics := m[:] + b.ResetTimer() + for i := 0; i < b.N; i++ { + s.SerializeBatch(metrics) + } +} diff --git a/processor.go b/processor.go index f2b5133a5..5e2d46914 100644 --- a/processor.go +++ b/processor.go @@ -1,12 +1,31 @@ package telegraf +// Processor is a processor plugin interface for defining new inline processors. +// these are extremely efficient and should be used over StreamingProcessor if +// you do not need asynchronous metric writes. type Processor interface { - // SampleConfig returns the default configuration of the Input - SampleConfig() string + PluginDescriber - // Description returns a one-sentence description on the Input - Description() string - - // Apply the filter to the given metric + // Apply the filter to the given metric. Apply(in ...Metric) []Metric } + +// StreamingProcessor is a processor that can take in a stream of messages +type StreamingProcessor interface { + PluginDescriber + + // Start is the initializer for the processor + // Start is only called once per plugin instance, and never in parallel. 
+ // Start should exit immediately after setup + Start(acc Accumulator) error + + // Add is called for each metric to be processed. + Add(metric Metric, acc Accumulator) + + // Stop gives you a callback to free resources. + // by the time Stop is called, the input stream will have already been closed + // and Add will not be called anymore. + // When stop returns, you should no longer be writing metrics to the + // accumulator. + Stop() error +} diff --git a/scripts/alpine.docker b/scripts/alpine.docker new file mode 100644 index 000000000..8eb86b39d --- /dev/null +++ b/scripts/alpine.docker @@ -0,0 +1,18 @@ +FROM golang:1.13.8 as builder +WORKDIR /go/src/github.com/influxdata/telegraf + +COPY . /go/src/github.com/influxdata/telegraf +RUN CGO_ENABLED=0 make go-install + +FROM alpine:3.6 +RUN echo 'hosts: files dns' >> /etc/nsswitch.conf +RUN apk add --no-cache iputils ca-certificates net-snmp-tools procps lm_sensors && \ + update-ca-certificates +COPY --from=builder /go/bin/* /usr/bin/ +COPY etc/telegraf.conf /etc/telegraf/telegraf.conf + +EXPOSE 8125/udp 8092/udp 8094 + +COPY scripts/docker-entrypoint.sh /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD ["telegraf"] diff --git a/scripts/build.py b/scripts/build.py index 344ee48a8..b309f5095 100755 --- a/scripts/build.py +++ b/scripts/build.py @@ -3,7 +3,6 @@ import sys import os import subprocess -import time from datetime import datetime import shutil import tempfile @@ -18,6 +17,8 @@ import argparse # Packaging variables PACKAGE_NAME = "telegraf" +USER = "telegraf" +GROUP = "telegraf" INSTALL_ROOT_DIR = "/usr/bin" LOG_DIR = "/var/log/telegraf" SCRIPT_DIR = "/usr/lib/telegraf/scripts" @@ -51,9 +52,9 @@ VENDOR = "InfluxData" DESCRIPTION = "Plugin-driven server agent for reporting metrics into InfluxDB." # SCRIPT START -prereqs = [ 'git', 'go' ] +prereqs = ['git', 'go'] go_vet_command = "go tool vet -composites=true ./" -optional_prereqs = [ 'gvm', 'fpm', 'rpmbuild' ] +optional_prereqs = ['gvm', 'fpm', 'rpmbuild'] fpm_common_args = "-f -s dir --log error \ --vendor {} \ @@ -66,41 +67,46 @@ fpm_common_args = "-f -s dir --log error \ --before-install {} \ --after-remove {} \ --before-remove {} \ + --rpm-attr 755,{},{}:{} \ --description \"{}\"".format( VENDOR, PACKAGE_URL, PACKAGE_LICENSE, MAINTAINER, - CONFIG_DIR + '/telegraf.conf', + CONFIG_DIR + '/telegraf.conf.sample', LOGROTATE_DIR + '/telegraf', POSTINST_SCRIPT, PREINST_SCRIPT, POSTREMOVE_SCRIPT, PREREMOVE_SCRIPT, + USER, GROUP, LOG_DIR, DESCRIPTION) targets = { - 'telegraf' : './cmd/telegraf', + 'telegraf': './cmd/telegraf', } supported_builds = { - "windows": [ "amd64", "i386" ], - "linux": [ "amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x"], - "freebsd": [ "amd64", "i386" ] + 'darwin': ["amd64"], + "windows": ["amd64", "i386"], + "linux": ["amd64", "i386", "armhf", "armel", "arm64", "static_amd64", "s390x", "mipsel", "mips"], + "freebsd": ["amd64", "i386"] } supported_packages = { - "linux": [ "deb", "rpm", "tar" ], - "windows": [ "zip" ], - "freebsd": [ "tar" ] + "darwin": ["tar"], + "linux": ["deb", "rpm", "tar"], + "windows": ["zip"], + "freebsd": ["tar"] } -next_version = '1.8.0' +next_version = '1.15.0' ################ #### Telegraf Functions ################ + def print_banner(): logging.info(""" _____ _ __ @@ -112,17 +118,19 @@ def print_banner(): Build Script """) + def create_package_fs(build_root): """Create a filesystem structure to mimic the package filesystem. 
""" logging.debug("Creating a filesystem hierarchy from directory: {}".format(build_root)) # Using [1:] for the path names due to them being absolute # (will overwrite previous paths, per 'os.path.join' documentation) - dirs = [ INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:], CONFIG_DIR_D[1:] ] + dirs = [INSTALL_ROOT_DIR[1:], LOG_DIR[1:], SCRIPT_DIR[1:], CONFIG_DIR[1:], LOGROTATE_DIR[1:], CONFIG_DIR_D[1:]] for d in dirs: os.makedirs(os.path.join(build_root, d)) os.chmod(os.path.join(build_root, d), 0o755) + def package_scripts(build_root, config_only=False, windows=False): """Copy the necessary scripts and configuration files to the package filesystem. @@ -130,10 +138,10 @@ def package_scripts(build_root, config_only=False, windows=False): if config_only or windows: logging.info("Copying configuration to build directory") if windows: - shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf")) + shutil.copyfile(DEFAULT_WINDOWS_CONFIG, os.path.join(build_root, "telegraf.conf.sample")) else: - shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "telegraf.conf")) - os.chmod(os.path.join(build_root, "telegraf.conf"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, "telegraf.conf.sample")) + os.chmod(os.path.join(build_root, "telegraf.conf.sample"), 0o644) else: logging.info("Copying scripts and configuration to build directory") shutil.copyfile(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1])) @@ -142,32 +150,31 @@ def package_scripts(build_root, config_only=False, windows=False): os.chmod(os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]), 0o644) shutil.copyfile(LOGROTATE_SCRIPT, os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf")) os.chmod(os.path.join(build_root, LOGROTATE_DIR[1:], "telegraf"), 0o644) - shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf")) - os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf"), 0o644) + shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf.sample")) + os.chmod(os.path.join(build_root, CONFIG_DIR[1:], "telegraf.conf.sample"), 0o644) + def run_generate(): # NOOP for Telegraf return True + def go_get(branch, update=False, no_uncommitted=False): """Retrieve build dependencies or restore pinned dependencies. 
""" if local_changes() and no_uncommitted: logging.error("There are uncommitted changes in the current directory.") return False - if not check_path_for("dep"): - logging.info("Downloading `dep`...") - get_command = "go get -u github.com/golang/dep/cmd/dep" - run(get_command) - logging.info("Retrieving dependencies with `dep`...") - run("{}/bin/dep ensure -v".format(os.environ.get("GOPATH", - os.path.expanduser("~/go")))) + logging.info("Retrieving dependencies...") + run("go mod download") return True + def run_tests(race, parallel, timeout, no_vet): # Currently a NOOP for Telegraf return True + ################ #### All Telegraf-specific content above this line ################ @@ -186,14 +193,14 @@ def run(command, allow_failure=False, shell=False): # logging.debug("Command output: {}".format(out)) except subprocess.CalledProcessError as e: if allow_failure: - logging.warn("Command '{}' failed with error: {}".format(command, e.output)) + logging.warning("Command '{}' failed with error: {}".format(command, e.output)) return None else: logging.error("Command '{}' failed with error: {}".format(command, e.output)) sys.exit(1) except OSError as e: if allow_failure: - logging.warn("Command '{}' failed with error: {}".format(command, e)) + logging.warning("Command '{}' failed with error: {}".format(command, e)) return out else: logging.error("Command '{}' failed with error: {}".format(command, e)) @@ -201,7 +208,8 @@ def run(command, allow_failure=False, shell=False): else: return out -def create_temp_dir(prefix = None): + +def create_temp_dir(prefix=None): """ Create temporary directory with optional prefix. """ if prefix is None: @@ -209,13 +217,14 @@ def create_temp_dir(prefix = None): else: return tempfile.mkdtemp(prefix=prefix) + def increment_minor_version(version): """Return the version with the minor version incremented and patch version set to zero. """ ver_list = version.split('.') if len(ver_list) != 3: - logging.warn("Could not determine how to increment version '{}', will just use provided version.".format(version)) + logging.warning("Could not determine how to increment version '{}', will just use provided version.".format(version)) return version ver_list[1] = str(int(ver_list[1]) + 1) ver_list[2] = str(0) @@ -223,13 +232,15 @@ def increment_minor_version(version): logging.debug("Incremented version from '{}' to '{}'.".format(version, inc_version)) return inc_version + def get_current_version_tag(): """Retrieve the raw git version tag. """ version = run("git describe --exact-match --tags 2>/dev/null", - allow_failure=True, shell=True) + allow_failure=True, shell=True) return version + def get_current_version(): """Parse version information from git tag output. """ @@ -241,15 +252,15 @@ def get_current_version(): version_tag = version_tag[1:] # Replace any '-'/'_' with '~' if '-' in version_tag: - version_tag = version_tag.replace("-","~") + version_tag = version_tag.replace("-", "~") if '_' in version_tag: - version_tag = version_tag.replace("_","~") + version_tag = version_tag.replace("_", "~") return version_tag + def get_current_commit(short=False): """Retrieve the current git commit. """ - command = None if short: command = "git log --pretty=format:'%h' -n 1" else: @@ -257,6 +268,7 @@ def get_current_commit(short=False): out = run(command) return out.strip('\'\n\r ') + def get_current_branch(): """Retrieve the current git branch. 
""" @@ -264,6 +276,7 @@ def get_current_branch(): out = run(command) return out.strip() + def local_changes(): """Return True if there are local un-committed changes. """ @@ -272,6 +285,7 @@ def local_changes(): return True return False + def get_system_arch(): """Retrieve current system architecture. """ @@ -287,6 +301,7 @@ def get_system_arch(): arch = "arm" return arch + def get_system_platform(): """Retrieve current system platform. """ @@ -295,6 +310,7 @@ def get_system_platform(): else: return sys.platform + def get_go_version(): """Retrieve version information for Go. """ @@ -304,6 +320,7 @@ def get_go_version(): return matches.groups()[0].strip() return None + def check_path_for(b): """Check the the user's path for the provided binary. """ @@ -313,21 +330,23 @@ def check_path_for(b): for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') full_path = os.path.join(path, b) - if os.path.isfile(full_path) and os.access(full_path, os.X_OK): + if is_exe(full_path): return full_path -def check_environ(build_dir = None): + +def check_environ(build_dir=None): """Check environment for common Go variables. """ logging.info("Checking environment...") - for v in [ "GOPATH", "GOBIN", "GOROOT" ]: + for v in ["GOPATH", "GOBIN", "GOROOT"]: logging.debug("Using '{}' for {}".format(os.environ.get(v), v)) cwd = os.getcwd() if build_dir is None and os.environ.get("GOPATH") and os.environ.get("GOPATH") not in cwd: - logging.warn("Your current directory is not under your GOPATH. This may lead to build failures.") + logging.warning("Your current directory is not under your GOPATH. This may lead to build failures.") return True + def check_prereqs(): """Check user path for required dependencies. """ @@ -338,6 +357,7 @@ def check_prereqs(): return False return True + def upload_packages(packages, bucket_name=None, overwrite=False): """Upload provided package output to AWS S3. """ @@ -378,9 +398,10 @@ def upload_packages(packages, bucket_name=None, overwrite=False): n = k.set_contents_from_filename(p, replace=False) k.make_public() else: - logging.warn("Not uploading file {}, as it already exists in the target bucket.".format(name)) + logging.warning("Not uploading file {}, as it already exists in the target bucket.".format(name)) return True + def go_list(vendor=False, relative=False): """ Return a list of packages @@ -407,6 +428,7 @@ def go_list(vendor=False, relative=False): packages = relative_pkgs return packages + def build(version=None, platform=None, arch=None, @@ -414,10 +436,12 @@ def build(version=None, race=False, clean=False, outdir=".", - tags=[], + tags=None, static=False): """Build each target for the specified architecture and platform. 
""" + if tags is None: + tags = [] logging.info("Starting build for {}/{}...".format(platform, arch)) logging.info("Using Go version: {}".format(get_go_version())) logging.info("Using git branch: {}".format(get_current_branch())) @@ -452,13 +476,16 @@ def build(version=None, build_command += "CGO_ENABLED=0 " # Handle variations in architecture output + goarch = arch if arch == "i386" or arch == "i686": - arch = "386" + goarch = "386" elif "arm64" in arch: - arch = "arm64" + goarch = "arm64" elif "arm" in arch: - arch = "arm" - build_command += "GOOS={} GOARCH={} ".format(platform, arch) + goarch = "arm" + elif arch == "mipsel": + goarch = "mipsle" + build_command += "GOOS={} GOARCH={} ".format(platform, goarch) if "arm" in arch: if arch == "armel": @@ -498,6 +525,7 @@ def build(version=None, logging.info("Time taken: {}s".format((end_time - start_time).total_seconds())) return True + def generate_sha256_from_file(path): """Generate SHA256 hash signature based on the contents of the file at path. """ @@ -506,13 +534,14 @@ def generate_sha256_from_file(path): m.update(f.read()) return m.hexdigest() + def generate_sig_from_file(path): """Generate a detached GPG signature from the file at path. """ logging.debug("Generating GPG signature for file: {}".format(path)) gpg_path = check_path_for('gpg') if gpg_path is None: - logging.warn("gpg binary not found on path! Skipping signature creation.") + logging.warning("gpg binary not found on path! Skipping signature creation.") return False if os.environ.get("GNUPG_HOME") is not None: run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get("GNUPG_HOME"), path)) @@ -520,6 +549,7 @@ def generate_sig_from_file(path): run('gpg --armor --detach-sign --yes {}'.format(path)) return True + def package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False): """Package the output of the build process. """ @@ -572,6 +602,8 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= shutil.copy(fr, to) for package_type in supported_packages[platform]: + if package_type == "rpm" and arch in ["mipsel", "mips"]: + continue # Package the directory structure for each package type for the platform logging.debug("Packaging directory '{}' as '{}'.".format(build_root, package_type)) name = pkg_name @@ -634,7 +666,7 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= else: if package_type == 'rpm' and release and '~' in package_version: package_version, suffix = package_version.split('~', 1) - # The ~ indicatees that this is a prerelease so we give it a leading 0. + # The ~ indicates that this is a prerelease so we give it a leading 0. 
package_iteration = "0.%s" % suffix fpm_command = "fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} ".format( fpm_common_args, @@ -646,14 +678,14 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= package_build_root, current_location) if package_type == "rpm": - fpm_command += "--depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT) + fpm_command += "--directories /var/log/telegraf --directories /etc/telegraf --depends coreutils --depends shadow-utils --rpm-posttrans {}".format(POSTINST_SCRIPT) out = run(fpm_command, shell=True) matches = re.search(':path=>"(.*)"', out) outfile = None if matches is not None: outfile = matches.groups()[0] if outfile is None: - logging.warn("Could not determine output from packaging output!") + logging.warning("Could not determine output from packaging output!") else: if nightly: # Strip nightly version from package name @@ -671,6 +703,7 @@ def package(build_output, pkg_name, version, nightly=False, iteration=1, static= # Cleanup shutil.rmtree(tmp_build_dir) + def main(args): global PACKAGE_NAME @@ -730,7 +763,7 @@ def main(args): platforms = [args.platform] for platform in platforms: - build_output.update( { platform : {} } ) + build_output.update({platform: {}}) archs = [] if args.arch == "all": single_build = False @@ -752,7 +785,7 @@ def main(args): tags=args.build_tags, static=args.static): return 1 - build_output.get(platform).update( { arch : od } ) + build_output.get(platform).update({arch: od}) # Build packages if args.package: @@ -768,7 +801,7 @@ def main(args): release=args.release) if args.sign: logging.debug("Generating GPG signatures for packages: {}".format(packages)) - sigs = [] # retain signatures so they can be uploaded with packages + sigs = [] # retain signatures so they can be uploaded with packages for p in packages: if generate_sig_from_file(p): sigs.append(p + '.asc') @@ -793,6 +826,7 @@ def main(args): return 0 + if __name__ == '__main__': LOG_LEVEL = logging.INFO if '--debug' in sys.argv[1:]: @@ -802,7 +836,7 @@ if __name__ == '__main__': format=log_format) parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.') - parser.add_argument('--verbose','-v','--debug', + parser.add_argument('--verbose', '-v', '--debug', action='store_true', help='Use debug output') parser.add_argument('--outdir', '-o', @@ -858,7 +892,7 @@ if __name__ == '__main__': help='Send build stats to InfluxDB using provided database name') parser.add_argument('--nightly', action='store_true', - help='Mark build output as nightly build (will incremement the minor version)') + help='Mark build output as nightly build (will increment the minor version)') parser.add_argument('--update', action='store_true', help='Update build dependencies prior to building') @@ -880,7 +914,7 @@ if __name__ == '__main__': parser.add_argument('--upload', action='store_true', help='Upload output packages to AWS S3') - parser.add_argument('--upload-overwrite','-w', + parser.add_argument('--upload-overwrite', '-w', action='store_true', help='Upload output packages to AWS S3') parser.add_argument('--bucket', diff --git a/scripts/check-deps.sh b/scripts/check-deps.sh new file mode 100755 index 000000000..2d3c02dad --- /dev/null +++ b/scripts/check-deps.sh @@ -0,0 +1,66 @@ +#!/bin/sh + +tmpdir="$(mktemp -d)" + +cleanup() { + rm -rf "$tmpdir" +} +trap cleanup EXIT + +targets="$(go tool dist list)" + +for target in ${targets}; do + # only check platforms we build for + case "${target}" 
in + linux/*) ;; + windows/*) ;; + freebsd/*) ;; + darwin/*) ;; + *) continue;; + esac + + GOOS=${target%%/*} GOARCH=${target##*/} \ + go list -deps -f '{{with .Module}}{{.Path}}{{end}}' ./cmd/telegraf/ >> "${tmpdir}/golist" +done + +for dep in $(LC_ALL=C sort -u "${tmpdir}/golist"); do + case "${dep}" in + # ignore ourselves + github.com/influxdata/telegraf) continue;; + + # dependency is replaced in go.mod + github.com/satori/go.uuid) continue;; + + # go-autorest has a single license for all sub modules + github.com/Azure/go-autorest/autorest) + dep=github.com/Azure/go-autorest;; + github.com/Azure/go-autorest/*) + continue;; + + # single license for all sub modules + cloud.google.com/go/*) + continue;; + esac + + # Remove single and double digit version from path; these are generally not + # actual parts of the path and instead indicate a branch or tag. + # example: github.com/influxdata/go-syslog/v2 -> github.com/influxdata/go-syslog + dep="${dep%%/v[0-9]}" + dep="${dep%%/v[0-9][0-9]}" + + echo "${dep}" >> "${tmpdir}/HEAD" +done + +grep '^-' docs/LICENSE_OF_DEPENDENCIES.md | grep -v github.com/DataDog/datadog-agent | cut -f 2 -d' ' > "${tmpdir}/LICENSE_OF_DEPENDENCIES.md" + +diff -U0 "${tmpdir}/LICENSE_OF_DEPENDENCIES.md" "${tmpdir}/HEAD" || +cat - < /dev/null 2>&1 && status="0" || status="$?" - # If the status is SUCCESS then don't need to start again. - if [ "x$status" = "x0" ]; then + if [ -e "$pidfile" ]; then + if pidofproc -p $pidfile $daemon > /dev/null; then log_failure_msg "$name process is running" - exit 0 # Exit + else + log_failure_msg "$name pidfile has no corresponding process; ensure $name is stopped and remove $pidfile" fi + exit 0 fi # Bump the file limits, before launching the daemon. These will carry over to @@ -150,13 +150,18 @@ case $1 in stop) # Stop the daemon. if [ -e $pidfile ]; then - pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" - if [ "$status" = 0 ]; then - if killproc -p $pidfile SIGTERM && /bin/rm -rf $pidfile; then - log_success_msg "$name process was stopped" - else - log_failure_msg "$name failed to stop service" - fi + if pidofproc -p $pidfile $daemon > /dev/null; then + # periodically signal until process exists + while true; do + if ! pidofproc -p $pidfile $daemon > /dev/null; then + break + fi + killproc -p $pidfile SIGTERM 2>&1 >/dev/null + sleep 2 + done + + log_success_msg "$name process was stopped" + rm -f $pidfile fi else log_failure_msg "$name process is not running" @@ -166,8 +171,7 @@ case $1 in reload) # Reload the daemon. if [ -e $pidfile ]; then - pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" - if [ "$status" = 0 ]; then + if pidofproc -p $pidfile $daemon > /dev/null; then if killproc -p $pidfile SIGHUP; then log_success_msg "$name process was reloaded" else diff --git a/scripts/post-install.sh b/scripts/post-install.sh index 6c73fef8e..f37265593 100644 --- a/scripts/post-install.sh +++ b/scripts/post-install.sh @@ -24,18 +24,6 @@ function install_chkconfig { chkconfig --add telegraf } -if ! grep "^telegraf:" /etc/group &>/dev/null; then - groupadd -r telegraf -fi - -if ! id telegraf &>/dev/null; then - useradd -r -M telegraf -s /bin/false -d /etc/telegraf -g telegraf -fi - -test -d $LOG_DIR || mkdir -p $LOG_DIR -chown -R -L telegraf:telegraf $LOG_DIR -chmod 755 $LOG_DIR - # Remove legacy symlink, if it exists if [[ -L /etc/init.d/telegraf ]]; then rm -f /etc/init.d/telegraf @@ -55,6 +43,11 @@ if [[ ! 
-d /etc/telegraf/telegraf.d ]]; then mkdir -p /etc/telegraf/telegraf.d fi +# If 'telegraf.conf' is not present use package's sample (fresh install) +if [[ ! -f /etc/telegraf/telegraf.conf ]] && [[ -f /etc/telegraf/telegraf.conf.sample ]]; then + cp /etc/telegraf/telegraf.conf.sample /etc/telegraf/telegraf.conf +fi + # Distribution-specific logic if [[ -f /etc/redhat-release ]] || [[ -f /etc/SuSE-release ]]; then # RHEL-variant logic @@ -72,6 +65,14 @@ if [[ -f /etc/redhat-release ]] || [[ -f /etc/SuSE-release ]]; then fi elif [[ -f /etc/debian_version ]]; then # Debian/Ubuntu logic + + # Ownership for RH-based platforms is set in build.py via the `rmp-attr` option. + # We perform ownership change only for Debian-based systems. + # Moving these lines out of this if statement would make `rmp -V` fail after installation. + test -d $LOG_DIR || mkdir -p $LOG_DIR + chown -R -L telegraf:telegraf $LOG_DIR + chmod 755 $LOG_DIR + if [[ "$(readlink /proc/1/exe)" == */systemd ]]; then install_systemd /lib/systemd/system/telegraf.service deb-systemd-invoke restart telegraf.service || echo "WARNING: systemd not running." @@ -88,7 +89,10 @@ elif [[ -f /etc/debian_version ]]; then fi elif [[ -f /etc/os-release ]]; then source /etc/os-release - if [[ $ID = "amzn" ]]; then + if [[ "$NAME" = "Amazon Linux" ]]; then + # Amazon Linux 2+ logic + install_systemd /usr/lib/systemd/system/telegraf.service + elif [[ "$NAME" = "Amazon Linux AMI" ]]; then # Amazon Linux logic install_init # Run update-rc.d or fallback to chkconfig if not available @@ -97,5 +101,8 @@ elif [[ -f /etc/os-release ]]; then else install_chkconfig fi + elif [[ "$NAME" = "Solus" ]]; then + # Solus logic + install_systemd /usr/lib/systemd/system/telegraf.service fi fi diff --git a/scripts/post-remove.sh b/scripts/post-remove.sh index b4b6f18fb..bda08e2cb 100644 --- a/scripts/post-remove.sh +++ b/scripts/post-remove.sh @@ -48,12 +48,19 @@ elif [[ -f /etc/debian_version ]]; then fi elif [[ -f /etc/os-release ]]; then source /etc/os-release - if [[ $ID = "amzn" ]]; then - # Amazon Linux logic - if [[ "$1" = "0" ]]; then - # InfluxDB is no longer installed, remove from init system - rm -f /etc/default/telegraf + if [[ "$ID" = "amzn" ]] && [[ "$1" = "0" ]]; then + # InfluxDB is no longer installed, remove from init system + rm -f /etc/default/telegraf + + if [[ "$NAME" = "Amazon Linux" ]]; then + # Amazon Linux 2+ logic + disable_systemd /usr/lib/systemd/system/telegraf.service + elif [[ "$NAME" = "Amazon Linux AMI" ]]; then + # Amazon Linux logic disable_chkconfig fi + elif [[ "$NAME" = "Solus" ]]; then + rm -f /etc/default/telegraf + disable_systemd /usr/lib/systemd/system/telegraf.service fi fi diff --git a/scripts/pre-install.sh b/scripts/pre-install.sh index b371f462d..3fad54f61 100644 --- a/scripts/pre-install.sh +++ b/scripts/pre-install.sh @@ -1,5 +1,13 @@ #!/bin/bash +if ! grep "^telegraf:" /etc/group &>/dev/null; then + groupadd -r telegraf +fi + +if ! id telegraf &>/dev/null; then + useradd -r -M telegraf -s /bin/false -d /etc/telegraf -g telegraf +fi + if [[ -d /etc/opt/telegraf ]]; then # Legacy configuration found if [[ ! -d /etc/telegraf ]]; then diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100755 index 41b95db01..000000000 --- a/scripts/release.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -ARTIFACT_DIR='artifacts' -run() -{ - "$@" - ret=$? 
- if [[ $ret -eq 0 ]] - then - echo "[INFO] [ $@ ]" - else - echo "[ERROR] [ $@ ] returned $ret" - exit $ret - fi -} - -run make -run mkdir -p ${ARTIFACT_DIR} -run gzip telegraf -c > "$ARTIFACT_DIR/telegraf.gz" - -# RPM is used to build packages for Enterprise Linux hosts. -# Boto is used to upload packages to S3. -run sudo apt-get update -run sudo apt-get install -y rpm python-boto ruby ruby-dev autoconf libtool -run sudo gem install fpm - -if git describe --exact-match HEAD 2>&1 >/dev/null; then - run ./scripts/build.py --release --package --platform=all --arch=all --upload --bucket=dl.influxdata.com/telegraf/releases -elif [ "${CIRCLE_STAGE}" = nightly ]; then - run ./scripts/build.py --nightly --package --platform=all --arch=all --upload --bucket=dl.influxdata.com/telegraf/nightlies -else - run ./scripts/build.py --package --platform=all --arch=all -fi - -run mv build $ARTIFACT_DIR diff --git a/scripts/stretch.docker b/scripts/stretch.docker new file mode 100644 index 000000000..805786075 --- /dev/null +++ b/scripts/stretch.docker @@ -0,0 +1,15 @@ +FROM golang:1.13.8 as builder +WORKDIR /go/src/github.com/influxdata/telegraf + +COPY . /go/src/github.com/influxdata/telegraf +RUN make go-install + +FROM buildpack-deps:stretch-curl +COPY --from=builder /go/bin/* /usr/bin/ +COPY etc/telegraf.conf /etc/telegraf/telegraf.conf + +EXPOSE 8125/udp 8092/udp 8094 + +COPY scripts/docker-entrypoint.sh /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] +CMD ["telegraf"] diff --git a/selfstat/selfstat.go b/selfstat/selfstat.go index 98ecbb4d4..a60ee099e 100644 --- a/selfstat/selfstat.go +++ b/selfstat/selfstat.go @@ -17,7 +17,7 @@ import ( ) var ( - registry *rgstry + registry *Registry ) // Stat is an interface for dealing with telegraf statistics collected @@ -32,9 +32,6 @@ type Stat interface { // Tags is a tag map. Each time this is called a new map is allocated. Tags() map[string]string - // Key is the unique measurement+tags key of the stat. - Key() uint64 - // Incr increments a regular stat by 'v'. // in the case of a timing stat, increment adds the timing to the cache. Incr(v int64) @@ -56,11 +53,7 @@ type Stat interface { // The returned Stat can be incremented by the consumer of Register(), and it's // value will be returned as a telegraf metric when Metrics() is called. func Register(measurement, field string, tags map[string]string) Stat { - return registry.register(&stat{ - measurement: "internal_" + measurement, - field: field, - tags: tags, - }) + return registry.register("internal_"+measurement, field, tags) } // RegisterTiming registers the given measurement, field, and tags in the selfstat @@ -80,11 +73,7 @@ func Register(measurement, field string, tags map[string]string) Stat { // The returned Stat can be incremented by the consumer of Register(), and it's // value will be returned as a telegraf metric when Metrics() is called. func RegisterTiming(measurement, field string, tags map[string]string) Stat { - return registry.register(&timingStat{ - measurement: "internal_" + measurement, - field: field, - tags: tags, - }) + return registry.registerTiming("internal_"+measurement, field, tags) } // Metrics returns all registered stats as telegraf metrics. 
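As a reading aid for the selfstat refactor above, here is a minimal sketch of how callers are expected to use the package-level helpers after the rename to `Registry`. The measurement, field, and tag names are illustrative only; the `internal_` prefix and the `Metrics()` accessor come from this hunk.

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/selfstat"
)

func main() {
	// Register keys the stat by measurement and tags; the package adds the
	// "internal_" prefix, so this reports as "internal_example".
	gatherErrors := selfstat.Register("example", "errors",
		map[string]string{"input": "example"})

	// RegisterTiming behaves the same way but creates a timing stat.
	gatherTime := selfstat.RegisterTiming("example", "gather_time_ns",
		map[string]string{"input": "example"})

	gatherErrors.Incr(1)
	gatherTime.Incr(1500)

	// Metrics returns everything registered so far as telegraf metrics.
	for _, m := range selfstat.Metrics() {
		fmt.Println(m.Name(), m.Fields())
	}
}
```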
@@ -120,27 +109,76 @@ func Metrics() []telegraf.Metric { return metrics } -type rgstry struct { +type Registry struct { stats map[uint64]map[string]Stat mu sync.Mutex } -func (r *rgstry) register(s Stat) Stat { +func (r *Registry) register(measurement, field string, tags map[string]string) Stat { r.mu.Lock() defer r.mu.Unlock() - if stats, ok := r.stats[s.Key()]; ok { - // measurement exists - if stat, ok := stats[s.FieldName()]; ok { - // field already exists, so don't create a new one - return stat - } - r.stats[s.Key()][s.FieldName()] = s - return s - } else { - // creating a new unique metric - r.stats[s.Key()] = map[string]Stat{s.FieldName(): s} - return s + + key := key(measurement, tags) + if stat, ok := registry.get(key, field); ok { + return stat } + + t := make(map[string]string, len(tags)) + for k, v := range tags { + t[k] = v + } + + s := &stat{ + measurement: measurement, + field: field, + tags: t, + } + registry.set(key, s) + return s +} + +func (r *Registry) registerTiming(measurement, field string, tags map[string]string) Stat { + r.mu.Lock() + defer r.mu.Unlock() + + key := key(measurement, tags) + if stat, ok := registry.get(key, field); ok { + return stat + } + + t := make(map[string]string, len(tags)) + for k, v := range tags { + t[k] = v + } + + s := &timingStat{ + measurement: measurement, + field: field, + tags: t, + } + registry.set(key, s) + return s +} + +func (r *Registry) get(key uint64, field string) (Stat, bool) { + if _, ok := r.stats[key]; !ok { + return nil, false + } + + if stat, ok := r.stats[key][field]; ok { + return stat, true + } + + return nil, false +} + +func (r *Registry) set(key uint64, s Stat) { + if _, ok := r.stats[key]; !ok { + r.stats[key] = make(map[string]Stat) + } + + r.stats[key][s.FieldName()] = s + return } func key(measurement string, tags map[string]string) uint64 { @@ -163,7 +201,7 @@ func key(measurement string, tags map[string]string) uint64 { } func init() { - registry = &rgstry{ + registry = &Registry{ stats: make(map[uint64]map[string]Stat), } } diff --git a/selfstat/selfstat_test.go b/selfstat/selfstat_test.go index 2de2bd381..3d590bb96 100644 --- a/selfstat/selfstat_test.go +++ b/selfstat/selfstat_test.go @@ -5,8 +5,8 @@ import ( "testing" "github.com/influxdata/telegraf/testutil" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -18,7 +18,7 @@ var ( // testCleanup resets the global registry for test cleanup & unlocks the test lock func testCleanup() { - registry = &rgstry{ + registry = &Registry{ stats: make(map[uint64]map[string]Stat), } testLock.Unlock() @@ -109,32 +109,17 @@ func TestRegisterTimingAndIncrAndSet(t *testing.T) { } func TestStatKeyConsistency(t *testing.T) { - s := &stat{ - measurement: "internal_stat", - field: "myfield", - tags: map[string]string{ - "foo": "bar", - "bar": "baz", - "whose": "first", - }, - } - k := s.Key() - for i := 0; i < 5000; i++ { - // assert that the Key() func doesn't change anything. - assert.Equal(t, k, s.Key()) - - // assert that two identical measurements always produce the same key. 
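A short sketch of the deduplication behaviour implemented by `Registry.register` above: registering the same measurement, field, and tags a second time hands back the stat that is already stored, so increments from either handle accumulate in one place. This assumes the `Stat` interface's `Get` accessor, which is not visible in this hunk; the names are illustrative.

```go
package main

import (
	"fmt"

	"github.com/influxdata/telegraf/selfstat"
)

func main() {
	tags := map[string]string{"input": "mem"}

	// Both calls produce the same key(measurement, tags) and field, so the
	// registry returns the same underlying Stat rather than a new one.
	a := selfstat.Register("gather", "metrics_gathered", tags)
	b := selfstat.Register("gather", "metrics_gathered", tags)

	a.Incr(2)
	b.Incr(3)

	fmt.Println(a.Get()) // 5
}
```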
- tmp := &stat{ - measurement: "internal_stat", - field: "myfield", - tags: map[string]string{ - "foo": "bar", - "bar": "baz", - "whose": "first", - }, - } - assert.Equal(t, k, tmp.Key()) - } + lhs := key("internal_stats", map[string]string{ + "foo": "bar", + "bar": "baz", + "whose": "first", + }) + rhs := key("internal_stats", map[string]string{ + "foo": "bar", + "bar": "baz", + "whose": "first", + }) + require.Equal(t, lhs, rhs) } func TestRegisterMetricsAndVerify(t *testing.T) { @@ -219,3 +204,10 @@ func TestRegisterMetricsAndVerify(t *testing.T) { }, ) } + +func TestRegisterCopy(t *testing.T) { + tags := map[string]string{"input": "mem", "alias": "mem1"} + stat := Register("gather", "metrics_gathered", tags) + tags["new"] = "value" + require.NotEqual(t, tags, stat.Tags()) +} diff --git a/selfstat/stat.go b/selfstat/stat.go index d7ec60a2b..e1905baf5 100644 --- a/selfstat/stat.go +++ b/selfstat/stat.go @@ -41,10 +41,3 @@ func (s *stat) Tags() map[string]string { } return m } - -func (s *stat) Key() uint64 { - if s.key == 0 { - s.key = key(s.measurement, s.tags) - } - return s.key -} diff --git a/selfstat/timingStat.go b/selfstat/timingStat.go index ef0ee05aa..13f8400bc 100644 --- a/selfstat/timingStat.go +++ b/selfstat/timingStat.go @@ -57,10 +57,3 @@ func (s *timingStat) Tags() map[string]string { } return m } - -func (s *timingStat) Key() uint64 { - if s.key == 0 { - s.key = key(s.measurement, s.tags) - } - return s.key -} diff --git a/testutil/accumulator.go b/testutil/accumulator.go index 8784cc1db..6e5148ef7 100644 --- a/testutil/accumulator.go +++ b/testutil/accumulator.go @@ -10,20 +10,29 @@ import ( "time" "github.com/influxdata/telegraf" - "github.com/stretchr/testify/assert" ) +var ( + lastID uint64 +) + +func newTrackingID() telegraf.TrackingID { + id := atomic.AddUint64(&lastID, 1) + return telegraf.TrackingID(id) +} + // Metric defines a single point measurement type Metric struct { Measurement string Tags map[string]string Fields map[string]interface{} Time time.Time + Type telegraf.ValueType } func (p *Metric) String() string { - return fmt.Sprintf("%s %v", p.Measurement, p.Fields) + return fmt.Sprintf("%s %v %v", p.Measurement, p.Tags, p.Fields) } // Accumulator defines a mocked out accumulator @@ -31,17 +40,28 @@ type Accumulator struct { sync.Mutex *sync.Cond - Metrics []*Metric - nMetrics uint64 - Discard bool - Errors []error - debug bool + Metrics []*Metric + nMetrics uint64 + Discard bool + Errors []error + debug bool + delivered chan telegraf.DeliveryInfo + + TimeFunc func() time.Time } func (a *Accumulator) NMetrics() uint64 { return atomic.LoadUint64(&a.nMetrics) } +func (a *Accumulator) GetTelegrafMetrics() []telegraf.Metric { + metrics := []telegraf.Metric{} + for _, m := range a.Metrics { + metrics = append(metrics, FromTestMetric(m)) + } + return metrics +} + func (a *Accumulator) FirstError() error { if len(a.Errors) == 0 { return nil @@ -56,11 +76,11 @@ func (a *Accumulator) ClearMetrics() { a.Metrics = make([]*Metric, 0) } -// AddFields adds a measurement point with a specified timestamp. 
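For the testutil changes above, a hedged sketch of how a plugin test might use the new `GetTelegrafMetrics` helper, which converts the mock accumulator's recorded points into `telegraf.Metric` values. The measurement, field, and tag values are made up for illustration.

```go
package example_test

import (
	"testing"

	"github.com/influxdata/telegraf/testutil"
)

func TestGatherConvertsToTelegrafMetrics(t *testing.T) {
	var acc testutil.Accumulator

	// Record a point the same way a plugin's Gather would.
	acc.AddFields("mem",
		map[string]interface{}{"used": int64(42)},
		map[string]string{"host": "example"},
	)

	// GetTelegrafMetrics converts the stored test points into telegraf.Metric
	// values, which the comparison helpers in this package accept.
	metrics := acc.GetTelegrafMetrics()
	if len(metrics) != 1 {
		t.Fatalf("expected 1 metric, got %d", len(metrics))
	}
}
```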
-func (a *Accumulator) AddFields( +func (a *Accumulator) addFields( measurement string, - fields map[string]interface{}, tags map[string]string, + fields map[string]interface{}, + tp telegraf.ValueType, timestamp ...time.Time, ) { a.Lock() @@ -73,6 +93,10 @@ func (a *Accumulator) AddFields( return } + if len(fields) == 0 { + return + } + tagsCopy := map[string]string{} for k, v := range tags { tagsCopy[k] = v @@ -83,15 +107,17 @@ func (a *Accumulator) AddFields( fieldsCopy[k] = v } - if len(fields) == 0 { - return - } - var t time.Time if len(timestamp) > 0 { t = timestamp[0] } else { t = time.Now() + if a.TimeFunc == nil { + t = time.Now() + } else { + t = a.TimeFunc() + } + } if a.debug { @@ -104,21 +130,32 @@ func (a *Accumulator) AddFields( p := &Metric{ Measurement: measurement, - Fields: fields, + Fields: fieldsCopy, Tags: tagsCopy, Time: t, + Type: tp, } a.Metrics = append(a.Metrics, p) } +// AddFields adds a measurement point with a specified timestamp. +func (a *Accumulator) AddFields( + measurement string, + fields map[string]interface{}, + tags map[string]string, + timestamp ...time.Time, +) { + a.addFields(measurement, tags, fields, telegraf.Untyped, timestamp...) +} + func (a *Accumulator) AddCounter( measurement string, fields map[string]interface{}, tags map[string]string, timestamp ...time.Time, ) { - a.AddFields(measurement, fields, tags, timestamp...) + a.addFields(measurement, tags, fields, telegraf.Counter, timestamp...) } func (a *Accumulator) AddGauge( @@ -127,12 +164,12 @@ func (a *Accumulator) AddGauge( tags map[string]string, timestamp ...time.Time, ) { - a.AddFields(measurement, fields, tags, timestamp...) + a.addFields(measurement, tags, fields, telegraf.Gauge, timestamp...) } func (a *Accumulator) AddMetrics(metrics []telegraf.Metric) { for _, m := range metrics { - a.AddFields(m.Name(), m.Fields(), m.Tags(), m.Time()) + a.addFields(m.Name(), m.Tags(), m.Fields(), m.Type(), m.Time()) } } @@ -142,7 +179,7 @@ func (a *Accumulator) AddSummary( tags map[string]string, timestamp ...time.Time, ) { - a.AddFields(measurement, fields, tags, timestamp...) + a.addFields(measurement, tags, fields, telegraf.Summary, timestamp...) } func (a *Accumulator) AddHistogram( @@ -151,7 +188,34 @@ func (a *Accumulator) AddHistogram( tags map[string]string, timestamp ...time.Time, ) { - a.AddFields(measurement, fields, tags, timestamp...) + a.addFields(measurement, tags, fields, telegraf.Histogram, timestamp...) +} + +func (a *Accumulator) AddMetric(m telegraf.Metric) { + a.addFields(m.Name(), m.Tags(), m.Fields(), m.Type(), m.Time()) +} + +func (a *Accumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { + return a +} + +func (a *Accumulator) AddTrackingMetric(m telegraf.Metric) telegraf.TrackingID { + a.AddMetric(m) + return newTrackingID() +} + +func (a *Accumulator) AddTrackingMetricGroup(group []telegraf.Metric) telegraf.TrackingID { + for _, m := range group { + a.AddMetric(m) + } + return newTrackingID() +} + +func (a *Accumulator) Delivered() <-chan telegraf.DeliveryInfo { + if a.delivered == nil { + a.delivered = make(chan telegraf.DeliveryInfo) + } + return a.delivered } // AddError appends the given error to Accumulator.Errors. 
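The `TimeFunc` hook and the per-call `telegraf.ValueType` recorded by `addFields` above let tests pin timestamps and assert metric types. A minimal sketch, with illustrative names:

```go
package example_test

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"
)

func TestTypedPointWithFixedTime(t *testing.T) {
	acc := testutil.Accumulator{
		// When no explicit timestamp is passed, addFields asks TimeFunc for
		// one, which keeps test output deterministic.
		TimeFunc: func() time.Time { return time.Unix(0, 0) },
	}

	acc.AddGauge("cpu",
		map[string]interface{}{"usage_idle": 99.0},
		map[string]string{"cpu": "cpu-total"},
	)

	m := acc.Metrics[0]
	if m.Type != telegraf.Gauge {
		t.Fatalf("expected gauge, got %v", m.Type)
	}
	if !m.Time.Equal(time.Unix(0, 0)) {
		t.Fatalf("unexpected time %v", m.Time)
	}
}
```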
@@ -167,7 +231,7 @@ func (a *Accumulator) AddError(err error) { a.Unlock() } -func (a *Accumulator) SetPrecision(precision, interval time.Duration) { +func (a *Accumulator) SetPrecision(precision time.Duration) { return } @@ -206,6 +270,18 @@ func (a *Accumulator) HasTag(measurement string, key string) bool { return false } +func (a *Accumulator) TagSetValue(measurement string, key string) string { + for _, p := range a.Metrics { + if p.Measurement == measurement { + v, ok := p.Tags[key] + if ok { + return v + } + } + } + return "" +} + func (a *Accumulator) TagValue(measurement string, key string) string { for _, p := range a.Metrics { if p.Measurement == measurement { @@ -237,7 +313,7 @@ func (a *Accumulator) NFields() int { defer a.Unlock() counter := 0 for _, pt := range a.Metrics { - for _, _ = range pt.Fields { + for range pt.Fields { counter++ } } @@ -281,8 +357,7 @@ func (a *Accumulator) AssertContainsTaggedFields( continue } - if p.Measurement == measurement { - assert.Equal(t, fields, p.Fields) + if p.Measurement == measurement && reflect.DeepEqual(fields, p.Fields) { return } } @@ -303,9 +378,10 @@ func (a *Accumulator) AssertDoesNotContainsTaggedFields( continue } - if p.Measurement == measurement { - assert.Equal(t, fields, p.Fields) - msg := fmt.Sprintf("found measurement %s with tags %v which should not be there", measurement, tags) + if p.Measurement == measurement && reflect.DeepEqual(fields, p.Fields) { + msg := fmt.Sprintf( + "found measurement %s with tagged fields (tags %v) which should not be there", + measurement, tags) assert.Fail(t, msg) } } @@ -639,3 +715,22 @@ func (a *Accumulator) BoolField(measurement string, field string) (bool, bool) { return false, false } + +// NopAccumulator is used for benchmarking to isolate the plugin from the internal +// telegraf accumulator machinery. +type NopAccumulator struct{} + +func (n *NopAccumulator) AddFields(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddGauge(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddCounter(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddSummary(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddHistogram(measurement string, fields map[string]interface{}, tags map[string]string, t ...time.Time) { +} +func (n *NopAccumulator) AddMetric(telegraf.Metric) {} +func (n *NopAccumulator) SetPrecision(precision time.Duration) {} +func (n *NopAccumulator) AddError(err error) {} +func (n *NopAccumulator) WithTracking(maxTracked int) telegraf.TrackingAccumulator { return nil } diff --git a/testutil/log.go b/testutil/log.go new file mode 100644 index 000000000..5e0458dc7 --- /dev/null +++ b/testutil/log.go @@ -0,0 +1,50 @@ +package testutil + +import ( + "log" +) + +// Logger defines a logging structure for plugins. +type Logger struct { + Name string // Name is the plugin name, will be printed in the `[]`. +} + +// Errorf logs an error message, patterned after log.Printf. +func (l Logger) Errorf(format string, args ...interface{}) { + log.Printf("E! ["+l.Name+"] "+format, args...) +} + +// Error logs an error message, patterned after log.Print. +func (l Logger) Error(args ...interface{}) { + log.Print(append([]interface{}{"E! [" + l.Name + "] "}, args...)...) 
+} + +// Debugf logs a debug message, patterned after log.Printf. +func (l Logger) Debugf(format string, args ...interface{}) { + log.Printf("D! ["+l.Name+"] "+format, args...) +} + +// Debug logs a debug message, patterned after log.Print. +func (l Logger) Debug(args ...interface{}) { + log.Print(append([]interface{}{"D! [" + l.Name + "] "}, args...)...) +} + +// Warnf logs a warning message, patterned after log.Printf. +func (l Logger) Warnf(format string, args ...interface{}) { + log.Printf("W! ["+l.Name+"] "+format, args...) +} + +// Warn logs a warning message, patterned after log.Print. +func (l Logger) Warn(args ...interface{}) { + log.Print(append([]interface{}{"W! [" + l.Name + "] "}, args...)...) +} + +// Infof logs an information message, patterned after log.Printf. +func (l Logger) Infof(format string, args ...interface{}) { + log.Printf("I! ["+l.Name+"] "+format, args...) +} + +// Info logs an information message, patterned after log.Print. +func (l Logger) Info(args ...interface{}) { + log.Print(append([]interface{}{"I! [" + l.Name + "] "}, args...)...) +} diff --git a/testutil/metric.go b/testutil/metric.go new file mode 100644 index 000000000..36ba63af9 --- /dev/null +++ b/testutil/metric.go @@ -0,0 +1,205 @@ +package testutil + +import ( + "reflect" + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +type metricDiff struct { + Measurement string + Tags []*telegraf.Tag + Fields []*telegraf.Field + Type telegraf.ValueType + Time time.Time +} + +func lessFunc(lhs, rhs *metricDiff) bool { + if lhs.Measurement != rhs.Measurement { + return lhs.Measurement < rhs.Measurement + } + + for i := 0; ; i++ { + if i >= len(lhs.Tags) && i >= len(rhs.Tags) { + break + } else if i >= len(lhs.Tags) { + return true + } else if i >= len(rhs.Tags) { + return false + } + + if lhs.Tags[i].Key != rhs.Tags[i].Key { + return lhs.Tags[i].Key < rhs.Tags[i].Key + } + if lhs.Tags[i].Value != rhs.Tags[i].Value { + return lhs.Tags[i].Value < rhs.Tags[i].Value + } + } + + for i := 0; ; i++ { + if i >= len(lhs.Fields) && i >= len(rhs.Fields) { + break + } else if i >= len(lhs.Fields) { + return true + } else if i >= len(rhs.Fields) { + return false + } + + if lhs.Fields[i].Key != rhs.Fields[i].Key { + return lhs.Fields[i].Key < rhs.Fields[i].Key + } + + if lhs.Fields[i].Value != rhs.Fields[i].Value { + ltype := reflect.TypeOf(lhs.Fields[i].Value) + rtype := reflect.TypeOf(lhs.Fields[i].Value) + + if ltype.Kind() != rtype.Kind() { + return ltype.Kind() < rtype.Kind() + } + + switch v := lhs.Fields[i].Value.(type) { + case int64: + return v < lhs.Fields[i].Value.(int64) + case uint64: + return v < lhs.Fields[i].Value.(uint64) + case float64: + return v < lhs.Fields[i].Value.(float64) + case string: + return v < lhs.Fields[i].Value.(string) + case bool: + return !v + default: + panic("unknown type") + } + } + } + + if lhs.Type != rhs.Type { + return lhs.Type < rhs.Type + } + + if lhs.Time.UnixNano() != rhs.Time.UnixNano() { + return lhs.Time.UnixNano() < rhs.Time.UnixNano() + } + + return false +} + +func newMetricDiff(metric telegraf.Metric) *metricDiff { + if metric == nil { + return nil + } + + m := &metricDiff{} + m.Measurement = metric.Name() + + for _, tag := range metric.TagList() { + m.Tags = append(m.Tags, tag) + } + sort.Slice(m.Tags, func(i, j int) bool { + return m.Tags[i].Key < m.Tags[j].Key + }) + + for _, field := range metric.FieldList() { + m.Fields = 
append(m.Fields, field) + } + sort.Slice(m.Fields, func(i, j int) bool { + return m.Fields[i].Key < m.Fields[j].Key + }) + + m.Type = metric.Type() + m.Time = metric.Time() + return m +} + +// SortMetrics enables sorting metrics before comparison. +func SortMetrics() cmp.Option { + return cmpopts.SortSlices(lessFunc) +} + +// IgnoreTime disables comparison of timestamp. +func IgnoreTime() cmp.Option { + return cmpopts.IgnoreFields(metricDiff{}, "Time") +} + +// MetricEqual returns true if the metrics are equal. +func MetricEqual(expected, actual telegraf.Metric, opts ...cmp.Option) bool { + var lhs, rhs *metricDiff + if expected != nil { + lhs = newMetricDiff(expected) + } + if actual != nil { + rhs = newMetricDiff(actual) + } + + opts = append(opts, cmpopts.EquateNaNs()) + return cmp.Equal(lhs, rhs, opts...) +} + +// RequireMetricEqual halts the test with an error if the metrics are not +// equal. +func RequireMetricEqual(t *testing.T, expected, actual telegraf.Metric, opts ...cmp.Option) { + t.Helper() + + var lhs, rhs *metricDiff + if expected != nil { + lhs = newMetricDiff(expected) + } + if actual != nil { + rhs = newMetricDiff(actual) + } + + opts = append(opts, cmpopts.EquateNaNs()) + if diff := cmp.Diff(lhs, rhs, opts...); diff != "" { + t.Fatalf("telegraf.Metric\n--- expected\n+++ actual\n%s", diff) + } +} + +// RequireMetricsEqual halts the test with an error if the array of metrics +// are not equal. +func RequireMetricsEqual(t *testing.T, expected, actual []telegraf.Metric, opts ...cmp.Option) { + t.Helper() + + lhs := make([]*metricDiff, 0, len(expected)) + for _, m := range expected { + lhs = append(lhs, newMetricDiff(m)) + } + rhs := make([]*metricDiff, 0, len(actual)) + for _, m := range actual { + rhs = append(rhs, newMetricDiff(m)) + } + + opts = append(opts, cmpopts.EquateNaNs()) + if diff := cmp.Diff(lhs, rhs, opts...); diff != "" { + t.Fatalf("[]telegraf.Metric\n--- expected\n+++ actual\n%s", diff) + } +} + +// Metric creates a new metric or panics on error. +func MustMetric( + name string, + tags map[string]string, + fields map[string]interface{}, + tm time.Time, + tp ...telegraf.ValueType, +) telegraf.Metric { + m, err := metric.New(name, tags, fields, tm, tp...) 
+ if err != nil { + panic("MustMetric") + } + return m +} + +func FromTestMetric(met *Metric) telegraf.Metric { + m, err := metric.New(met.Measurement, met.Tags, met.Fields, met.Time, met.Type) + if err != nil { + panic("MustMetric") + } + return m +} diff --git a/testutil/metric_test.go b/testutil/metric_test.go new file mode 100644 index 000000000..0c999185a --- /dev/null +++ b/testutil/metric_test.go @@ -0,0 +1,106 @@ +package testutil + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/telegraf" + "github.com/influxdata/telegraf/metric" +) + +func TestRequireMetricEqual(t *testing.T) { + tests := []struct { + name string + got telegraf.Metric + want telegraf.Metric + }{ + { + name: "equal metrics should be equal", + got: func() telegraf.Metric { + m, _ := metric.New( + "test", + map[string]string{ + "t1": "v1", + "t2": "v2", + }, + map[string]interface{}{ + "f1": 1, + "f2": 3.14, + "f3": "v3", + }, + time.Unix(0, 0), + ) + return m + }(), + want: func() telegraf.Metric { + m, _ := metric.New( + "test", + map[string]string{ + "t1": "v1", + "t2": "v2", + }, + map[string]interface{}{ + "f1": int64(1), + "f2": 3.14, + "f3": "v3", + }, + time.Unix(0, 0), + ) + return m + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + RequireMetricEqual(t, tt.want, tt.got) + }) + } +} + +func TestRequireMetricsEqual(t *testing.T) { + tests := []struct { + name string + got []telegraf.Metric + want []telegraf.Metric + opts []cmp.Option + }{ + { + name: "sort metrics option sorts by name", + got: []telegraf.Metric{ + MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), + ), + MustMetric( + "net", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + want: []telegraf.Metric{ + MustMetric( + "net", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), + ), + MustMetric( + "cpu", + map[string]string{}, + map[string]interface{}{}, + time.Unix(0, 0), + ), + }, + opts: []cmp.Option{SortMetrics()}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + RequireMetricsEqual(t, tt.want, tt.got, tt.opts...) + }) + } +} diff --git a/testutil/tls.go b/testutil/tls.go index 4f7fc012a..333db3838 100644 --- a/testutil/tls.go +++ b/testutil/tls.go @@ -30,6 +30,9 @@ func (p *pki) TLSServerConfig() *tls.ServerConfig { TLSAllowedCACerts: []string{p.CACertPath()}, TLSCert: p.ServerCertPath(), TLSKey: p.ServerKeyPath(), + TLSCipherSuites: []string{p.CipherSuite()}, + TLSMinVersion: p.TLSMinVersion(), + TLSMaxVersion: p.TLSMaxVersion(), } } @@ -41,6 +44,18 @@ func (p *pki) CACertPath() string { return path.Join(p.path, "cacert.pem") } +func (p *pki) CipherSuite() string { + return "TLS_RSA_WITH_3DES_EDE_CBC_SHA" +} + +func (p *pki) TLSMinVersion() string { + return "TLS11" +} + +func (p *pki) TLSMaxVersion() string { + return "TLS12" +} + func (p *pki) ReadClientCert() string { return readCertificate(p.ClientCertPath()) }
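Putting the new helpers together, a plugin test built on this diff might compare gathered output against expected metrics roughly as sketched below. The measurement, field, and tag values are illustrative; `MustMetric`, `SortMetrics`, and `IgnoreTime` are the helpers defined above, and the timestamp is ignored because it is not pinned in this example.

```go
package example_test

import (
	"testing"
	"time"

	"github.com/influxdata/telegraf"
	"github.com/influxdata/telegraf/testutil"
)

func TestGatherProducesExpectedMetrics(t *testing.T) {
	var acc testutil.Accumulator
	acc.AddGauge("mem",
		map[string]interface{}{"used_percent": 50.0},
		map[string]string{"host": "example"},
	)

	expected := []telegraf.Metric{
		testutil.MustMetric(
			"mem",
			map[string]string{"host": "example"},
			map[string]interface{}{"used_percent": 50.0},
			time.Unix(0, 0),
			telegraf.Gauge,
		),
	}

	// SortMetrics makes the comparison order-insensitive and IgnoreTime
	// skips the timestamp comparison.
	testutil.RequireMetricsEqual(t, expected, acc.GetTelegrafMetrics(),
		testutil.SortMetrics(), testutil.IgnoreTime())
}
```

Failures surface as a unified diff from go-cmp, which tends to be easier to read than field-by-field assertions.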